Clarify uses of kernel_start parameter #38

Merged: 3 commits, Jun 24, 2020

2 changes: 1 addition & 1 deletion coverage_config_aarch64.json
@@ -1,5 +1,5 @@
{
"coverage_score": 80.8,
"coverage_score": 80.7,
"exclude_path": "",
"crate_features": "pe"
}
2 changes: 1 addition & 1 deletion coverage_config_x86_64.json
@@ -1,5 +1,5 @@
{
"coverage_score": 84.8,
"coverage_score": 83.4,
"exclude_path": "",
"crate_features": "bzimage,elf",
"exclude_path": "loader_gen"
26 changes: 23 additions & 3 deletions src/loader/aarch64/pe/mod.rs
@@ -1,3 +1,4 @@
// Copyright © 2020, Oracle and/or its affiliates.
// Copyright (c) 2019 Intel Corporation. All rights reserved.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
@@ -48,6 +49,8 @@ pub enum Error {
InvalidImage,
/// Invalid Image magic number.
InvalidImageMagicNumber,
/// Invalid base address alignment.
InvalidBaseAddrAlignment,
}

impl error::Error for Error {
@@ -63,6 +66,7 @@ impl error::Error for Error {
Error::InvalidImageMagicNumber => "Invalid Image magic number",
Error::DtbTooBig => "Device tree image too big",
Error::ReadKernelImage => "Unable to read kernel image",
Error::InvalidBaseAddrAlignment => "Base address not aligned to 2 MB",
}
}
}
@@ -96,15 +100,15 @@ impl KernelLoader for PE {
/// # Arguments
///
/// * `guest_mem` - The guest memory where the kernel image is loaded.
/// * `kernel_start` - The offset into 'guest_mem' at which to load the kernel.
/// * `kernel_offset` - 2 MB-aligned base address in guest memory at which to load the kernel.
/// * `kernel_image` - Input Image format kernel image.
/// * `highmem_start_address` - ignored on ARM64.
///
/// # Returns
/// * KernelLoaderResult
fn load<F, M: GuestMemory>(
guest_mem: &M,
kernel_start: Option<GuestAddress>,
kernel_offset: Option<GuestAddress>,
kernel_image: &mut F,
_highmem_start_address: Option<GuestAddress>,
) -> Result<KernelLoaderResult>
@@ -135,7 +139,15 @@ impl KernelLoader for PE {
text_offset = 0x80000;
}

let mem_offset = kernel_start
// Validate that kernel_offset is 2 MB aligned, as required by the
// arm64 boot protocol
if let Some(kernel_offset) = kernel_offset {
if kernel_offset.raw_value() % 0x0020_0000 != 0 {
return Err(Error::InvalidBaseAddrAlignment.into());
}
}

let mem_offset = kernel_offset
.unwrap_or(GuestAddress(0))
.checked_add(text_offset)
.ok_or(Error::InvalidImage)?;
@@ -218,6 +230,14 @@ mod tests {
assert_eq!(loader_result.kernel_load.raw_value(), 0x280000);
assert_eq!(loader_result.kernel_end, 0x281000);

// Attempt to load the kernel at an address that is not aligned to a 2 MB boundary.
let kernel_offset = GuestAddress(0x0030_0000);
let loader_result = PE::load(&gm, Some(kernel_offset), &mut Cursor::new(&image), None);
assert_eq!(
loader_result,
Err(KernelLoaderError::Pe(Error::InvalidBaseAddrAlignment))
);

image[0x39] = 0x0;
let loader_result = PE::load(&gm, Some(kernel_addr), &mut Cursor::new(&image), None);
assert_eq!(
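
The alignment rule enforced by the new check boils down to a single modulo test. A minimal standalone sketch of it follows; the 2 MiB constant and the modulo match the code above, while the helper name `is_2mib_aligned` is purely illustrative and not part of the crate.

```rust
// Mirrors the `kernel_offset` validation added to PE::load above.
// `is_2mib_aligned` is an illustrative helper, not part of linux-loader.
const TWO_MIB: u64 = 0x0020_0000;

fn is_2mib_aligned(addr: u64) -> bool {
    addr % TWO_MIB == 0
}

fn main() {
    // 0x0020_0000 passes the check, so PE::load proceeds with loading.
    assert!(is_2mib_aligned(0x0020_0000));
    // 0x0030_0000 (3 MiB) fails it; PE::load returns
    // Error::InvalidBaseAddrAlignment, as exercised by the new test above.
    assert!(!is_2mib_aligned(0x0030_0000));
}
```
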
11 changes: 6 additions & 5 deletions src/loader/mod.rs
@@ -132,10 +132,11 @@ pub struct KernelLoaderResult {
/// See https://www.kernel.org/doc/Documentation/x86/boot.txt.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub setup_header: Option<bootparam::setup_header>,
/// This field optionally holds the address of a PVH entry point, indicating that
/// the kernel supports the PVH boot protocol as described in:
/// Availability of a PVH entry point. Only used for ELF boot; indicates whether the kernel
/// supports the PVH boot protocol as described in:
/// https://xenbits.xen.org/docs/unstable/misc/pvh.html
pub pvh_entry_addr: Option<GuestAddress>,
#[cfg(all(feature = "elf", any(target_arch = "x86", target_arch = "x86_64")))]
pub pvh_boot_cap: elf::PvhBootCapability,
}

/// Trait that specifies kernel image loading support.
@@ -145,14 +146,14 @@ pub trait KernelLoader {
/// # Arguments
///
/// * `guest_mem`: [`GuestMemory`] to load the kernel in.
/// * `kernel_start`: Address in guest memory where the kernel is loaded.
/// * `kernel_offset`: Usage varies between implementations; see each loader's documentation for details.
/// * `kernel_image`: Kernel image to be loaded.
/// * `highmem_start_address`: Address where high memory starts.
///
/// [`GuestMemory`]: https://docs.rs/vm-memory/latest/vm_memory/guest_memory/trait.GuestMemory.html
fn load<F, M: GuestMemory>(
guest_mem: &M,
kernel_start: Option<GuestAddress>,
kernel_offset: Option<GuestAddress>,
kernel_image: &mut F,
highmem_start_address: Option<GuestAddress>,
) -> Result<KernelLoaderResult>
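
To show how a VMM might consume the new `pvh_boot_cap` field, here is a minimal, hypothetical sketch. The `use` path for `PvhBootCapability` is an assumption and may need adjusting to the crate's actual re-exports, and `select_entry_point` is an illustrative name, not part of the crate.

```rust
// Hypothetical consumer of `KernelLoaderResult::pvh_boot_cap`.
// Assumed import path; adjust to the crate's actual re-exports if needed.
use linux_loader::loader::elf::PvhBootCapability;
use vm_memory::GuestAddress;

/// Pick the guest entry point: prefer the PVH entry point when the kernel
/// advertises one, otherwise fall back to the regular entry address that
/// `kernel_load` reports.
fn select_entry_point(kernel_load: GuestAddress, cap: PvhBootCapability) -> GuestAddress {
    match cap {
        PvhBootCapability::PvhEntryPresent(pvh_addr) => pvh_addr,
        PvhBootCapability::PvhEntryNotPresent | PvhBootCapability::PvhEntryIgnored => kernel_load,
    }
}
```
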
18 changes: 9 additions & 9 deletions src/loader/x86_64/bzimage/mod.rs
@@ -70,7 +70,7 @@ impl KernelLoader for BzImage {
/// # Arguments
///
/// * `guest_mem`: [`GuestMemory`] to load the kernel in.
/// * `kernel_start`: Address in guest memory where the kernel is loaded.
/// * `kernel_offset`: Address in guest memory where the kernel is loaded.
/// * `kernel_image` - Input bzImage image.
/// * `highmem_start_address`: Address where high memory starts.
///
@@ -98,7 +98,7 @@ impl KernelLoader for BzImage {
/// [`GuestMemory`]: https://docs.rs/vm-memory/latest/vm_memory/guest_memory/trait.GuestMemory.html
fn load<F, M: GuestMemory>(
guest_mem: &M,
kernel_start: Option<GuestAddress>,
kernel_offset: Option<GuestAddress>,
kernel_image: &mut F,
highmem_start_address: Option<GuestAddress>,
) -> Result<KernelLoaderResult>
@@ -144,7 +144,7 @@ impl KernelLoader for BzImage {
return Err(KernelLoaderError::InvalidKernelStartAddress);
}

let mem_offset = match kernel_start {
let mem_offset = match kernel_offset {
Some(start) => start,
None => GuestAddress(u64::from(boot_header.code32_start)),
};
@@ -197,13 +197,13 @@ mod tests {
fn test_load_bzImage() {
let gm = create_guest_mem();
let image = make_bzimage();
let mut kernel_start = GuestAddress(0x200000);
let mut kernel_offset = GuestAddress(0x200000);
let mut highmem_start_address = GuestAddress(0x0);

// load bzImage with good kernel_start and himem_start setting
// load bzImage with good kernel_offset and himem_start setting
let mut loader_result = BzImage::load(
&gm,
Some(kernel_start),
Some(kernel_offset),
&mut Cursor::new(&image),
Some(highmem_start_address),
)
@@ -215,7 +215,7 @@
assert_eq!(loader_result.setup_header.unwrap().loadflags, 1);
assert_eq!(loader_result.kernel_end, 0x60c320);

// load bzImage without kernel_start
// load bzImage without kernel_offset
loader_result = BzImage::load(
&gm,
None,
@@ -231,14 +231,14 @@
assert_eq!(loader_result.kernel_load.raw_value(), 0x100000);

// load bzImage with a bad himem setting
kernel_start = GuestAddress(0x1000);
kernel_offset = GuestAddress(0x1000);
highmem_start_address = GuestAddress(0x200000);

assert_eq!(
Some(KernelLoaderError::InvalidKernelStartAddress),
BzImage::load(
&gm,
Some(kernel_start),
Some(kernel_offset),
&mut Cursor::new(&image),
Some(highmem_start_address),
)
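
The bzImage change is a pure rename, but the semantics are worth spelling out: an explicit `kernel_offset` overrides the protected-mode load address from the boot header, otherwise `code32_start` is used. A minimal sketch of that fallback follows, using plain `u64` addresses and an illustrative helper name.

```rust
// Illustrative helper, not part of the crate: mirrors the
// `match kernel_offset { Some(start) => start, None => code32_start }`
// fallback in BzImage::load, with plain u64 standing in for GuestAddress.
fn bzimage_load_addr(kernel_offset: Option<u64>, code32_start: u32) -> u64 {
    kernel_offset.unwrap_or(u64::from(code32_start))
}

fn main() {
    // Without kernel_offset, the boot header's code32_start wins
    // (0x100000 in the test image above).
    assert_eq!(bzimage_load_addr(None, 0x10_0000), 0x10_0000);
    // An explicit kernel_offset overrides it, as in the first test case.
    assert_eq!(bzimage_load_addr(Some(0x20_0000), 0x10_0000), 0x20_0000);
}
```
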
97 changes: 81 additions & 16 deletions src/loader/x86_64/elf/mod.rs
@@ -1,3 +1,4 @@
// Copyright © 2020, Oracle and/or its affiliates.
// Copyright (c) 2019 Intel Corporation. All rights reserved.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
@@ -85,6 +86,39 @@ impl error::Error for Error {
}
}

#[derive(Clone, Copy, Debug, PartialEq)]
/// Availability of a PVH entry point in the kernel, which allows the VMM
/// to use the PVH boot protocol to start guests.
pub enum PvhBootCapability {
/// PVH entry point is present
PvhEntryPresent(GuestAddress),
/// PVH entry point is not present
PvhEntryNotPresent,
/// PVH entry point is ignored, even if available
PvhEntryIgnored,
}

impl Default for PvhBootCapability {
fn default() -> Self {
PvhBootCapability::PvhEntryIgnored
}
}

impl Display for PvhBootCapability {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> fmt::Result {
use self::PvhBootCapability::*;
match self {
PvhEntryPresent(pvh_entry_addr) => write!(
f,
"PVH entry point present at guest address: {:#x}",
pvh_entry_addr.raw_value()
),
PvhEntryNotPresent => write!(f, "PVH entry point not present"),
PvhEntryIgnored => write!(f, "PVH entry point ignored"),
}
}
}

impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Kernel Loader Error: {}", self.description())
@@ -121,12 +155,15 @@ impl Elf {
impl KernelLoader for Elf {
/// Loads a kernel from a vmlinux elf image into guest memory.
///
/// The kernel is loaded into guest memory at offset `phdr.p_paddr` specified by the elf image.
/// By default, the kernel is loaded into guest memory at offset `phdr.p_paddr` specified
/// by the ELF image. When provided, `kernel_offset` specifies a fixed offset from `phdr.p_paddr`
/// at which to load the kernel. If `kernel_offset` is requested, any PVH entry point is ignored
/// and the `pvh_boot_cap` field of the result is set to `PvhEntryIgnored`.
///
/// # Arguments
///
/// * `guest_mem`: [`GuestMemory`] to load the kernel in.
/// * `kernel_start`: Address in guest memory where the kernel is loaded.
/// * `kernel_offset`: Offset to be added to the default kernel load address in guest memory.
/// * `kernel_image` - Input vmlinux image.
/// * `highmem_start_address`: Address where high memory starts.
///
@@ -154,7 +191,7 @@
/// [`GuestMemory`]: https://docs.rs/vm-memory/latest/vm_memory/guest_memory/trait.GuestMemory.html
fn load<F, M: GuestMemory>(
guest_mem: &M,
kernel_start: Option<GuestAddress>,
kernel_offset: Option<GuestAddress>,
kernel_image: &mut F,
highmem_start_address: Option<GuestAddress>,
) -> Result<KernelLoaderResult>
@@ -181,8 +218,8 @@
let mut loader_result: KernelLoaderResult = Default::default();

// Address where the kernel will be loaded.
loader_result.kernel_load = match kernel_start {
Some(start) => GuestAddress(start.raw_value() + (ehdr.e_entry as u64)),
loader_result.kernel_load = match kernel_offset {
Some(k_offset) => GuestAddress(k_offset.raw_value() + (ehdr.e_entry as u64)),
None => GuestAddress(ehdr.e_entry as u64),
};

@@ -204,8 +241,17 @@
for phdr in phdrs {
if phdr.p_type != elf::PT_LOAD || phdr.p_filesz == 0 {
if phdr.p_type == elf::PT_NOTE {
// This segment describes a Note, check if PVH entry point is encoded.
loader_result.pvh_entry_addr = parse_elf_note(&phdr, kernel_image)?;
// The PVH boot protocol currently requires that the kernel is loaded at
// the default kernel load address in guest memory (specified at kernel
// build time by the value of CONFIG_PHYSICAL_START). Therefore, only
// attempt to use PVH if an offset from the default load address has not
// been requested using the kernel_offset parameter.
if let Some(_offset) = kernel_offset {
loader_result.pvh_boot_cap = PvhBootCapability::PvhEntryIgnored;
} else {
// If kernel_offset is not requested, check if PVH entry point is present
loader_result.pvh_boot_cap = parse_elf_note(&phdr, kernel_image)?;
}
}
continue;
}
@@ -216,8 +262,8 @@

// if the vmm does not specify where the kernel should be loaded, just
// load it to the physical address p_paddr for each segment.
let mem_offset = match kernel_start {
Some(start) => start
let mem_offset = match kernel_offset {
Some(k_offset) => k_offset
.checked_add(phdr.p_paddr as u64)
.ok_or(Error::InvalidProgramHeaderAddress)?,
None => GuestAddress(phdr.p_paddr as u64),
@@ -248,9 +294,9 @@ const PVH_NOTE_STR_SZ: usize = 4;
/// of type `XEN_ELFNOTE_PHYS32_ENTRY` (0x12). Notes of this type encode a physical 32-bit entry
/// point address into the kernel, which is used when launching guests in 32-bit (protected) mode
/// with paging disabled, as described by the PVH boot protocol.
/// Returns the encoded entry point address, or `None` if no `XEN_ELFNOTE_PHYS32_ENTRY` entries are
/// found in the note header.
fn parse_elf_note<F>(phdr: &elf::Elf64_Phdr, kernel_image: &mut F) -> Result<Option<GuestAddress>>
/// Returns `PvhBootCapability::PvhEntryPresent` holding the encoded entry point address if a
/// `XEN_ELFNOTE_PHYS32_ENTRY` entry is found in the note header, or `PvhEntryNotPresent` otherwise.
fn parse_elf_note<F>(phdr: &elf::Elf64_Phdr, kernel_image: &mut F) -> Result<PvhBootCapability>
where
F: Read + Seek,
{
@@ -299,7 +345,7 @@ where

if read_size >= phdr.p_filesz as usize {
// PVH ELF note not found, nothing else to do.
return Ok(None);
return Ok(PvhBootCapability::PvhEntryNotPresent);
}

// Otherwise the correct note type was found.
@@ -324,7 +370,7 @@
.read_exact(&mut pvh_addr_bytes)
.map_err(|_| Error::ReadNoteHeader)?;

Ok(Some(GuestAddress(
Ok(PvhBootCapability::PvhEntryPresent(GuestAddress(
u32::from_le_bytes(pvh_addr_bytes).into(),
)))
}
@@ -457,15 +503,34 @@ mod tests {
let gm = create_guest_mem();
let pvhnote_image = make_elfnote();
let loader_result = Elf::load(&gm, None, &mut Cursor::new(&pvhnote_image), None).unwrap();
assert_eq!(loader_result.pvh_entry_addr.unwrap().raw_value(), 0x1e1fe1f);
assert_eq!(
loader_result.pvh_boot_cap,
PvhBootCapability::PvhEntryPresent(GuestAddress(0x1e1fe1f))
);

// Verify that the PVH entry point is ignored when kernel_offset is requested
let loader_result = Elf::load(
&gm,
Some(GuestAddress(0x0020_0000)),
&mut Cursor::new(&pvhnote_image),
None,
)
.unwrap();
assert_eq!(
loader_result.pvh_boot_cap,
PvhBootCapability::PvhEntryIgnored
);
}

#[test]
fn test_dummy_elfnote() {
let gm = create_guest_mem();
let dummynote_image = make_dummy_elfnote();
let loader_result = Elf::load(&gm, None, &mut Cursor::new(&dummynote_image), None).unwrap();
assert!(loader_result.pvh_entry_addr.is_none());
assert_eq!(
loader_result.pvh_boot_cap,
PvhBootCapability::PvhEntryNotPresent
);
}

#[test]
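
Finally, a small sketch exercising the `Default` and `Display` impls added for `PvhBootCapability`, e.g. for VMM log output. As in the earlier sketch, the import paths are assumptions that may need adjusting to the crate's actual re-exports.

```rust
// Assumed import paths; adjust to the crate's actual re-exports if needed.
use linux_loader::loader::elf::PvhBootCapability;
use vm_memory::GuestAddress;

fn main() {
    // Until a loader reports otherwise, the capability defaults to "ignored".
    assert_eq!(PvhBootCapability::default(), PvhBootCapability::PvhEntryIgnored);

    // Human-readable output from the Display impl above:
    println!("{}", PvhBootCapability::PvhEntryNotPresent);
    // -> "PVH entry point not present"
    println!(
        "{}",
        PvhBootCapability::PvhEntryPresent(GuestAddress(0x1e1fe1f))
    );
    // -> "PVH entry point present at guest address: 0x1e1fe1f"
}
```
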