|
1 | 1 | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2 | 2 | // SPDX-License-Identifier: Apache-2.0
|
3 | 3 |
|
| 4 | +use std::mem::offset_of; |
| 5 | + |
4 | 6 | use gdbstub_arch::aarch64::reg::AArch64CoreRegs as CoreRegs;
|
| 7 | +use kvm_bindings::{ |
| 8 | + kvm_guest_debug, kvm_regs, user_pt_regs, KVM_GUESTDBG_ENABLE, KVM_GUESTDBG_SINGLESTEP, |
| 9 | + KVM_GUESTDBG_USE_HW, KVM_GUESTDBG_USE_SW_BP, KVM_REG_ARM64, KVM_REG_ARM_CORE, KVM_REG_SIZE_U64, |
| 10 | +}; |
5 | 11 | use kvm_ioctls::VcpuFd;
|
6 |
| -use vm_memory::GuestAddress; |
| 12 | +use vm_memory::{Bytes, GuestAddress}; |
7 | 13 |
|
| 14 | +use crate::arch::aarch64::regs::{ |
| 15 | + arm64_core_reg_id, Aarch64RegisterVec, ID_AA64MMFR0_EL1, TCR_EL1, TTBR1_EL1, |
| 16 | +}; |
| 17 | +use crate::arch::aarch64::vcpu::get_registers; |
8 | 18 | use crate::gdb::target::GdbTargetError;
|
| 19 | +use crate::Vmm; |
| 20 | + |
/// Number of bytes occupied by a software breakpoint.
///
/// AArch64 instructions are fixed-width 32-bit words, so a breakpoint always
/// replaces exactly one 4-byte instruction slot.
pub const SW_BP_SIZE: usize = 4;

/// The bytes written into guest memory for a software breakpoint.
///
/// Little-endian encoding of `BRK #0` (0xD4200000).
/// https://developer.arm.com/documentation/ddi0602/2024-09/Base-Instructions/BRK--Breakpoint-instruction-
pub const SW_BP: [u8; SW_BP_SIZE] = 0xD420_0000_u32.to_le_bytes();
| 32 | + |
| 33 | +/// Register id for the program counter |
| 34 | +const PC_REG_ID: u64 = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, pc)); |
| 35 | + |
| 36 | +/// Retrieve a single register from a Vcpu |
| 37 | +fn get_sys_reg(reg: u64, vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> { |
| 38 | + let mut register_vec = Aarch64RegisterVec::default(); |
| 39 | + get_registers(vcpu_fd, &[reg], &mut register_vec)?; |
| 40 | + let register = register_vec |
| 41 | + .iter() |
| 42 | + .next() |
| 43 | + .ok_or(GdbTargetError::ReadRegisterVecError)?; |
| 44 | + |
| 45 | + Ok(register.value()) |
| 46 | +} |
| 47 | + |
| 48 | +/// Gets the PC value for a Vcpu |
| 49 | +pub fn get_instruction_pointer(vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> { |
| 50 | + get_sys_reg(PC_REG_ID, vcpu_fd) |
| 51 | +} |
9 | 52 |
|
10 |
| -/// Configures the number of bytes required for a software breakpoint |
11 |
| -pub const SW_BP_SIZE: usize = 1; |
| 53 | +/// Helper to extract a specific number of bits at an offset from a u64 |
| 54 | +macro_rules! extract_bits_64 { |
| 55 | + ($value: tt, $offset: tt, $length: tt) => { |
| 56 | + ($value >> $offset) & (!0u64 >> (64 - $length)) |
| 57 | + }; |
| 58 | +} |
| 59 | + |
| 60 | +/// Mask to clear the last 3 bits from the page table entry |
| 61 | +const PTE_ADDRESS_MASK: u64 = !0b111u64; |
12 | 62 |
|
13 |
| -/// The bytes stored for a software breakpoint |
14 |
| -pub const SW_BP: [u8; SW_BP_SIZE] = [0]; |
| 63 | +/// Read a u64 value from a guest memory address |
| 64 | +fn read_address(vmm: &Vmm, address: u64) -> Result<u64, GdbTargetError> { |
| 65 | + let mut buf = [0; 8]; |
| 66 | + vmm.guest_memory().read(&mut buf, GuestAddress(address))?; |
15 | 67 |
|
16 |
| -/// Gets the RIP value for a Vcpu |
17 |
| -pub fn get_instruction_pointer(_vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> { |
18 |
| - unimplemented!() |
| 68 | + Ok(u64::from_le_bytes(buf)) |
19 | 69 | }
|
20 | 70 |
|
21 |
| -/// Translates a virtual address according to the vCPU's current address translation mode. |
22 |
| -pub fn translate_gva(_vcpu_fd: &VcpuFd, _gva: u64) -> Result<u64, GdbTargetError> { |
23 |
| - unimplemented!() |
| 71 | +/// The grainsize used with 4KB paging |
| 72 | +const GRAIN_SIZE: usize = 9; |
| 73 | + |
/// Translates a virtual address according to the Vcpu's current address translation mode.
/// Returns the GPA (guest physical address)
///
/// To simplify the implementation we've made some assumptions about the paging setup.
/// Here we just assert firstly paging is setup and these assumptions are correct.
///
/// Assumptions checked below:
/// - The address is a kernel-space (TTBR1) address.
/// - A 4KB translation granule is configured (TCR_EL1 granule field == 2).
/// - At most 4 lookup levels with 9 index bits per level (`GRAIN_SIZE`).
pub fn translate_gva(vcpu_fd: &VcpuFd, gva: u64, vmm: &Vmm) -> Result<u64, GdbTargetError> {
    // Check this virtual address is in kernel space. Bit 55 distinguishes the
    // upper (kernel, TTBR1) address range from the lower (user, TTBR0) range;
    // only kernel-space walks are supported here.
    if extract_bits_64!(gva, 55, 1) == 0 {
        return Err(GdbTargetError::GvaTranslateError);
    }

    // Translation control register
    let tcr_el1: u64 = get_sys_reg(TCR_EL1, vcpu_fd)?;

    // If this is 0 then translation is not yet ready; the address is returned
    // unmodified (treated as identity-mapped during early boot).
    if extract_bits_64!(tcr_el1, 16, 6) == 0 {
        return Ok(gva);
    }

    // Check 4KB pages are being used
    if extract_bits_64!(tcr_el1, 30, 2) != 2 {
        return Err(GdbTargetError::GvaTranslateError);
    }

    // ID_AA64MMFR0_EL1 provides information about the implemented memory model and memory
    // management. Check this is a physical address size we support
    let pa_size = match get_sys_reg(ID_AA64MMFR0_EL1, vcpu_fd)? & 0b1111 {
        0 => 32,
        1 => 36,
        2 => 40,
        3 => 42,
        4 => 44,
        5 => 48,
        // Larger (e.g. 52-bit) or reserved physical address sizes are unsupported.
        _ => return Err(GdbTargetError::GvaTranslateError),
    };

    // A mask of the physical address size for a virtual address
    let pa_address_mask: u64 = !0u64 >> (64 - pa_size);
    // A mask used to take the bottom 12 bits of a value this is as we have a grainsize of 9
    // asserted with our 4kb page, plus the offset of 3
    let lower_mask: u64 = 0xFFF;
    // A mask for a physical address mask with the lower 12 bits cleared
    let desc_mask: u64 = pa_address_mask & !lower_mask;

    // Byte offsets into each level's table. Taking 12 bits (9 index bits plus
    // the 3 bits below them) yields the 9-bit index pre-multiplied by 8 (the
    // descriptor size); the 3 stray low bits are cleared with PTE_ADDRESS_MASK
    // at the point of use below.
    let page_indices = [
        (gva >> (GRAIN_SIZE * 4)) & lower_mask,
        (gva >> (GRAIN_SIZE * 3)) & lower_mask,
        (gva >> (GRAIN_SIZE * 2)) & lower_mask,
        (gva >> GRAIN_SIZE) & lower_mask,
    ];

    // Translation table base register used for the initial table lookup.
    // Keep only the bottom `pa_size` bits of the register value.
    let mut address: u64 = get_sys_reg(TTBR1_EL1, vcpu_fd)? & pa_address_mask;
    let mut level = 0;

    while level < 4 {
        // Clear the bottom 3 bits from this address (see page_indices note above)
        let pte = read_address(vmm, (address + page_indices[level]) & PTE_ADDRESS_MASK)?;
        address = pte & desc_mask;

        // If this is a valid table entry and we aren't at the end of the page tables
        // then loop again and check next level
        if (pte & 2 != 0) && (level < 3) {
            level += 1;
            continue;
        }
        break;
    }

    // Generate a mask to split between the page table entry and the GVA. The split point is
    // dependent on which level we terminate at. This is calculated by taking the level we
    // hit multiplied by the grainsize then adding the 3 offset
    let page_size = 1u64 << ((GRAIN_SIZE * (4 - level)) + 3);
    // Clear bottom bits of page size, then splice in the page offset from the GVA
    address &= !(page_size - 1);
    address |= gva & (page_size - 1);
    Ok(address)
}
|
25 | 153 |
|
26 | 154 | /// Configures the kvm guest debug regs to register the hardware breakpoints
|
27 | 155 | fn set_kvm_debug(
|
28 |
| - _control: u32, |
29 |
| - _vcpu_fd: &VcpuFd, |
30 |
| - _addrs: &[GuestAddress], |
| 156 | + control: u32, |
| 157 | + vcpu_fd: &VcpuFd, |
| 158 | + addrs: &[GuestAddress], |
31 | 159 | ) -> Result<(), GdbTargetError> {
|
32 |
| - unimplemented!() |
| 160 | + let mut dbg = kvm_guest_debug { |
| 161 | + control, |
| 162 | + ..Default::default() |
| 163 | + }; |
| 164 | + |
| 165 | + for (i, addr) in addrs.iter().enumerate() { |
| 166 | + // DBGBCR_EL1 (Debug Breakpoint Control Registers, D13.3.2): |
| 167 | + // bit 0: 1 (Enabled) |
| 168 | + // bit 1~2: 0b11 (PMC = EL1/EL0) |
| 169 | + // bit 5~8: 0b1111 (BAS = AArch64) |
| 170 | + // others: 0 |
| 171 | + dbg.arch.dbg_bcr[i] = 0b1 | (0b11 << 1) | (0b1111 << 5); |
| 172 | + // DBGBVR_EL1 (Debug Breakpoint Value Registers, D13.3.3): |
| 173 | + // bit 2~52: VA[2:52] |
| 174 | + dbg.arch.dbg_bvr[i] = (!0u64 >> 11) & addr.0; |
| 175 | + } |
| 176 | + |
| 177 | + vcpu_fd.set_guest_debug(&dbg)?; |
| 178 | + |
| 179 | + Ok(()) |
| 180 | +} |
| 181 | + |
| 182 | +/// Bits in a Vcpu pstate for IRQ |
| 183 | +const IRQ_ENABLE_FLAGS: u64 = 0x80 | 0x40; |
| 184 | +/// Register id for pstate |
| 185 | +const PSTATE_ID: u64 = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, pstate)); |
| 186 | + |
| 187 | +/// Disable IRQ interrupts to avoid getting stuck in a loop while single stepping |
| 188 | +/// |
| 189 | +/// When GDB hits a single breakpoint and resumes it will follow the steps: |
| 190 | +/// - Clear SW breakpoint we've hit |
| 191 | +/// - Single step |
| 192 | +/// - Re-insert the SW breakpoint |
| 193 | +/// - Resume |
| 194 | +/// However, with IRQ enabled the single step takes us into the IRQ handler so when we resume we |
| 195 | +/// immediately hit the SW breapoint we just re-inserted getting stuck in a loop. |
| 196 | +fn toggle_interrupts(vcpu_fd: &VcpuFd, enable: bool) -> Result<(), GdbTargetError> { |
| 197 | + let mut pstate = get_sys_reg(PSTATE_ID, vcpu_fd)?; |
| 198 | + |
| 199 | + if enable { |
| 200 | + pstate |= IRQ_ENABLE_FLAGS; |
| 201 | + } else { |
| 202 | + pstate &= !IRQ_ENABLE_FLAGS; |
| 203 | + } |
| 204 | + |
| 205 | + vcpu_fd.set_one_reg(PSTATE_ID, &pstate.to_le_bytes())?; |
| 206 | + |
| 207 | + Ok(()) |
33 | 208 | }
|
34 | 209 |
|
35 | 210 | /// Configures the Vcpu for debugging and sets the hardware breakpoints on the Vcpu
|
36 | 211 | pub fn vcpu_set_debug(
|
37 |
| - _vcpu_fd: &VcpuFd, |
38 |
| - _addrs: &[GuestAddress], |
39 |
| - _step: bool, |
| 212 | + vcpu_fd: &VcpuFd, |
| 213 | + addrs: &[GuestAddress], |
| 214 | + step: bool, |
40 | 215 | ) -> Result<(), GdbTargetError> {
|
41 |
| - unimplemented!() |
| 216 | + let mut control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW | KVM_GUESTDBG_USE_SW_BP; |
| 217 | + if step { |
| 218 | + control |= KVM_GUESTDBG_SINGLESTEP; |
| 219 | + } |
| 220 | + |
| 221 | + toggle_interrupts(vcpu_fd, step)?; |
| 222 | + set_kvm_debug(control, vcpu_fd, addrs) |
42 | 223 | }
|
43 | 224 |
|
44 |
| -/// Injects a BP back into the guest kernel for it to handle, this is particularly useful for the |
45 |
| -/// kernels selftesting which can happen during boot. |
| 225 | +/// KVM does not support injecting breakpoints on aarch64 so this is a no-op |
46 | 226 | pub fn vcpu_inject_bp(
|
47 | 227 | _vcpu_fd: &VcpuFd,
|
48 | 228 | _addrs: &[GuestAddress],
|
49 | 229 | _step: bool,
|
50 | 230 | ) -> Result<(), GdbTargetError> {
|
51 |
| - unimplemented!() |
| 231 | + Ok(()) |
52 | 232 | }
|
| 233 | +/// The number of general purpose registers |
| 234 | +const GENERAL_PURPOSE_REG_COUNT: usize = 31; |
| 235 | +/// The number of core registers we read from the Vcpu |
| 236 | +const CORE_REG_COUNT: usize = 33; |
| 237 | +/// Stores the register ids of registers to be read from the Vcpu |
| 238 | +const CORE_REG_IDS: [u64; CORE_REG_COUNT] = { |
| 239 | + let mut regs = [0; CORE_REG_COUNT]; |
| 240 | + let mut idx = 0; |
| 241 | + |
| 242 | + let reg_offset = offset_of!(kvm_regs, regs); |
| 243 | + let mut off = reg_offset; |
| 244 | + while idx < GENERAL_PURPOSE_REG_COUNT { |
| 245 | + regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, off); |
| 246 | + idx += 1; |
| 247 | + off += std::mem::size_of::<u64>(); |
| 248 | + } |
| 249 | + |
| 250 | + regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, sp)); |
| 251 | + idx += 1; |
| 252 | + |
| 253 | + regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, pc)); |
| 254 | + regs |
| 255 | +}; |
53 | 256 |
|
54 | 257 | /// Reads the registers for the Vcpu
|
55 |
| -pub fn read_registers(_vcpu_fd: &VcpuFd, _regs: &mut CoreRegs) -> Result<(), GdbTargetError> { |
56 |
| - unimplemented!() |
| 258 | +pub fn read_registers(vcpu_fd: &VcpuFd, regs: &mut CoreRegs) -> Result<(), GdbTargetError> { |
| 259 | + let mut register_vec = Aarch64RegisterVec::default(); |
| 260 | + get_registers(vcpu_fd, &CORE_REG_IDS, &mut register_vec)?; |
| 261 | + |
| 262 | + let mut registers = register_vec.iter(); |
| 263 | + |
| 264 | + for i in 0..GENERAL_PURPOSE_REG_COUNT { |
| 265 | + regs.x[i] = registers |
| 266 | + .next() |
| 267 | + .ok_or(GdbTargetError::ReadRegisterVecError)? |
| 268 | + .value(); |
| 269 | + } |
| 270 | + |
| 271 | + regs.sp = registers |
| 272 | + .next() |
| 273 | + .ok_or(GdbTargetError::ReadRegisterVecError)? |
| 274 | + .value(); |
| 275 | + |
| 276 | + regs.pc = registers |
| 277 | + .next() |
| 278 | + .ok_or(GdbTargetError::ReadRegisterVecError)? |
| 279 | + .value(); |
| 280 | + |
| 281 | + Ok(()) |
57 | 282 | }
|
58 | 283 |
|
59 | 284 | /// Writes to the registers for the Vcpu
|
60 |
| -pub fn write_registers(_vcpu_fd: &VcpuFd, _regs: &CoreRegs) -> Result<(), GdbTargetError> { |
61 |
| - unimplemented!() |
| 285 | +pub fn write_registers(vcpu_fd: &VcpuFd, regs: &CoreRegs) -> Result<(), GdbTargetError> { |
| 286 | + let kreg_off = offset_of!(kvm_regs, regs); |
| 287 | + let mut off = kreg_off; |
| 288 | + for i in 0..GENERAL_PURPOSE_REG_COUNT { |
| 289 | + vcpu_fd.set_one_reg( |
| 290 | + arm64_core_reg_id!(KVM_REG_SIZE_U64, off), |
| 291 | + ®s.x[i].to_le_bytes(), |
| 292 | + )?; |
| 293 | + off += std::mem::size_of::<u64>(); |
| 294 | + } |
| 295 | + |
| 296 | + let off = offset_of!(user_pt_regs, sp); |
| 297 | + vcpu_fd.set_one_reg( |
| 298 | + arm64_core_reg_id!(KVM_REG_SIZE_U64, off + kreg_off), |
| 299 | + ®s.sp.to_le_bytes(), |
| 300 | + )?; |
| 301 | + |
| 302 | + let off = offset_of!(user_pt_regs, pc); |
| 303 | + vcpu_fd.set_one_reg( |
| 304 | + arm64_core_reg_id!(KVM_REG_SIZE_U64, off + kreg_off), |
| 305 | + ®s.pc.to_le_bytes(), |
| 306 | + )?; |
| 307 | + |
| 308 | + Ok(()) |
62 | 309 | }
|
0 commit comments