Refactor call ABI according to LLVM version. #37

Merged 1 commit on Jun 23, 2020

134 changes: 65 additions & 69 deletions src/librustc_target/abi/call/xtensa.rs
@@ -1,102 +1,98 @@
-// reference: https://github.com/espressif/clang-xtensa/commit/6fb488d2553f06029e6611cf81c6efbd45b56e47#diff-aa74ae1e1ab6b7149789237edb78e688R8450
+// reference: https://github.com/MabezDev/llvm-project/blob/xtensa_release_9.0.1_with_rust_patches-31-05-2020-cherry-pick/clang/lib/CodeGen/TargetInfo.cpp#L9668-L9767

 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+use crate::abi::{Abi, Size};

-const NUM_ARG_GPR: u64 = 6;
+const NUM_ARG_GPRS: u64 = 6;
 const MAX_ARG_IN_REGS_SIZE: u64 = 4 * 32;
-// const MAX_ARG_DIRECT_SIZE: u64 = MAX_ARG_IN_REGS_SIZE;
 const MAX_RET_IN_REGS_SIZE: u64 = 2 * 32;

 fn classify_ret_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    // The rules for return and argument types are the same, so defer to
-    // classifyArgumentType.
-    classify_arg_ty(arg, xlen, &mut 2); // two as max return size
+    if arg.is_ignore() {
+        return;
+    }
+
+    // The rules for return and argument types are the same,
+    // so defer to `classify_arg_ty`.
+    let mut arg_gprs_left = 2;
+    let fixed = true;
+    classify_arg_ty(arg, xlen, fixed, &mut arg_gprs_left);
 }

-fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, remaining_gpr: &mut u64) {
-    // Determine the number of GPRs needed to pass the current argument
-    // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
-    // register pairs, so may consume 3 registers.
+fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, fixed: bool, arg_gprs_left: &mut u64) {
+    assert!(*arg_gprs_left <= NUM_ARG_GPRS, "Arg GPR tracking underflow");

-    let arg_size = arg.layout.size;
-    if arg_size.bits() > MAX_ARG_IN_REGS_SIZE {
-        arg.make_indirect();
+    // Ignore empty structs/unions.
+    if arg.layout.is_zst() {
         return;
     }

-    let alignment = arg.layout.align.abi;
-    let mut required_gpr = 1u64; // at least one per arg
+    let size = arg.layout.size.bits();
+    let needed_align = arg.layout.align.abi.bits();
+    let mut must_use_stack = false;
+
+    // Determine the number of GPRs needed to pass the current argument
+    // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
+    // register pairs, so may consume 3 registers.
+    let mut needed_arg_gprs = 1u64;

-    if alignment.bits() == 2 * xlen {
-        required_gpr = 2 + (*remaining_gpr % 2);
-    } else if arg_size.bits() > xlen && arg_size.bits() <= MAX_ARG_IN_REGS_SIZE {
-        required_gpr = (arg_size.bits() + (xlen - 1)) / xlen;
+    if !fixed && needed_align == 2 * xlen {
+        needed_arg_gprs = 2 + (*arg_gprs_left % 2);
+    } else if size > xlen && size <= MAX_ARG_IN_REGS_SIZE {
+        needed_arg_gprs = (size + xlen - 1) / xlen;
     }

-    let mut stack_required = false;
-    if required_gpr > *remaining_gpr {
-        stack_required = true;
-        required_gpr = *remaining_gpr;
+    if needed_arg_gprs > *arg_gprs_left {
+        must_use_stack = true;
+        needed_arg_gprs = *arg_gprs_left;
     }
-    *remaining_gpr -= required_gpr;
+    *arg_gprs_left -= needed_arg_gprs;

-    // if a value can fit in a reg and the
-    // stack is not required, extend
-    if !arg.layout.is_aggregate() {
-        // non-aggregate types
-        if arg_size.bits() < xlen && !stack_required {
+    if !arg.layout.is_aggregate() && !matches!(arg.layout.abi, Abi::Vector { .. }) {
+        // All integral types are promoted to `xlen`
+        // width, unless passed on the stack.
+        if size < xlen && !must_use_stack {
             arg.extend_integer_width_to(xlen);
+            return;
         }
-    } else if arg_size.bits() as u64 <= MAX_ARG_IN_REGS_SIZE {
-        // aggregate types
-        // Aggregates which are <= 4*32 will be passed in registers if possible,
-        // so coerce to integers.

-        // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
-        // required, and a 2-element XLen array if only XLen alignment is
+        return;
+    }
+
+    // Aggregates which are <= 4 * 32 will be passed in
+    // registers if possible, so coerce to integers.
+    if size as u64 <= MAX_ARG_IN_REGS_SIZE {
+        let alignment = arg.layout.align.abi.bits();
+
+        // Use a single `xlen` int if possible, 2 * `xlen` if 2 * `xlen` alignment
+        // is required, and a 2-element `xlen` array if only `xlen` alignment is
         // required.
-        // if alignment == 2 * xlen {
-        //     arg.extend_integer_width_to(xlen * 2);
-        // } else {
-        //     arg.extend_integer_width_to(arg_size + (xlen - 1) / xlen);
-        // }
-        if alignment.bits() == 2 * xlen {
-            arg.cast_to(Uniform { unit: Reg::i64(), total: arg_size });
+        if size <= xlen {
+            arg.cast_to(Reg::i32());
+            return;
+        } else if alignment == 2 * xlen {
+            arg.cast_to(Reg::i64());
+            return;
         } else {
-            //FIXME array type - this should be a homogenous array type
-            // arg.extend_integer_width_to(arg_size + (xlen - 1) / xlen);
+            let total = Size::from_bits(((size + xlen - 1) / xlen) * xlen);
+            arg.cast_to(Uniform { unit: Reg::i32(), total });
+            return;
         }
-    } else {
-        // if we get here the stack is required
-        assert!(stack_required);
-        arg.make_indirect();
     }

-    // if arg_size as u64 <= MAX_ARG_IN_REGS_SIZE {
-    //     let align = arg.layout.align.abi.bytes();
-    //     let total = arg.layout.size;
-    //     arg.cast_to(Uniform {
-    //         unit: if align <= 4 { Reg::i32() } else { Reg::i64() },
-    //         total
-    //     });
-    //     return;
-    // }
+    arg.make_indirect();
 }

-pub fn compute_abi_info<Ty>(fabi: &mut FnAbi<'_, Ty>, xlen: u64) {
-    if !fabi.ret.is_ignore() {
-        classify_ret_ty(&mut fabi.ret, xlen);
-    }
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>, xlen: u64) {
+    classify_ret_ty(&mut fn_abi.ret, xlen);

-    let return_indirect =
-        fabi.ret.layout.size.bits() > MAX_RET_IN_REGS_SIZE || fabi.ret.is_indirect();
+    let is_ret_indirect =
+        fn_abi.ret.is_indirect() || fn_abi.ret.layout.size.bits() > MAX_RET_IN_REGS_SIZE;

-    let mut remaining_gpr = if return_indirect { NUM_ARG_GPR - 1 } else { NUM_ARG_GPR };
+    let mut arg_gprs_left = if is_ret_indirect { NUM_ARG_GPRS - 1 } else { NUM_ARG_GPRS };

-    for arg in &mut fabi.args {
-        if arg.is_ignore() {
-            continue;
-        }
-        classify_arg_ty(arg, xlen, &mut remaining_gpr);
+    for arg in &mut fn_abi.args {
+        let fixed = true;
+        classify_arg_ty(arg, xlen, fixed, &mut arg_gprs_left);
     }
 }
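
For orientation, a minimal standalone sketch (not part of the diff above) of the GPR-counting rule that `classify_arg_ty` applies: an argument takes roughly ceil(size / xlen) registers, capped by the argument GPRs still free, and whatever does not fit spills to the stack. The helper name, the xlen = 32 word size, and the use of six argument GPRs (NUM_ARG_GPRS) are assumptions for the example; the varargs 2 * xlen alignment case is omitted.

// Illustrative only: mirrors the register-count arithmetic from `classify_arg_ty`,
// assuming xlen = 32 and 6 argument GPRs; not part of the PR.
fn needed_gprs(size_bits: u64, xlen: u64, arg_gprs_left: u64) -> (u64, bool) {
    // At least one register; wider values need ceil(size / xlen) registers.
    let mut needed = if size_bits > xlen { (size_bits + xlen - 1) / xlen } else { 1 };
    // If more registers are needed than remain, cap the count and flag stack use.
    let mut must_use_stack = false;
    if needed > arg_gprs_left {
        must_use_stack = true;
        needed = arg_gprs_left;
    }
    (needed, must_use_stack)
}

fn main() {
    let xlen = 32;
    // A 64-bit argument with all 6 argument GPRs free consumes 2 registers.
    assert_eq!(needed_gprs(64, xlen, 6), (2, false));
    // A 96-bit aggregate with only 2 GPRs left takes both and overflows to the stack.
    assert_eq!(needed_gprs(96, xlen, 2), (2, true));
}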