Prepare GEP building for opaque pointers #87695

Merged · merged 4 commits · Aug 4, 2021 · Changes from all commits
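This PR threads an explicit source element type through every GEP-building helper in rustc's codegen (gep, inbounds_gep, struct_gep, and the constant in-bounds variant), replacing the pointee-type-inferring LLVMBuildGEP family with the type-carrying LLVMBuildGEP2 family. Once LLVM pointers become opaque and stop carrying an element type, the pointee can no longer be recovered from the pointer operand, so every call site now has to state what type it is indexing over.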
28 changes: 20 additions & 8 deletions compiler/rustc_codegen_llvm/src/builder.rs
@@ -497,9 +497,10 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
         } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
             let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+            let pair_ty = place.layout.llvm_type(self);
 
             let mut load = |i, scalar: &abi::Scalar, align| {
-                let llptr = self.struct_gep(place.llval, i as u64);
+                let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar);
@@ -543,7 +544,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 .val
                 .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
 
-            let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
+            let next = body_bx.inbounds_gep(
+                self.backend_type(cg_elem.layout),
+                current,
+                &[self.const_usize(1)],
+            );
             body_bx.br(header_bx.llbb());
             header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
 
@@ -639,10 +644,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+    fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
         unsafe {
-            llvm::LLVMBuildGEP(
+            llvm::LLVMBuildGEP2(
                 self.llbuilder,
+                ty,
                 ptr,
                 indices.as_ptr(),
                 indices.len() as c_uint,
@@ -651,10 +657,16 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+    fn inbounds_gep(
+        &mut self,
+        ty: &'ll Type,
+        ptr: &'ll Value,
+        indices: &[&'ll Value],
+    ) -> &'ll Value {
         unsafe {
-            llvm::LLVMBuildInBoundsGEP(
+            llvm::LLVMBuildInBoundsGEP2(
                 self.llbuilder,
+                ty,
                 ptr,
                 indices.as_ptr(),
                 indices.len() as c_uint,
@@ -663,9 +675,9 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
+    fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value {
         assert_eq!(idx as c_uint as u64, idx);
-        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
+        unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) }
     }
 
     /* Casts */
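For intuition: struct_gep computes a field address, while a gep over i8 computes a raw byte offset; the new leading ty argument is that source element type (LLVM's Ty in the *GEP2 functions), not the pointer's own type. A stand-alone Rust sketch of the two address computations (illustrative only, not rustc code):

use std::ptr::addr_of;

#[repr(C)]
struct Pair {
    a: u64,  // field 0
    b: bool, // field 1
}

fn main() {
    let pair = Pair { a: 0, b: true };
    let base = &pair as *const Pair;
    // struct_gep(pair_ty, base, 1): the address of field 1.
    let field_b = unsafe { addr_of!((*base).b) } as usize;
    // gep(i8_ty, base as an i8 pointer, &[8]): base plus 8 raw bytes.
    let byte_8 = unsafe { (base as *const u8).add(8) } as usize;
    // With repr(C), field `b` starts right after the 8-byte `a`.
    assert_eq!(field_b, byte_8);
    println!("ok");
}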
6 changes: 4 additions & 2 deletions compiler/rustc_codegen_llvm/src/common.rs
@@ -268,7 +268,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 }
             };
             let llval = unsafe {
-                llvm::LLVMConstInBoundsGEP(
+                llvm::LLVMRustConstInBoundsGEP2(
+                    self.type_i8(),
                     self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
                     &self.const_usize(offset.bytes()),
                     1,
@@ -303,7 +304,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         let base_addr = self.static_addr_of(init, alloc.align, None);
 
         let llval = unsafe {
-            llvm::LLVMConstInBoundsGEP(
+            llvm::LLVMRustConstInBoundsGEP2(
+                self.type_i8(),
                 self.const_bitcast(base_addr, self.type_i8p()),
                 &self.const_usize(offset.bytes()),
                 1,
7 changes: 3 additions & 4 deletions compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -15,12 +15,11 @@ use rustc_span::symbol::sym;
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
     if needs_gdb_debug_scripts_section(bx) {
-        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
+        let gdb_debug_scripts_section =
+            bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p());
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
-        let indices = [bx.const_i32(0), bx.const_i32(0)];
-        let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
-        let volative_load_instruction = bx.volatile_load(bx.type_i8(), element);
+        let volative_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
         unsafe {
             llvm::LLVMSetAlignment(volative_load_instruction, 1);
         }
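Why the GEP could be deleted outright here: its indices were [0, 0], which select the first byte of the first element and therefore never move the pointer, so loading directly through the i8*-bitcast global is equivalent. A stand-alone analogy (illustrative, not rustc code):

fn main() {
    let section = [[1u8; 4]; 1];
    let base = &section as *const _ as usize;
    // Equivalent of inbounds_gep(section_ty, base, &[0, 0]): same address.
    let first = &section[0][0] as *const u8 as usize;
    assert_eq!(base, first);
    println!("ok");
}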
16 changes: 12 additions & 4 deletions compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -686,11 +686,19 @@ fn codegen_emcc_try(
         // create an alloca and pass a pointer to that.
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let i8_align = bx.tcx().data_layout.i8_align.abi;
-        let catch_data =
-            catch.alloca(bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false), ptr_align);
-        let catch_data_0 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
+        let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
+        let catch_data = catch.alloca(catch_data_type, ptr_align);
+        let catch_data_0 = catch.inbounds_gep(
+            catch_data_type,
+            catch_data,
+            &[bx.const_usize(0), bx.const_usize(0)],
+        );
         catch.store(ptr, catch_data_0, ptr_align);
-        let catch_data_1 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+        let catch_data_1 = catch.inbounds_gep(
+            catch_data_type,
+            catch_data,
+            &[bx.const_usize(0), bx.const_usize(1)],
+        );
         catch.store(is_rust_panic, catch_data_1, i8_align);
         let catch_data = catch.bitcast(catch_data, bx.type_i8p());
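Hoisting the struct type into catch_data_type lets the alloca and both GEPs agree on a single type. In a two-index GEP such as [0, 1], the leading 0 steps over zero whole structs and the 1 selects the field. A stand-alone sketch with an assumed matching layout (illustrative, not rustc code):

use std::ptr::addr_of;

#[repr(C)]
struct CatchData {
    data: *const u8,     // field 0, stored with pointer alignment
    is_rust_panic: bool, // field 1, stored with byte alignment
}

fn main() {
    let cd = CatchData { data: std::ptr::null(), is_rust_panic: true };
    let base = &cd as *const CatchData;
    // inbounds_gep(catch_data_type, catch_data, &[0, 1]): address of field 1.
    let field_1 = unsafe { addr_of!((*base).is_rust_panic) } as usize;
    assert_eq!(field_1 - base as usize, std::mem::size_of::<*const u8>());
    println!("ok");
}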
12 changes: 8 additions & 4 deletions compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1011,7 +1011,8 @@ extern "C" {
     pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
 
     // Constant expressions
-    pub fn LLVMConstInBoundsGEP(
+    pub fn LLVMRustConstInBoundsGEP2(
+        ty: &'a Type,
         ConstantVal: &'a Value,
         ConstantIndices: *const &'a Value,
         NumIndices: c_uint,
@@ -1394,22 +1395,25 @@ extern "C" {
 
     pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
 
-    pub fn LLVMBuildGEP(
+    pub fn LLVMBuildGEP2(
         B: &Builder<'a>,
+        Ty: &'a Type,
         Pointer: &'a Value,
         Indices: *const &'a Value,
         NumIndices: c_uint,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildInBoundsGEP(
+    pub fn LLVMBuildInBoundsGEP2(
         B: &Builder<'a>,
+        Ty: &'a Type,
         Pointer: &'a Value,
         Indices: *const &'a Value,
         NumIndices: c_uint,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildStructGEP(
+    pub fn LLVMBuildStructGEP2(
         B: &Builder<'a>,
+        Ty: &'a Type,
         Pointer: &'a Value,
         Idx: c_uint,
         Name: *const c_char,
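LLVMBuildGEP2, LLVMBuildInBoundsGEP2, and LLVMBuildStructGEP2 are the standard type-carrying builders from LLVM's C API (llvm-c/Core.h); only LLVMRustConstInBoundsGEP2 is a rustc-side wrapper, defined in RustWrapper.cpp below. A sketch of the same call shape against the llvm-sys crate, which mirrors the C API (an assumed dependency used purely for illustration; rustc declares its own bindings as shown above):

use llvm_sys::core::LLVMBuildInBoundsGEP2;
use llvm_sys::prelude::{LLVMBuilderRef, LLVMTypeRef, LLVMValueRef};
use std::os::raw::c_char;

// Byte-offset GEP, the pattern va_arg.rs uses: the element type is i8.
unsafe fn gep_bytes(
    b: LLVMBuilderRef,
    i8_ty: LLVMTypeRef,
    ptr: LLVMValueRef,
    offset: LLVMValueRef,
) -> LLVMValueRef {
    let mut indices = [offset];
    LLVMBuildInBoundsGEP2(b, i8_ty, ptr, indices.as_mut_ptr(), 1, b"\0".as_ptr() as *const c_char)
}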
15 changes: 8 additions & 7 deletions compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -50,12 +50,12 @@ fn emit_direct_ptr_va_arg(
 
     let aligned_size = size.align_to(slot_size).bytes() as i32;
     let full_direct_size = bx.cx().const_i32(aligned_size);
-    let next = bx.inbounds_gep(addr, &[full_direct_size]);
+    let next = bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]);
     bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
     if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
-        let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
+        let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
         (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
     } else {
         (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
@@ -98,6 +98,7 @@ fn emit_aapcs_va_arg(
     // Implementation of the AAPCS64 calling convention for va_args see
     // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
     let va_list_addr = list.immediate();
+    let va_list_ty = list.deref(bx.cx).layout.llvm_type(bx);
     let layout = bx.cx.layout_of(target_ty);
 
     let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
@@ -109,11 +110,11 @@ fn emit_aapcs_va_arg(
 
     let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
     let (reg_off, reg_top_index, slot_size) = if gr_type {
-        let gr_offs = bx.struct_gep(va_list_addr, 7);
+        let gr_offs = bx.struct_gep(va_list_ty, va_list_addr, 7);
         let nreg = (layout.size.bytes() + 7) / 8;
         (gr_offs, 3, nreg * 8)
     } else {
-        let vr_off = bx.struct_gep(va_list_addr, 9);
+        let vr_off = bx.struct_gep(va_list_ty, va_list_addr, 9);
         let nreg = (layout.size.bytes() + 15) / 16;
         (vr_off, 5, nreg * 16)
     };
@@ -141,15 +142,15 @@ fn emit_aapcs_va_arg(
     maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
 
     let top_type = bx.type_i8p();
-    let top = in_reg.struct_gep(va_list_addr, reg_top_index);
+    let top = in_reg.struct_gep(va_list_ty, va_list_addr, reg_top_index);
     let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
 
     // reg_value = *(@top + reg_off_v);
-    let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
+    let mut reg_addr = in_reg.gep(bx.type_i8(), top, &[reg_off_v]);
     if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
         // On big-endian systems the value is right-aligned in its slot.
         let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
-        reg_addr = in_reg.gep(reg_addr, &[offset]);
+        reg_addr = in_reg.gep(bx.type_i8(), reg_addr, &[offset]);
     }
     let reg_type = layout.llvm_type(bx);
     let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
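All of the va_list arithmetic above now passes bx.type_i8() as the element type: an i8 GEP is plain byte addressing, so the existing i32 byte offsets keep exactly their old meaning. Stand-alone illustration (not rustc code):

fn main() {
    let slots = [0u64; 2];
    let addr = slots.as_ptr() as *const u8;
    let aligned_size = 8; // one 8-byte register save slot
    // Equivalent of inbounds_gep(i8_ty, addr, &[full_direct_size]):
    let next = unsafe { addr.add(aligned_size) };
    assert_eq!(next as usize - addr as usize, aligned_size);
    println!("ok");
}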
7 changes: 5 additions & 2 deletions compiler/rustc_codegen_ssa/src/base.rs
@@ -168,8 +168,11 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             let ptr_ty = cx.type_i8p();
             let ptr_align = cx.tcx().data_layout.pointer_align.abi;
             let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
-            let gep =
-                bx.inbounds_gep(llvtable, &[bx.const_usize(u64::try_from(entry_idx).unwrap())]);
+            let gep = bx.inbounds_gep(
+                ptr_ty,
+                llvtable,
+                &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
+            );
             let new_vptr = bx.load(ptr_ty, gep, ptr_align);
             bx.nonnull_metadata(new_vptr);
             // Vtable loads are invariant.
4 changes: 2 additions & 2 deletions compiler/rustc_codegen_ssa/src/meth.rs
@@ -23,7 +23,7 @@ impl<'a, 'tcx> VirtualIndex {
         let llty = bx.fn_ptr_backend_type(fn_abi);
         let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
+        let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(llty, gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant.
@@ -42,7 +42,7 @@ impl<'a, 'tcx> VirtualIndex {
         let llty = bx.type_isize();
         let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
-        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
+        let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(llty, gep, usize_align);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
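Passing llty (the fn-pointer or usize slot type) keeps self.0 counting vtable slots rather than bytes, which is what the old pointee-derived GEP did implicitly. Stand-alone illustration (not rustc code):

fn main() {
    let vtable: [usize; 3] = [0xA, 0xB, 0xC]; // stand-ins for vtable entries
    let base = vtable.as_ptr();
    let index = 2; // VirtualIndex(self.0)
    // Equivalent of inbounds_gep(llty, llvtable, &[const_usize(self.0)]):
    let slot = unsafe { base.add(index) };
    assert_eq!(unsafe { *slot }, 0xC);
    println!("ok");
}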
8 changes: 6 additions & 2 deletions compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -116,14 +116,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
             }
             sym::offset => {
+                let ty = substs.type_at(0);
+                let layout = bx.layout_of(ty);
                 let ptr = args[0].immediate();
                 let offset = args[1].immediate();
-                bx.inbounds_gep(ptr, &[offset])
+                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
             }
             sym::arith_offset => {
+                let ty = substs.type_at(0);
+                let layout = bx.layout_of(ty);
                 let ptr = args[0].immediate();
                 let offset = args[1].immediate();
-                bx.gep(ptr, &[offset])
+                bx.gep(bx.backend_type(layout), ptr, &[offset])
             }
             sym::copy => {
                 copy_intrinsic(
5 changes: 3 additions & 2 deletions compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -311,14 +311,15 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
             Abi::ScalarPair(ref a, ref b) => (a, b),
             _ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout),
         };
+        let ty = bx.backend_type(dest.layout);
         let b_offset = a_scalar.value.size(bx).align_to(b_scalar.value.align(bx).abi);
 
-        let llptr = bx.struct_gep(dest.llval, 0);
+        let llptr = bx.struct_gep(ty, dest.llval, 0);
         let val = bx.from_immediate(a);
         let align = dest.align;
         bx.store_with_flags(val, llptr, align, flags);
 
-        let llptr = bx.struct_gep(dest.llval, 1);
+        let llptr = bx.struct_gep(ty, dest.llval, 1);
         let val = bx.from_immediate(b);
         let align = dest.align.restrict_for_offset(b_offset);
         bx.store_with_flags(val, llptr, align, flags);
18 changes: 13 additions & 5 deletions compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -103,12 +103,13 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
             {
                 // Offset matches second field.
-                bx.struct_gep(self.llval, 1)
+                let ty = bx.backend_type(self.layout);
+                bx.struct_gep(ty, self.llval, 1)
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                 // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                 let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());

[Review comment, Member] Is it necessary to pointercast this self.llval anymore?

[Reply, Contributor (author)] When an explicit pointee type is provided it must be equal to the pointer element type, so this will have to wait until the migration to opaque pointers is complete:
https://github.com/rust-lang/llvm-project/blob/260e0f8682098faab68af9c608534756ad378365/llvm/include/llvm/IR/Instructions.h#L956-L958

-                bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
+                bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) => {
                 // All fields of Scalar and ScalarPair layouts must have been handled by this point.
@@ -119,7 +120,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                     self.layout
                 );
             }
-            _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
+            _ => {
+                let ty = bx.backend_type(self.layout);
+                bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
+            }
         };
         PlaceRef {
             // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
@@ -185,7 +189,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
 
         // Cast and adjust pointer.
         let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());

[Review comment, Member] Similarly here.

-        let byte_ptr = bx.gep(byte_ptr, &[offset]);
+        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
 
         // Finally, cast back to the type expected.
         let ll_fty = bx.cx().backend_type(field);
@@ -380,7 +384,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         };
 
         PlaceRef {
-            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
+            llval: bx.inbounds_gep(
+                bx.cx().backend_type(self.layout),
+                self.llval,
+                &[bx.cx().const_usize(0), llindex],
+            ),
             llextra: None,
             layout,
             align: self.align.restrict_for_offset(offset),
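A sketch of the invariant behind the review reply above (an illustrative model, not LLVM's actual code): while pointers still carry a pointee type, the builder requires the explicitly supplied element type to match it, so the pointercast to i8* has to stay for now.

#[derive(Clone, Debug, PartialEq)]
enum Ty {
    I8,
    Ptr(Box<Ty>), // typed pointer, pre-migration
    OpaquePtr,    // what LLVM is moving to
}

fn build_gep(element_ty: &Ty, ptr_ty: &Ty) {
    match ptr_ty {
        // Today: the explicit type must equal the pointee, hence the cast.
        Ty::Ptr(pointee) => assert_eq!(&**pointee, element_ty),
        // After the migration: nothing left to check against.
        Ty::OpaquePtr => {}
        other => panic!("GEP base must be a pointer, got {:?}", other),
    }
}

fn main() {
    build_gep(&Ty::I8, &Ty::Ptr(Box::new(Ty::I8))); // ok: i8 GEP on i8*
    build_gep(&Ty::I8, &Ty::OpaquePtr); // fine once pointers are opaque
    println!("ok");
}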
9 changes: 8 additions & 1 deletion compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -636,7 +636,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::BinOp::BitOr => bx.or(lhs, rhs),
             mir::BinOp::BitAnd => bx.and(lhs, rhs),
             mir::BinOp::BitXor => bx.xor(lhs, rhs),
-            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
+            mir::BinOp::Offset => {
+                let pointee_type = input_ty
+                    .builtin_deref(true)
+                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
+                    .ty;
+                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
+                bx.inbounds_gep(llty, lhs, &[rhs])
+            }
             mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
             mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
             mir::BinOp::Ne
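BinOp::Offset is the lowering of <*T>::offset, so the GEP element type must be the pointee type recovered through builtin_deref; LLVM then scales the index by the size of T. Stand-alone illustration (not rustc code):

fn main() {
    let xs = [10u32, 20, 30];
    let p = xs.as_ptr();
    // Lowers to: getelementptr inbounds i32, <p>, 2
    let q = unsafe { p.offset(2) };
    assert_eq!(q as usize - p as usize, 2 * std::mem::size_of::<u32>());
    assert_eq!(unsafe { *q }, 30);
}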
11 changes: 8 additions & 3 deletions compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -176,9 +176,14 @@ pub trait BuilderMethods<'a, 'tcx>:
         size: Size,
     );
 
-    fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
-    fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
-    fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value;
+    fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+    fn inbounds_gep(
+        &mut self,
+        ty: Self::Type,
+        ptr: Self::Value,
+        indices: &[Self::Value],
+    ) -> Self::Value;
+    fn struct_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: u64) -> Self::Value;
 
     fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
     fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
10 changes: 10 additions & 0 deletions compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -1551,6 +1551,16 @@ extern "C" void LLVMRustSetLinkage(LLVMValueRef V,
   LLVMSetLinkage(V, fromRust(RustLinkage));
 }
 
+extern "C" LLVMValueRef LLVMRustConstInBoundsGEP2(LLVMTypeRef Ty,
+                                                  LLVMValueRef ConstantVal,
+                                                  LLVMValueRef *ConstantIndices,
+                                                  unsigned NumIndices) {
+  ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+                               NumIndices);
+  Constant *Val = unwrap<Constant>(ConstantVal);
+  return wrap(ConstantExpr::getInBoundsGetElementPtr(unwrap(Ty), Val, IdxList));
+}
+
 // Returns true if both high and low were successfully set. Fails in case constant wasn't any of
 // the common sizes (1, 8, 16, 32, 64, 128 bits)
 extern "C" bool LLVMRustConstInt128Get(LLVMValueRef CV, bool sext, uint64_t *high, uint64_t *low)
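The wrapper simply forwards to ConstantExpr::getInBoundsGetElementPtr with the caller-supplied source element type, the constant-expression counterpart of the LLVMBuild*GEP2 changes above. A Rust-prefixed entry point is presumably used because a matching type-taking C API function was not usable across the LLVM versions rustc supported at the time.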