Skip to content
This repository has been archived by the owner on Apr 23, 2020. It is now read-only.

Commit

Permalink
Revert "AMDGPU: Fix handling of alignment padding in DAG argument lowering"
Browse files Browse the repository at this point in the history

This reverts commit r337021.

WARNING: MemorySanitizer: use-of-uninitialized-value
    #0 0x1415cd65 in void write_signed<long>(llvm::raw_ostream&, long, unsigned long, llvm::IntegerStyle) /code/llvm-project/llvm/lib/Support/NativeFormatting.cpp:95:7
    #1 0x1415c900 in llvm::write_integer(llvm::raw_ostream&, long, unsigned long, llvm::IntegerStyle) /code/llvm-project/llvm/lib/Support/NativeFormatting.cpp:121:3
    #2 0x1472357f in llvm::raw_ostream::operator<<(long) /code/llvm-project/llvm/lib/Support/raw_ostream.cpp:117:3
    #3 0x13bb9d4 in llvm::raw_ostream::operator<<(int) /code/llvm-project/llvm/include/llvm/Support/raw_ostream.h:210:18
    #4 0x3c2bc18 in void printField<unsigned int, &(amd_kernel_code_s::amd_kernel_code_version_major)>(llvm::StringRef, amd_kernel_code_s const&, llvm::raw_ostream&) /code/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp:78:23
    #5 0x3c250ba in llvm::printAmdKernelCodeField(amd_kernel_code_s const&, int, llvm::raw_ostream&) /code/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp:104:5
    #6 0x3c27ca3 in llvm::dumpAmdKernelCode(amd_kernel_code_s const*, llvm::raw_ostream&, char const*) /code/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp:113:5
    #7 0x3a46e6c in llvm::AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(amd_kernel_code_s const&) /code/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp:161:3
    #8 0xd371e4 in llvm::AMDGPUAsmPrinter::EmitFunctionBodyStart() /code/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp:204:26

[...]

Uninitialized value was created by an allocation of 'KernelCode' in the stack frame of function '_ZN4llvm16AMDGPUAsmPrinter21EmitFunctionBodyStartEv'
    #0 0xd36650 in llvm::AMDGPUAsmPrinter::EmitFunctionBodyStart() /code/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp:192

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@337079 91177308-0d34-0410-b5e6-96231b3b80d8
  • Loading branch information
eugenis committed Jul 14, 2018
1 parent 383081c commit 1382a3a
Show file tree
Hide file tree
Showing 17 changed files with 214 additions and 420 deletions.
14 changes: 4 additions & 10 deletions lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1128,13 +1128,6 @@ static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
const SIProgramInfo &CurrentProgramInfo,
const MachineFunction &MF) const {
const Function &F = MF.getFunction();

// Avoid asserting on erroneous cases.
if (F.getCallingConv() != CallingConv::AMDGPU_KERNEL &&
F.getCallingConv() != CallingConv::SPIR_KERNEL)
return;

const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();

Expand Down Expand Up @@ -1181,8 +1174,9 @@ void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
if (STM.isXNACKEnabled())
Out.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;

unsigned MaxKernArgAlign;
Out.kernarg_segment_byte_size = STM.getKernArgSegmentSize(F, MaxKernArgAlign);
// FIXME: Should use getKernArgSize
Out.kernarg_segment_byte_size =
STM.getKernArgSegmentSize(MF.getFunction(), MFI->getExplicitKernArgSize());
Out.wavefront_sgpr_count = CurrentProgramInfo.NumSGPR;
Out.workitem_vgpr_count = CurrentProgramInfo.NumVGPR;
Out.workitem_private_segment_byte_size = CurrentProgramInfo.ScratchSize;
Expand All @@ -1191,7 +1185,7 @@ void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
// These alignment values are specified in powers of two, so alignment =
// 2^n. The minimum alignment is 2^4 = 16.
Out.kernarg_segment_alignment = std::max((size_t)4,
countTrailingZeros(MaxKernArgAlign));
countTrailingZeros(MFI->getMaxKernArgAlign()));

if (STM.debuggerEmitPrologue()) {
Out.debug_wavefront_private_segment_offset_sgpr =
Expand Down
11 changes: 5 additions & 6 deletions lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -209,16 +209,15 @@ Kernel::CodeProps::Metadata MetadataStreamer::getHSACodeProps(
const Function &F = MF.getFunction();

// Avoid asserting on erroneous cases.
if (F.getCallingConv() != CallingConv::AMDGPU_KERNEL &&
F.getCallingConv() != CallingConv::SPIR_KERNEL)
if (F.getCallingConv() != CallingConv::AMDGPU_KERNEL)
return HSACodeProps;

unsigned MaxKernArgAlign;
HSACodeProps.mKernargSegmentSize = STM.getKernArgSegmentSize(F,
MaxKernArgAlign);
HSACodeProps.mKernargSegmentSize =
STM.getKernArgSegmentSize(F, MFI.getExplicitKernArgSize());
HSACodeProps.mGroupSegmentFixedSize = ProgramInfo.LDSSize;
HSACodeProps.mPrivateSegmentFixedSize = ProgramInfo.ScratchSize;
HSACodeProps.mKernargSegmentAlign = std::max(MaxKernArgAlign, 4u);
HSACodeProps.mKernargSegmentAlign =
std::max(uint32_t(4), MFI.getMaxKernArgAlign());
HSACodeProps.mWavefrontSize = STM.getWavefrontSize();
HSACodeProps.mNumSGPRs = ProgramInfo.NumSGPR;
HSACodeProps.mNumVGPRs = ProgramInfo.NumVGPR;
Expand Down
183 changes: 75 additions & 108 deletions lib/Target/AMDGPU/AMDGPUISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
Expand All @@ -41,6 +40,18 @@
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool allocateKernArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
MachineFunction &MF = State.getMachineFunction();
AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

uint64_t Offset = MFI->allocateKernArg(LocVT.getStoreSize(),
ArgFlags.getOrigAlign());
State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return true;
}

static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State,
Expand Down Expand Up @@ -899,118 +910,74 @@ CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
CCState &State,
const SmallVectorImpl<ISD::InputArg> &Ins) const {
const MachineFunction &MF = State.getMachineFunction();
const Function &Fn = MF.getFunction();
LLVMContext &Ctx = Fn.getParent()->getContext();
const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);

unsigned MaxAlign = 1;
uint64_t ExplicitArgOffset = 0;
const DataLayout &DL = Fn.getParent()->getDataLayout();

unsigned InIndex = 0;

for (const Argument &Arg : Fn.args()) {
Type *BaseArgTy = Arg.getType();
unsigned Align = DL.getABITypeAlignment(BaseArgTy);
MaxAlign = std::max(Align, MaxAlign);
unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);

uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;

// We're basically throwing away everything passed into us and starting over
// to get accurate in-memory offsets. The "PartOffset" is completely useless
// to us as computed in Ins.
//
// We also need to figure out what type legalization is trying to do to get
// the correct memory offsets.

SmallVector<EVT, 16> ValueVTs;
SmallVector<uint64_t, 16> Offsets;
ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);

for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
uint64_t BasePartOffset = Offsets[Value];

EVT ArgVT = ValueVTs[Value];
EVT MemVT = ArgVT;
MVT RegisterVT =
getRegisterTypeForCallingConv(Ctx, ArgVT);
unsigned NumRegs =
getNumRegistersForCallingConv(Ctx, ArgVT);

if (!Subtarget->isAmdHsaOS() &&
(ArgVT == MVT::i16 || ArgVT == MVT::i8 || ArgVT == MVT::f16)) {
// The ABI says the caller will extend these values to 32-bits.
MemVT = ArgVT.isInteger() ? MVT::i32 : MVT::f32;
} else if (NumRegs == 1) {
// This argument is not split, so the IR type is the memory type.
if (ArgVT.isExtended()) {
// We have an extended type, like i24, so we should just use the
// register type.
MemVT = RegisterVT;
} else {
MemVT = ArgVT;
}
} else if (ArgVT.isVector() && RegisterVT.isVector() &&
ArgVT.getScalarType() == RegisterVT.getScalarType()) {
assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
// We have a vector value which has been split into a vector with
// the same scalar type, but fewer elements. This should handle
// all the floating-point vector types.
MemVT = RegisterVT;
} else if (ArgVT.isVector() &&
ArgVT.getVectorNumElements() == NumRegs) {
// This arg has been split so that each element is stored in a separate
// register.
MemVT = ArgVT.getScalarType();
} else if (ArgVT.isExtended()) {
// We have an extended type, like i65.
MemVT = RegisterVT;
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(CCState &State,
const SmallVectorImpl<ISD::InputArg> &Ins) const {
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
const ISD::InputArg &In = Ins[i];
EVT MemVT;

unsigned NumRegs = getNumRegisters(State.getContext(), In.ArgVT);

if (!Subtarget->isAmdHsaOS() &&
(In.ArgVT == MVT::i16 || In.ArgVT == MVT::i8 || In.ArgVT == MVT::f16)) {
// The ABI says the caller will extend these values to 32-bits.
MemVT = In.ArgVT.isInteger() ? MVT::i32 : MVT::f32;
} else if (NumRegs == 1) {
// This argument is not split, so the IR type is the memory type.
assert(!In.Flags.isSplit());
if (In.ArgVT.isExtended()) {
// We have an extended type, like i24, so we should just use the register type
MemVT = In.VT;
} else {
unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
if (RegisterVT.isInteger()) {
MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
} else if (RegisterVT.isVector()) {
assert(!RegisterVT.getScalarType().isFloatingPoint());
unsigned NumElements = RegisterVT.getVectorNumElements();
assert(MemoryBits % NumElements == 0);
// This vector type has been split into another vector type with
// a different elements size.
EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
MemoryBits / NumElements);
MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
} else {
llvm_unreachable("cannot deduce memory type.");
}
MemVT = In.ArgVT;
}

// Convert one element vectors to scalar.
if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
MemVT = MemVT.getScalarType();

if (MemVT.isExtended()) {
// This should really only happen if we have vec3 arguments
assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
MemVT = MemVT.getPow2VectorType(State.getContext());
} else if (In.ArgVT.isVector() && In.VT.isVector() &&
In.ArgVT.getScalarType() == In.VT.getScalarType()) {
assert(In.ArgVT.getVectorNumElements() > In.VT.getVectorNumElements());
// We have a vector value which has been split into a vector with
// the same scalar type, but fewer elements. This should handle
// all the floating-point vector types.
MemVT = In.VT;
} else if (In.ArgVT.isVector() &&
In.ArgVT.getVectorNumElements() == NumRegs) {
// This arg has been split so that each element is stored in a separate
// register.
MemVT = In.ArgVT.getScalarType();
} else if (In.ArgVT.isExtended()) {
// We have an extended type, like i65.
MemVT = In.VT;
} else {
unsigned MemoryBits = In.ArgVT.getStoreSizeInBits() / NumRegs;
assert(In.ArgVT.getStoreSizeInBits() % NumRegs == 0);
if (In.VT.isInteger()) {
MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
} else if (In.VT.isVector()) {
assert(!In.VT.getScalarType().isFloatingPoint());
unsigned NumElements = In.VT.getVectorNumElements();
assert(MemoryBits % NumElements == 0);
// This vector type has been split into another vector type with
// a different elements size.
EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
MemoryBits / NumElements);
MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
} else {
llvm_unreachable("cannot deduce memory type.");
}
}

unsigned PartOffset = 0;
for (unsigned i = 0; i != NumRegs; ++i) {
State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
BasePartOffset + PartOffset,
MemVT.getSimpleVT(),
CCValAssign::Full));
PartOffset += MemVT.getStoreSize();
}
// Convert one element vectors to scalar.
if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
MemVT = MemVT.getScalarType();

if (MemVT.isExtended()) {
// This should really only happen if we have vec3 arguments
assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
MemVT = MemVT.getPow2VectorType(State.getContext());
}

assert(MemVT.isSimple());
allocateKernArg(i, In.VT, MemVT.getSimpleVT(), CCValAssign::Full, In.Flags,
State);
}
}

Expand Down
7 changes: 2 additions & 5 deletions lib/Target/AMDGPU/AMDGPUISelLowering.h
Original file line number Diff line number Diff line change
Expand Up @@ -122,11 +122,8 @@ class AMDGPUTargetLowering : public TargetLowering {
SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &Results) const;

void analyzeFormalArgumentsCompute(
CCState &State,
const SmallVectorImpl<ISD::InputArg> &Ins) const;

void analyzeFormalArgumentsCompute(CCState &State,
const SmallVectorImpl<ISD::InputArg> &Ins) const;
public:
AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

Expand Down
5 changes: 3 additions & 2 deletions lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,9 +77,8 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
const unsigned KernArgBaseAlign = 16; // FIXME: Increase if necessary
const uint64_t BaseOffset = ST.getExplicitKernelArgOffset(F);

unsigned MaxAlign;
// FIXME: Alignment is broken with explicit arg offset.
const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F);
if (TotalKernArgSize == 0)
return false;

Expand All @@ -92,11 +91,13 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

unsigned AS = KernArgSegment->getType()->getPointerAddressSpace();
unsigned MaxAlign = 1;
uint64_t ExplicitArgOffset = 0;

for (Argument &Arg : F.args()) {
Type *ArgTy = Arg.getType();
unsigned Align = DL.getABITypeAlignment(ArgTy);
MaxAlign = std::max(Align, MaxAlign);
unsigned Size = DL.getTypeSizeInBits(ArgTy);
unsigned AllocSize = DL.getTypeAllocSize(ArgTy);

Expand Down
11 changes: 2 additions & 9 deletions lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,23 +24,16 @@ AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath),
MemoryBound(false),
WaveLimiter(false) {
const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);

// FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
// except reserved size is not correctly aligned.
const Function &F = MF.getFunction();

if (auto *Resolver = MF.getMMI().getResolver()) {
if (AMDGPUPerfHintAnalysis *PHA = static_cast<AMDGPUPerfHintAnalysis*>(
Resolver->getAnalysisIfAvailable(&AMDGPUPerfHintAnalysisID, true))) {
MemoryBound = PHA->isMemoryBound(&F);
WaveLimiter = PHA->needsWaveLimiter(&F);
MemoryBound = PHA->isMemoryBound(&MF.getFunction());
WaveLimiter = PHA->needsWaveLimiter(&MF.getFunction());
}
}

CallingConv::ID CC = F.getCallingConv();
if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL)
ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign);
}

unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
Expand Down
15 changes: 13 additions & 2 deletions lib/Target/AMDGPU/AMDGPUMachineFunction.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ class AMDGPUMachineFunction : public MachineFunctionInfo {
SmallDenseMap<const GlobalValue *, unsigned, 4> LocalMemoryObjects;

protected:
uint64_t ExplicitKernArgSize; // Cache for this.
unsigned MaxKernArgAlign; // Cache for this.
uint64_t ExplicitKernArgSize;
unsigned MaxKernArgAlign;

/// Number of bytes in the LDS that are being used.
unsigned LDSSize;
Expand All @@ -44,6 +44,17 @@ class AMDGPUMachineFunction : public MachineFunctionInfo {
public:
AMDGPUMachineFunction(const MachineFunction &MF);

/// Reserve \p Size bytes in the explicit kernel argument segment, padding
/// the current offset up to \p Align (which must be a power of two) first.
/// Also tracks the maximum argument alignment seen so far.
/// \returns the aligned byte offset at which the argument was placed.
uint64_t allocateKernArg(uint64_t Size, unsigned Align) {
  assert(isPowerOf2_32(Align));
  const uint64_t AlignedOffset = alignTo(ExplicitKernArgSize, Align);
  ExplicitKernArgSize = AlignedOffset + Size;
  MaxKernArgAlign = std::max(Align, MaxKernArgAlign);
  return AlignedOffset;
}

/// \returns the total number of bytes allocated so far for explicit kernel
/// arguments (including alignment padding between arguments).
uint64_t getExplicitKernArgSize() const { return ExplicitKernArgSize; }
Expand Down
Loading

0 comments on commit 1382a3a

Please sign in to comment.