Skip to content

Commit

Permalink
Merge changes from parent branch
Browse files Browse the repository at this point in the history
[tfs-changeset: 1466545]
  • Loading branch information
dotnet-bot authored and jkotas committed May 7, 2015
1 parent c6efc70 commit 484a2cf
Show file tree
Hide file tree
Showing 62 changed files with 1,357 additions and 317 deletions.
1 change: 1 addition & 0 deletions src/debug/daccess/nidump.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5355,6 +5355,7 @@ const NativeImageDumper::EnumMnemonics s_CorCompileHdrFlags[] =
#define CCHF_ENTRY(f) NativeImageDumper::EnumMnemonics(f, W(#f))
CCHF_ENTRY(CORCOMPILE_HEADER_HAS_SECURITY_DIRECTORY),
CCHF_ENTRY(CORCOMPILE_HEADER_IS_IBC_OPTIMIZED),
CCHF_ENTRY(CORCOMPILE_HEADER_IS_READY_TO_RUN),
#undef CCHF_ENTRY
};

Expand Down
10 changes: 10 additions & 0 deletions src/inc/corcompile.h
Original file line number Diff line number Diff line change
Expand Up @@ -198,8 +198,16 @@ enum CorCompileCodegen

CORCOMPILE_CODEGEN_PROFILING = 0x0004, // supports profiling
CORCOMPILE_CODEGEN_PROF_INSTRUMENTING = 0x0008, // code is instrumented to collect profile count info

#if defined(_TARGET_AMD64_) && !defined(FEATURE_CORECLR)
CORCOMPILE_CODEGEN_USE_RYUJIT = 0x0100, // code is generated by Ryu JIT
#endif
};

#if defined(_TARGET_AMD64_) && !defined(FEATURE_CORECLR)
bool UseRyuJit();
#endif

// Used for INativeImageInstallInfo::GetConfigMask()
// A bind will ask for the particular bits it needs set; if all bits are set, it is a match. Additional
// bits are ignored.
Expand All @@ -225,6 +233,8 @@ enum CorCompileHeaderFlags
// Note it is useless to cache the actual directory contents
// since it must be verified as part of the original image
CORCOMPILE_HEADER_IS_IBC_OPTIMIZED = 0x00000002,

CORCOMPILE_HEADER_IS_READY_TO_RUN = 0x00000004,
};

//
Expand Down
8 changes: 6 additions & 2 deletions src/inc/eetwain.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@
#define CHECK_APP_DOMAIN 0
#endif

// Sentinel value for EnumGcRefs' relOffsetOverride parameter meaning
// "no override requested". Fully parenthesized so the cast cannot bind
// unexpectedly when the macro expands inside a larger expression.
#define NO_OVERRIDE_OFFSET ((DWORD)-1)

struct EHContext;

#ifdef DACCESS_COMPILE
Expand Down Expand Up @@ -231,7 +233,8 @@ virtual bool EnumGcRefs(PREGDISPLAY pContext,
EECodeInfo *pCodeInfo,
unsigned flags,
GCEnumCallback pCallback,
LPVOID hCallBack) = 0;
LPVOID hCallBack,
DWORD relOffsetOverride = NO_OVERRIDE_OFFSET) = 0;

/*
Return the address of the local security object reference
Expand Down Expand Up @@ -460,7 +463,8 @@ bool EnumGcRefs(PREGDISPLAY pContext,
EECodeInfo *pCodeInfo,
unsigned flags,
GCEnumCallback pCallback,
LPVOID hCallBack);
LPVOID hCallBack,
DWORD relOffsetOverride = NO_OVERRIDE_OFFSET);

#ifdef FEATURE_CONSERVATIVE_GC
// Temporary conservative collection, for testing purposes, until we have
Expand Down
15 changes: 15 additions & 0 deletions src/inc/eventtracebase.h
Original file line number Diff line number Diff line change
Expand Up @@ -650,8 +650,23 @@ namespace ETW
public:
#ifdef FEATURE_EVENT_TRACE
// ETW notification entry points for managed exception processing,
// available when event tracing is compiled in (FEATURE_EVENT_TRACE).
// The Begin/End names suggest each pair brackets execution of the
// corresponding catch/finally/filter region — confirm against callers.
static VOID ExceptionThrown(CrawlFrame *pCf, BOOL bIsReThrownException, BOOL bIsNewException);
static VOID ExceptionThrownEnd();
static VOID ExceptionCatchBegin(MethodDesc * pMethodDesc, PVOID pEntryEIP);
static VOID ExceptionCatchEnd();
static VOID ExceptionFinallyBegin(MethodDesc * pMethodDesc, PVOID pEntryEIP);
static VOID ExceptionFinallyEnd();
static VOID ExceptionFilterBegin(MethodDesc * pMethodDesc, PVOID pEntryEIP);
static VOID ExceptionFilterEnd();

#else
// No-op inline stubs used when event tracing is compiled out, so call
// sites do not need FEATURE_EVENT_TRACE #ifdefs of their own.
static VOID ExceptionThrown(CrawlFrame *pCf, BOOL bIsReThrownException, BOOL bIsNewException) {};
static VOID ExceptionThrownEnd() {};
static VOID ExceptionCatchBegin(MethodDesc * pMethodDesc, PVOID pEntryEIP) {};
static VOID ExceptionCatchEnd() {};
static VOID ExceptionFinallyBegin(MethodDesc * pMethodDesc, PVOID pEntryEIP) {};
static VOID ExceptionFinallyEnd() {};
static VOID ExceptionFilterBegin(MethodDesc * pMethodDesc, PVOID pEntryEIP) {};
static VOID ExceptionFilterEnd() {};
#endif // FEATURE_EVENT_TRACE
typedef union _ExceptionStructs
{
Expand Down
1 change: 1 addition & 0 deletions src/inc/pedecoder.h
Original file line number Diff line number Diff line change
Expand Up @@ -307,6 +307,7 @@ class PEDecoder
const void *GetNativePreferredBase() const;
BOOL GetNativeILHasSecurityDirectory() const;
BOOL GetNativeILIsIbcOptimized() const;
BOOL GetNativeILHasReadyToRunHeader() const;
BOOL IsNativeILILOnly() const;
BOOL IsNativeILDll() const;
void GetNativeILPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine) const;
Expand Down
15 changes: 15 additions & 0 deletions src/inc/pedecoder.inl
Original file line number Diff line number Diff line change
Expand Up @@ -1046,6 +1046,21 @@ inline BOOL PEDecoder::GetNativeILIsIbcOptimized() const
return (GetNativeHeader()->Flags & CORCOMPILE_HEADER_IS_IBC_OPTIMIZED) != 0;
}

// Returns TRUE when the native image header carries the
// CORCOMPILE_HEADER_IS_READY_TO_RUN flag, i.e. the image was generated
// in the ReadyToRun format. Requires a valid native header (see the
// CheckNativeHeader precondition).
inline BOOL PEDecoder::GetNativeILHasReadyToRunHeader() const
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        PRECONDITION(CheckNativeHeader());
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    PREFIX_ASSUME(GetNativeHeader() != NULL);

    return (GetNativeHeader()->Flags & CORCOMPILE_HEADER_IS_READY_TO_RUN) ? TRUE : FALSE;
}

inline BOOL PEDecoder::IsNativeILILOnly() const
{
CONTRACTL
Expand Down
17 changes: 10 additions & 7 deletions src/jit/codegenxarch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6759,6 +6759,7 @@ void CodeGen::genEmitHelperCall(unsigned helper,
emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
regNumber callTarget = REG_NA;
regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);

if (!addr)
{
Expand All @@ -6782,13 +6783,16 @@ void CodeGen::genEmitHelperCall(unsigned helper,
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & killMask) == callTargetMask);
}
else
{
// The call target must not overwrite any live variable, though it may not be in the
// kill set for the call.
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & regSet.rsMaskVars) == RBM_NONE);
}

regMaskTP callTargetMask = genRegMask(callTargetReg);
regMaskTP callKillSet = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);

// assert that all registers in callTargetMask are in the callKillSet
noway_assert((callTargetMask & callKillSet) == callTargetMask);
#endif
callTarget = callTargetReg;
CodeGen::genSetRegToIcon(callTarget, (ssize_t) pAddr, TYP_I_IMPL);
Expand All @@ -6812,7 +6816,6 @@ void CodeGen::genEmitHelperCall(unsigned helper,
emitter::emitNoGChelper(helper));


regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
regTracker.rsTrashRegSet(killMask);
regTracker.rsTrashRegsForGCInterruptability();
}
Expand Down
21 changes: 21 additions & 0 deletions src/jit/flowgraph.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16430,6 +16430,27 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
HBtab->ebdHndBeg = bPrev;
bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL;
bPrev->bbRefs++;

// If this is a handler for a filter, the last block of the filter will end with
// a BBJ_EHFILTERRET block that has a bbJumpDest that jumps to the first block of
// its handler. So we need to update it to keep things in sync.
//
if (HBtab->HasFilter())
{
BasicBlock* bFilterLast = HBtab->BBFilterLast();
assert(bFilterLast != nullptr);
assert(bFilterLast->bbJumpKind == BBJ_EHFILTERRET);
assert(bFilterLast->bbJumpDest == block);
#ifdef DEBUG
if (verbose)
{
printf("EH#%u: Updating bbJumpDest for filter ret block: BB%02u => BB%02u\n",
ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum);
}
#endif // DEBUG
// Change the bbJumpDest for bFilterLast from the old first 'block' to the new first 'bPrev'
bFilterLast->bbJumpDest = bPrev;
}
}

if (HBtab->HasFilter() && (HBtab->ebdFilter == block))
Expand Down
6 changes: 5 additions & 1 deletion src/jit/gentree.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11597,9 +11597,13 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler * comp)
unreached(); // Should have been covered by assert above.

case BBJ_EHFILTERRET:
{
assert(comp != NULL); // Or else we're not looking for successors.
BasicBlock* result = comp->fgFirstBlockOfHandler(this);
noway_assert(result == bbJumpDest);
// Handler is the (sole) normal successor of the filter.
return comp->fgFirstBlockOfHandler(this);
return result;
}

case BBJ_EHFINALLYRET:
return comp->fgSuccOfFinallyRet(this, i);
Expand Down
71 changes: 1 addition & 70 deletions src/jit/lower.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -417,76 +417,6 @@ GenTreePtr Lowering::CreateLocalTempAsg(GenTreePtr rhs,
return store;
}

/** Creates a byref to byref assignment where the actual values are not
 *  GC pointers. The assignment has the following shape:
 *      [dstObj + scale*index + offset] = [srcObj + scale*index + offset]
 *  The IR generated for this is:
 *      GT_ASG(
 *          GT_IND(
 *              GT_LEA(dstObj, index, scale, offset)),
 *          GT_IND(
 *              GT_LEA(srcObj, index, scale, offset)))
 *
 *  Arguments:
 *      comp   - compiler instance used to allocate/clone all new nodes
 *      block  - basic block the new statement is associated with
 *      srcObj - source base-address tree (cloned, not consumed)
 *      dstObj - destination base-address tree (cloned, not consumed)
 *      index  - optional index tree; may be nullptr for [base + offset] forms
 *      scale  - element size in bytes (1, 2 or 4); also selects the copy type
 *      offset - constant displacement applied to both addresses
 *
 *  Returns the new assignment statement.
 *  NOTE(review): presumably the caller is responsible for inserting the
 *  returned statement into the block's statement list — confirm against
 *  fgNewStmtFromTree's semantics.
 */
GenTreePtr Lowering::CreateAsgByRefNonGcStmt (Compiler* comp,
                                              BasicBlock* block,
                                              GenTreePtr srcObj,
                                              GenTreePtr dstObj,
                                              GenTreePtr index,
                                              unsigned scale,
                                              unsigned offset)
{
    assert(srcObj != nullptr && dstObj != nullptr);

    // Map the element size (scale) to the type used for the load/store pair;
    // only 1-, 2- and 4-byte non-GC copies are supported.
    var_types type = TYP_INT;
    switch (scale)
    {
    case 4:
        type = TYP_INT;
        break;
    case 2:
        type = TYP_USHORT;
        break;
    case 1:
        type = TYP_UBYTE;
        break;
    default:
        noway_assert(!"Unsupported scale size for addressing modes");
    }

    // Clone all operand trees so the caller's trees are left untouched.
    // The index is needed in both address modes, hence two clones of it.
    GenTreePtr gtClonedSrc = comp->gtClone(srcObj);
    GenTreePtr gtClonedDst = comp->gtClone(dstObj);
    GenTreePtr gtClonedIndex = nullptr;
    GenTreePtr gtClonedIndex2 = nullptr;

    assert(gtClonedSrc != nullptr && gtClonedDst != nullptr);

    if (index != nullptr)
    {
        gtClonedIndex = comp->gtClone(index);
        gtClonedIndex2 = comp->gtClone(index);
        assert(gtClonedIndex != nullptr && gtClonedIndex2 != nullptr);
    }

    // Source side: address mode [srcObj + scale*index + offset] and its load.
    GenTreePtr gtSrcAddrNode = new(comp, GT_LEA) GenTreeAddrMode(type,
                                                                 gtClonedSrc,
                                                                 gtClonedIndex,
                                                                 scale,
                                                                 offset);
    GenTreePtr gtSrcIndirNode = comp->gtNewOperNode(GT_IND,
                                                    type,
                                                    gtSrcAddrNode);

    // Destination side: address mode [dstObj + scale*index + offset] and the
    // indirection that will be the assignment's target.
    GenTreePtr gtDstAddrNode = new(comp, GT_LEA) GenTreeAddrMode(type,
                                                                 gtClonedDst,
                                                                 gtClonedIndex2,
                                                                 scale,
                                                                 offset);
    GenTreePtr gtDstIndirNode = comp->gtNewOperNode(GT_IND,
                                                    type,
                                                    gtDstAddrNode);

    // Wrap the dst = src assignment in a statement tied to 'block'.
    GenTreePtr gtByRefAsgStmt = comp->fgNewStmtFromTree(
        comp->gtNewAssignNode(gtDstIndirNode, gtSrcIndirNode),
        block);
    return gtByRefAsgStmt;
}

// This is the main entry point for Lowering.

// In addition to that, LowerNode is also responsible for initializing the
Expand Down Expand Up @@ -2086,6 +2016,7 @@ GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
originalThisValue->InsertAfterSelf(newThisAddr);

GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr);
newThis->SetCosts(IND_COST_EX, 2);
newThisAddr->InsertAfterSelf(newThis);
*pThisExpr = newThis;

Expand Down
7 changes: 0 additions & 7 deletions src/jit/lower.h
Original file line number Diff line number Diff line change
Expand Up @@ -168,13 +168,6 @@ class Lowering : public Phase
GenTreePtr indirCandidate);

GenTreePtr CreateLocalTempAsg (GenTreePtr rhs, unsigned refCount, GenTreePtr *ppLclVar = nullptr);
GenTreePtr CreateAsgByRefNonGcStmt (Compiler* comp,
BasicBlock* block,
GenTreePtr srcObj,
GenTreePtr dstObj,
GenTreePtr index,
unsigned scale,
unsigned offset);

bool AreSourcesPossiblyModified (GenTree* use, GenTree* src1, GenTree *src2);
void ReplaceNode (GenTree** ppTreeLocation,
Expand Down
25 changes: 19 additions & 6 deletions src/jit/lowerxarch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1874,19 +1874,32 @@ Lowering::TreeNodeInfoInitSIMD(GenTree* tree, LinearScan* lsra)
// Otherwise, if the baseType is floating point, the targetReg will be a xmm reg and we
// can use that in the process of extracting the element.
//
// If the index is a constant and base type is a small int we can use pextrw.
// If the index is a constant and base type is a small int we can use pextrw, but on AVX
// we will need a temp if we are indexing into the upper half of the AVX register.
// In all other cases with constant index, we need a temp xmm register to extract the
// element if index is other than zero.

if (!op2->IsCnsIntOrI())
{
(void) comp->getSIMDInitTempVarNum();
}
else if (!varTypeIsFloating(simdTree->gtSIMDBaseType) &&
!varTypeIsSmallInt(simdTree->gtSIMDBaseType) &&
!op2->IsZero())
else if (!varTypeIsFloating(simdTree->gtSIMDBaseType))
{
info->internalFloatCount = 1;
info->setInternalCandidates(lsra, lsra->allSIMDRegs());
bool needFloatTemp;
if (varTypeIsSmallInt(simdTree->gtSIMDBaseType) && (comp->getSIMDInstructionSet() == InstructionSet_AVX))
{
int byteShiftCnt = (int) op2->AsIntCon()->gtIconVal * genTypeSize(simdTree->gtSIMDBaseType);
needFloatTemp = (byteShiftCnt >= 16);
}
else
{
needFloatTemp = !op2->IsZero();
}
if (needFloatTemp)
{
info->internalFloatCount = 1;
info->setInternalCandidates(lsra, lsra->allSIMDRegs());
}
}
break;

Expand Down
25 changes: 17 additions & 8 deletions src/jit/lsra.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2974,6 +2974,17 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
srcInterval->assignRelatedInterval(varDefInterval);
}
}
// We can have a case where the source of the store has a different register type,
// e.g. when the store is of a return value temp, and op1 is a Vector2
// (8-byte SIMD, which is TYP_DOUBLE at this point). We will need to set the
// src candidates accordingly on op1 so that LSRA will generate a copy.
// We could do this during Lowering, but at that point we don't know whether
// this lclVar will be a register candidate, and if not, we would prefer to leave
// the type alone.
if (regType(tree->gtGetOp1()->TypeGet()) != regType(tree->TypeGet()))
{
tree->gtGetOp1()->gtLsraInfo.setSrcCandidates(this, allRegs(tree->TypeGet()));
}
}

if ((tree->gtFlags & GTF_VAR_DEATH) == 0)
Expand Down Expand Up @@ -4154,7 +4165,7 @@ LinearScan::registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLo
// Notes:
// This will nearly always be identical to the registerType of the interval, except in the case
// of SIMD types of 8 bytes (currently only Vector2) when they are passed and returned in integer
// registers.
// registers, or copied to a return temp.
// This method need only be called in situations where we may be dealing with the register requirements
// of a RefTypeUse RefPosition (i.e. not when we are only looking at the type of an interval, nor when
// we are interested in the "defining" type of the interval). This is because the situation of interest
Expand All @@ -4169,10 +4180,9 @@ LinearScan::getRegisterType(Interval *currentInterval, RefPosition* refPosition)
#if defined(FEATURE_SIMD) && defined(_TARGET_AMD64_)
if ((candidates & allRegs(regType)) == RBM_NONE)
{
assert(genMaxOneBit(candidates) &&
(regType == TYP_DOUBLE) &&
(refPosition->refType == RefTypeUse) &&
((candidates & (RBM_ARG_REGS | RBM_LNGRET)) != RBM_NONE));
assert((regType == TYP_DOUBLE) &&
(refPosition->refType == RefTypeUse) &&
((candidates & allRegs(TYP_INT)) != RBM_NONE));
regType = TYP_INT;
}
#else // !(defined(FEATURE_SIMD) && defined(_TARGET_AMD64_))
Expand Down Expand Up @@ -6694,9 +6704,8 @@ LinearScan::insertUpperVectorSaveAndReload(GenTreePtr tree, RefPosition* refPosi
assert(lclVarInterval->isLocalVar == true);
LclVarDsc * varDsc = compiler->lvaTable + lclVarInterval->varNum;
assert(varDsc->lvType == LargeVectorType);
regNumber lclVarReg = varDsc->lvRegNum;
assert(lclVarReg != REG_NA);
if (lclVarReg == REG_STK)
regNumber lclVarReg = lclVarInterval->physReg;
if (lclVarReg == REG_NA)
{
return;
}
Expand Down
Loading

0 comments on commit 484a2cf

Please sign in to comment.