@@ -7193,15 +7193,19 @@ static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
 }
 
 // Recurse to find a LoadSDNode source and the accumulated ByteOffest.
-static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
-  if (ISD::isNON_EXTLoad(Elt.getNode())) {
-    auto *BaseLd = cast<LoadSDNode>(Elt);
-    if (!BaseLd->isSimple())
-      return false;
+static bool findEltLoadSrc(SDValue Elt, MemSDNode *&Ld, int64_t &ByteOffset) {
+  if (auto *BaseLd = dyn_cast<AtomicSDNode>(Elt)) {
     Ld = BaseLd;
     ByteOffset = 0;
     return true;
-  }
+  } else if (auto *BaseLd = dyn_cast<LoadSDNode>(Elt))
+    if (ISD::isNON_EXTLoad(Elt.getNode())) {
+      if (!BaseLd->isSimple())
+        return false;
+      Ld = BaseLd;
+      ByteOffset = 0;
+      return true;
+    }
 
   switch (Elt.getOpcode()) {
   case ISD::BITCAST:
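
As a reading aid for the hunk above: the sketch below is a minimal, self-contained model of the new dispatch in findEltLoadSrc, assuming invented stand-in types (MemNode, AtomicNode, PlainLoadNode) rather than the real SelectionDAG classes. It only shows the control flow being changed; the recursion through BITCAST and the extract opcodes that follows in the real function is left out.

    // Toy model of the new findEltLoadSrc dispatch. MemNode, AtomicNode and
    // PlainLoadNode are invented stand-ins for MemSDNode, AtomicSDNode and
    // LoadSDNode; dynamic_cast stands in for LLVM's dyn_cast.
    #include <cstdint>
    #include <iostream>

    struct MemNode { virtual ~MemNode() = default; };
    struct AtomicNode : MemNode {};
    struct PlainLoadNode : MemNode {
      bool Simple = true;        // models LoadSDNode::isSimple()
      bool NonExtending = true;  // models ISD::isNON_EXTLoad()
    };

    static bool findEltLoadSrc(MemNode *Elt, MemNode *&Ld, int64_t &ByteOffset) {
      // Atomic loads are now accepted directly, with a zero byte offset.
      if (auto *A = dynamic_cast<AtomicNode *>(Elt)) {
        Ld = A;
        ByteOffset = 0;
        return true;
      }
      // Plain loads keep the old requirements: non-extending and simple.
      if (auto *L = dynamic_cast<PlainLoadNode *>(Elt))
        if (L->NonExtending) {
          if (!L->Simple)
            return false;
          Ld = L;
          ByteOffset = 0;
          return true;
        }
      // The real function would now recurse through BITCAST/EXTRACT sources.
      return false;
    }

    int main() {
      AtomicNode A;
      PlainLoadNode P;
      MemNode *Src = nullptr;
      int64_t Off = 0;
      std::cout << findEltLoadSrc(&A, Src, Off) << ' '
                << findEltLoadSrc(&P, Src, Off) << '\n';  // prints "1 1"
    }
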
@@ -7254,7 +7258,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
   APInt ZeroMask = APInt::getZero(NumElems);
   APInt UndefMask = APInt::getZero(NumElems);
 
-  SmallVector<LoadSDNode *, 8> Loads(NumElems, nullptr);
+  SmallVector<MemSDNode *, 8> Loads(NumElems, nullptr);
   SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
 
   // For each element in the initializer, see if we've found a load, zero or an
@@ -7304,7 +7308,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
   EVT EltBaseVT = EltBase.getValueType();
   assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
          "Register/Memory size mismatch");
-  LoadSDNode *LDBase = Loads[FirstLoadedElt];
+  MemSDNode *LDBase = Loads[FirstLoadedElt];
   assert(LDBase && "Did not find base load for merging consecutive loads");
   unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
   unsigned BaseSizeInBytes = BaseSizeInBits / 8;
@@ -7318,8 +7322,8 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
 
   // Check to see if the element's load is consecutive to the base load
   // or offset from a previous (already checked) load.
-  auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
-    LoadSDNode *Ld = Loads[EltIdx];
+  auto CheckConsecutiveLoad = [&](MemSDNode *Base, int EltIdx) {
+    MemSDNode *Ld = Loads[EltIdx];
     int64_t ByteOffset = ByteOffsets[EltIdx];
     if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
       int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
@@ -7347,7 +7351,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
     }
   }
 
-  auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
+  auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, MemSDNode *LDBase) {
     auto MMOFlags = LDBase->getMemOperand()->getFlags();
     assert(LDBase->isSimple() &&
            "Cannot merge volatile or atomic loads.");
@@ -9452,8 +9456,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
   {
     SmallVector<SDValue, 64> Ops(Op->ops().take_front(NumElems));
     if (SDValue LD =
-            EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
+            EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false)) {
       return LD;
+    }
   }
 
   // If this is a splat of pairs of 32-bit elements, we can use a narrower
@@ -60388,6 +60393,35 @@ static SDValue combineINTRINSIC_VOID(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
+static SDValue combineVZEXT_LOAD(SDNode *N, SelectionDAG &DAG,
+                                 TargetLowering::DAGCombinerInfo &DCI) {
+  // Find the TokenFactor to locate the associated AtomicLoad.
+  SDNode *ALD = nullptr;
+  for (auto &TF : DAG.allnodes())
+    if (TF.getOpcode() == ISD::TokenFactor) {
+      SDValue L = TF.getOperand(0);
+      SDValue R = TF.getOperand(1);
+      if (L.getNode() == N)
+        ALD = R.getNode();
+      else if (R.getNode() == N)
+        ALD = L.getNode();
+    }
+
+  if (!ALD)
+    return SDValue();
+  if (!isa<AtomicSDNode>(ALD))
+    return SDValue();
+
+  // Replace the VZEXT_LOAD with the AtomicLoad.
+  SDLoc dl(N);
+  SDValue SV =
+      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
+                  N->getValueType(0).changeTypeToInteger(), SDValue(ALD, 0));
+  SDValue BC = DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), SV);
+  BC = DCI.CombineTo(N, BC, SDValue(ALD, 1));
+  return BC;
+}
+
 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
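
As a reading aid for the new combine above: the sketch below models only the TokenFactor scan, assuming an invented Node type and opcode constants rather than the SelectionDAG API. It shows how the combine locates the node chained alongside the VZEXT_LOAD.

    // Toy model of the TokenFactor scan in combineVZEXT_LOAD. Node and the
    // opcode constants are invented for illustration; they are not the
    // SelectionDAG API.
    #include <cstdio>
    #include <vector>

    struct Node {
      int Opcode;
      std::vector<Node *> Ops;
    };

    constexpr int TokenFactor = 1, VZextLoad = 2, AtomicLoad = 3;

    // Return the node chained alongside N under a two-operand TokenFactor.
    static Node *findTokenFactorPartner(const std::vector<Node *> &AllNodes,
                                        Node *N) {
      Node *Partner = nullptr;
      for (Node *TF : AllNodes)
        if (TF->Opcode == TokenFactor && TF->Ops.size() == 2) {
          if (TF->Ops[0] == N)
            Partner = TF->Ops[1];
          else if (TF->Ops[1] == N)
            Partner = TF->Ops[0];
        }
      return Partner;
    }

    int main() {
      Node ALD{AtomicLoad, {}};
      Node VZ{VZextLoad, {}};
      Node TF{TokenFactor, {&VZ, &ALD}};
      std::vector<Node *> All{&ALD, &VZ, &TF};
      Node *P = findTokenFactorPartner(All, &VZ);
      std::printf("partner is atomic load: %d\n",
                  P && P->Opcode == AtomicLoad);  // prints 1
    }

In the combine itself, the partner found this way must be an AtomicSDNode; the VZEXT_LOAD is then rebuilt as SCALAR_TO_VECTOR of the atomic load's value, bitcast back to the original vector type, and CombineTo also rewires the load's chain uses to the atomic load's chain result.
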
@@ -60584,6 +60618,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::INTRINSIC_VOID: return combineINTRINSIC_VOID(N, DAG, DCI);
   case ISD::FP_TO_SINT_SAT:
   case ISD::FP_TO_UINT_SAT: return combineFP_TO_xINT_SAT(N, DAG, Subtarget);
+  case X86ISD::VZEXT_LOAD: return combineVZEXT_LOAD(N, DAG, DCI);
   // clang-format on
   }
 