[VPlan] Support multiple F(Max|Min)Num reductions. #161735
Conversation
Generalize handleMaxMinNumReductions to handle any number of F(Max|Min)Num reductions by collecting a vector of reductions to convert. We then add NaN checks for all of them, followed by adjusting the branch controlling the vector loop region, and updating the resume phis.
@llvm/pr-subscribers-vectorizers
@llvm/pr-subscribers-llvm-transforms

Author: Florian Hahn (fhahn)

Changes: Generalize handleMaxMinNumReductions to handle any number of F(Max|Min)Num reductions by collecting a vector of reductions to convert. We then add NaN checks for all of them, followed by adjusting the branch controlling the vector loop region, and updating the resume phis. Addresses a TODO from #148239

Patch is 41.21 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/161735.diff

6 Files Affected:
diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
index c8212af9f8e00..daffa1e7fab21 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
@@ -826,7 +826,7 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
};
VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
- VPReductionPHIRecipe *RedPhiR = nullptr;
+ SmallVector<VPReductionPHIRecipe *> ReductionsToConvert;
bool HasUnsupportedPhi = false;
for (auto &R : LoopRegion->getEntryBasicBlock()->phis()) {
if (isa<VPCanonicalIVPHIRecipe, VPWidenIntOrFpInductionRecipe>(&R))
@@ -837,19 +837,15 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
HasUnsupportedPhi = true;
continue;
}
- // For now, only a single reduction is supported.
- // TODO: Support multiple MaxNum/MinNum reductions and other reductions.
- if (RedPhiR)
- return false;
if (Cur->getRecurrenceKind() != RecurKind::FMaxNum &&
Cur->getRecurrenceKind() != RecurKind::FMinNum) {
HasUnsupportedPhi = true;
continue;
}
- RedPhiR = Cur;
+ ReductionsToConvert.push_back(Cur);
}
- if (!RedPhiR)
+ if (ReductionsToConvert.empty())
return true;
// We won't be able to resume execution in the scalar tail, if there are
@@ -858,15 +854,6 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
if (HasUnsupportedPhi || !Plan.hasScalarTail())
return false;
- VPValue *MinMaxOp = GetMinMaxCompareValue(RedPhiR);
- if (!MinMaxOp)
- return false;
-
- RecurKind RedPhiRK = RedPhiR->getRecurrenceKind();
- assert((RedPhiRK == RecurKind::FMaxNum || RedPhiRK == RecurKind::FMinNum) &&
- "unsupported reduction");
- (void)RedPhiRK;
-
/// Check if the vector loop of \p Plan can early exit and restart
/// execution of last vector iteration in the scalar loop. This requires all
/// recipes up to early exit point be side-effect free as they are
@@ -884,52 +871,69 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
}
VPBasicBlock *LatchVPBB = LoopRegion->getExitingBasicBlock();
+ VPBasicBlock *MiddleVPBB = Plan.getMiddleBlock();
+ VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->begin());
VPBuilder Builder(LatchVPBB->getTerminator());
- auto *LatchExitingBranch = cast<VPInstruction>(LatchVPBB->getTerminator());
- assert(LatchExitingBranch->getOpcode() == VPInstruction::BranchOnCount &&
+ VPValue *AnyNaN = nullptr;
+ SmallPtrSet<VPValue *, 2> RdxResults;
+ for (VPReductionPHIRecipe *RedPhiR : ReductionsToConvert) {
+ VPValue *MinMaxOp = GetMinMaxCompareValue(RedPhiR);
+ if (!MinMaxOp)
+ return false;
+
+ RecurKind RedPhiRK = RedPhiR->getRecurrenceKind();
+ assert((RedPhiRK == RecurKind::FMaxNum || RedPhiRK == RecurKind::FMinNum) &&
+ "unsupported reduction");
+ (void)RedPhiRK;
+
+ VPValue *IsNaN = Builder.createFCmp(CmpInst::FCMP_UNO, MinMaxOp, MinMaxOp);
+ VPValue *HasNaN = Builder.createNaryOp(VPInstruction::AnyOf, {IsNaN});
+ if (AnyNaN)
+ AnyNaN = Builder.createOr(AnyNaN, HasNaN);
+ else
+ AnyNaN = HasNaN;
+
+ // If we exit early due to NaNs, compute the final reduction result based
+ // on the reduction phi at the beginning of the last vector iteration.
+ auto *RdxResult = find_singleton<VPSingleDefRecipe>(
+ RedPhiR->users(), [](VPUser *U, bool) -> VPSingleDefRecipe * {
+ auto *VPI = dyn_cast<VPInstruction>(U);
+ if (VPI && VPI->getOpcode() == VPInstruction::ComputeReductionResult)
+ return VPI;
+ return nullptr;
+ });
+
+ auto *NewSel =
+ MiddleBuilder.createSelect(HasNaN, RedPhiR, RdxResult->getOperand(1));
+ RdxResult->setOperand(1, NewSel);
+ RdxResults.insert(RdxResult);
+ }
+
+ auto *LatchExitingBranch = LatchVPBB->getTerminator();
+ assert(match(LatchExitingBranch, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
"Unexpected terminator");
auto *IsLatchExitTaken =
Builder.createICmp(CmpInst::ICMP_EQ, LatchExitingBranch->getOperand(0),
LatchExitingBranch->getOperand(1));
-
- VPValue *IsNaN = Builder.createFCmp(CmpInst::FCMP_UNO, MinMaxOp, MinMaxOp);
- VPValue *AnyNaN = Builder.createNaryOp(VPInstruction::AnyOf, {IsNaN});
auto *AnyExitTaken =
Builder.createNaryOp(Instruction::Or, {AnyNaN, IsLatchExitTaken});
Builder.createNaryOp(VPInstruction::BranchOnCond, AnyExitTaken);
LatchExitingBranch->eraseFromParent();
- // If we exit early due to NaNs, compute the final reduction result based on
- // the reduction phi at the beginning of the last vector iteration.
- auto *RdxResult = find_singleton<VPSingleDefRecipe>(
- RedPhiR->users(), [](VPUser *U, bool) -> VPSingleDefRecipe * {
- auto *VPI = dyn_cast<VPInstruction>(U);
- if (VPI && VPI->getOpcode() == VPInstruction::ComputeReductionResult)
- return VPI;
- return nullptr;
- });
-
- auto *MiddleVPBB = Plan.getMiddleBlock();
- Builder.setInsertPoint(MiddleVPBB, MiddleVPBB->begin());
- auto *NewSel =
- Builder.createSelect(AnyNaN, RedPhiR, RdxResult->getOperand(1));
- RdxResult->setOperand(1, NewSel);
-
- auto *ScalarPH = Plan.getScalarPreheader();
// Update resume phis for inductions in the scalar preheader. If AnyNaN is
// true, the resume from the start of the last vector iteration via the
// canonical IV, otherwise from the original value.
- for (auto &R : ScalarPH->phis()) {
+ for (auto &R : Plan.getScalarPreheader()->phis()) {
auto *ResumeR = cast<VPPhi>(&R);
VPValue *VecV = ResumeR->getOperand(0);
- if (VecV == RdxResult)
+ if (RdxResults.contains(VecV))
continue;
if (auto *DerivedIV = dyn_cast<VPDerivedIVRecipe>(VecV)) {
if (DerivedIV->getNumUsers() == 1 &&
DerivedIV->getOperand(1) == &Plan.getVectorTripCount()) {
- auto *NewSel = Builder.createSelect(AnyNaN, Plan.getCanonicalIV(),
- &Plan.getVectorTripCount());
- DerivedIV->moveAfter(&*Builder.getInsertPoint());
+ auto *NewSel = MiddleBuilder.createSelect(AnyNaN, Plan.getCanonicalIV(),
+ &Plan.getVectorTripCount());
+ DerivedIV->moveAfter(&*MiddleBuilder.getInsertPoint());
DerivedIV->setOperand(1, NewSel);
continue;
}
@@ -941,7 +945,8 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
"FMaxNum/FMinNum reduction.\n");
return false;
}
- auto *NewSel = Builder.createSelect(AnyNaN, Plan.getCanonicalIV(), VecV);
+ auto *NewSel =
+ MiddleBuilder.createSelect(AnyNaN, Plan.getCanonicalIV(), VecV);
ResumeR->setOperand(0, NewSel);
}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll
index 56a1abd2384c8..79fd3fb0d7f6d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll
@@ -59,7 +59,6 @@ define float @fmaxnum(ptr %src, i64 %n) {
; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]])
; CHECK-NEXT: [[TMP8]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD2]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP18:%.*]] = freeze <4 x i1> [[TMP3]]
@@ -68,6 +67,7 @@ define float @fmaxnum(ptr %src, i64 %n) {
; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP6]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP6]], [[TMP9]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -118,23 +118,86 @@ define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) {
; CHECK-LABEL: define float @test_fmax_and_fmin(
; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]]
; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]]
-; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4
-; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i32 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i32 4
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[GEP_SRC_1]], align 4
+; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI2]], <4 x float> [[WIDE_LOAD]])
+; CHECK-NEXT: [[TMP5]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI3]], <4 x float> [[WIDE_LOAD4]])
+; CHECK-NEXT: [[TMP6]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD5]])
+; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD6]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8
+; CHECK-NEXT: [[TMP8:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD5]], [[WIDE_LOAD5]]
+; CHECK-NEXT: [[TMP9:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD6]]
+; CHECK-NEXT: [[TMP10:%.*]] = freeze <4 x i1> [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = freeze <4 x i1> [[TMP9]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <4 x i1> [[TMP10]], [[TMP11]]
+; CHECK-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP12]])
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP15:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD4]]
+; CHECK-NEXT: [[TMP16:%.*]] = freeze <4 x i1> [[TMP14]]
+; CHECK-NEXT: [[TMP17:%.*]] = freeze <4 x i1> [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <4 x i1> [[TMP16]], [[TMP17]]
+; CHECK-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP18]])
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <4 x i1> poison, i1 [[TMP19]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT7]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = or i1 [[TMP13]], [[TMP19]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[TMP22:%.*]] = or i1 [[TMP20]], [[TMP21]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP23:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP6]]
+; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x float> [[VEC_PHI1]], <4 x float> [[TMP7]]
+; CHECK-NEXT: [[TMP25:%.*]] = select <4 x i1> [[BROADCAST_SPLAT8]], <4 x float> [[VEC_PHI2]], <4 x float> [[TMP4]]
+; CHECK-NEXT: [[TMP26:%.*]] = select <4 x i1> [[BROADCAST_SPLAT8]], <4 x float> [[VEC_PHI3]], <4 x float> [[TMP5]]
+; CHECK-NEXT: [[TMP27:%.*]] = select i1 [[TMP20]], i64 [[IV]], i64 [[N_VEC]]
+; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP23]], <4 x float> [[TMP24]])
+; CHECK-NEXT: [[TMP28:%.*]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[RDX_MINMAX]])
+; CHECK-NEXT: [[RDX_MINMAX9:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP25]], <4 x float> [[TMP26]])
+; CHECK-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[RDX_MINMAX9]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: [[TMP30:%.*]] = xor i1 [[TMP20]], true
+; CHECK-NEXT: [[TMP31:%.*]] = and i1 [[CMP_N]], [[TMP30]]
+; CHECK-NEXT: br i1 [[TMP31]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP27]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP28]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX10:%.*]] = phi float [ [[TMP29]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MIN:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MAX:%.*]] = phi float [ [[BC_MERGE_RDX10]], %[[SCALAR_PH]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV1]]
+; CHECK-NEXT: [[GEP_SRC_3:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV1]]
+; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_2]], align 4
+; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_3]], align 4
; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]])
; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]])
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ]
-; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ]
+; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ], [ [[TMP29]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ], [ [[TMP28]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]]
; CHECK-NEXT: ret float [[SUB]]
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll
index d4f1227a38bda..ac98e3c6e11f3 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll
@@ -59,7 +59,6 @@ define float @fminnum(ptr %src, i64 %n) {
; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]])
; CHECK-NEXT: [[TMP8]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD2]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP15:%.*]] = freeze <4 x i1> [[TMP3]]
@@ -68,6 +67,7 @@ define float @fminnum(ptr %src, i64 %n) {
; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP6]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP6]], [[TMP9]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll
index 5b7c27a0b5f1b..e4995585a99e3 100644
--- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll
@@ -59,7 +59,6 @@ define float @fmaxnum(ptr %src, i64 %n) {
; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]])
; CHECK-NEXT: [[TMP8]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD2]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP15:%.*]] = freeze <4 x i1> [[TMP3]]
@@ -68,6 +67,7 @@ define float @fmaxnum(ptr %src, i64 %n) {
; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP6]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP6]], [[TMP9]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -118,23 +118,86 @@ define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) {
; CHECK-LABEL: define float @test_fmax_and_fmin(
; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NE...
[truncated]
RecurKind RedPhiRK = RedPhiR->getRecurrenceKind();
assert((RedPhiRK == RecurKind::FMaxNum || RedPhiRK == RecurKind::FMinNum) &&
       "unsupported reduction");
(void)RedPhiRK;
Could this assertion be moved to the beginning of the for loop?
nit: And maybe add isxxxRecurrenceKind(RedPhiRK) for this assertion
Yep moved and used helper, thanks!
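For reference, the helper ended up as RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind (quoted further down in this thread). A minimal sketch of its likely shape, assuming it just checks the two kinds:

static bool isFPMinMaxNumRecurrenceKind(RecurKind Kind) {
  // FMinNum and FMaxNum are the only kinds this transform converts.
  return Kind == RecurKind::FMinNum || Kind == RecurKind::FMaxNum;
}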
VPValue *AnyNaN = nullptr;
SmallPtrSet<VPValue *, 2> RdxResults;
for (VPReductionPHIRecipe *RedPhiR : ReductionsToConvert) {
  VPValue *MinMaxOp = GetMinMaxCompareValue(RedPhiR);
Is the recurrence chain for RecurKind::FMinNum and RecurKind::FMaxNum limited to a single min/max operation (similar to FindLast, which has only one select)?
Seems like GetMinMaxCompareValue only returns the last min/max operation.
If multiple min/max operations can appear in the recurrence chain, should all of them be handled, or is handling only the last one correct?
I'll check separately.
// If we exit early due to NaNs, compute the final reduction result based
// on the reduction phi at the beginning of the last vector iteration.
auto *RdxResult = find_singleton<VPSingleDefRecipe>(
    RedPhiR->users(), [](VPUser *U, bool) -> VPSingleDefRecipe * {
Could we find the ComputeReductionResult by RedPhiR->getBackedgeValue() instead of RedPhiR?
Yes, I also added an assert that the only expected users are RedPhiR and compute-reduction-result.
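A possible shape for that assert, going by the description above (an assumption, not the exact patch; RedPhiR and RdxResult as in the snippet quoted earlier):

// Only the reduction phi itself and the ComputeReductionResult should use
// the backedge value; any other user would invalidate the conversion.
assert(all_of(RedPhiR->getBackedgeValue()->users(),
              [&](VPUser *U) { return U == RedPhiR || U == RdxResult; }) &&
       "unexpected users of the backedge value");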
Add helper to check for FMinNum and FMaxNum recurrence kinds, as suggested in #161735.
✅ With the latest revision this PR passed the C/C++ code formatter.
fhahn
left a comment
ping :)
Mel-Chen
left a comment
LGTM
ayalz
left a comment
Post-approval review.
VPBasicBlock *LatchVPBB = LoopRegion->getExitingBasicBlock();
VPBasicBlock *MiddleVPBB = Plan.getMiddleBlock();
VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->begin());
VPBuilder Builder(LatchVPBB->getTerminator());
- VPBuilder Builder(LatchVPBB->getTerminator());
+ VPBuilder LatchBuilder(LatchVPBB->getTerminator());
consistent with MiddleBuilder.
Can be pre-applied independently.
done thanks
VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
VPReductionPHIRecipe *RedPhiR = nullptr;
SmallVector<VPReductionPHIRecipe *> ReductionsToConvert;
- SmallVector<VPReductionPHIRecipe *> ReductionsToConvert;
+ SmallVector<VPReductionPHIRecipe *> MinMaxNumReductionsToHandle;
Done thanks
if (!MinMaxOp)
  return false;
Better check if RedPhiR has a MinMax compare value earlier, possibly when placing it in ReductionsToConvert, than here, potentially after having made partial changes to VPlan?
Yep, moved to the previous loop, thanks
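The reworked collection loop presumably looks something like this (a sketch inferred from the later snippets, which iterate over (RedPhiR, MinMaxOp) pairs; not the verbatim patch):

for (auto &R : LoopRegion->getEntryBasicBlock()->phis()) {
  auto *Cur = dyn_cast<VPReductionPHIRecipe>(&R);
  // ... same filtering of unsupported phis and recurrence kinds as before ...
  // Bail out while collecting, before any changes are made to the VPlan.
  VPValue *MinMaxOp = GetMinMaxCompareValue(Cur);
  if (!MinMaxOp)
    return false;
  MinMaxNumReductionsToHandle.emplace_back(Cur, MinMaxOp);
}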
if (AnyNaN)
  AnyNaN = Builder.createOr(AnyNaN, HasNaN);
else
  AnyNaN = HasNaN;
- if (AnyNaN)
-   AnyNaN = Builder.createOr(AnyNaN, HasNaN);
- else
-   AnyNaN = HasNaN;
+ AnyNaN = AnyNaN ? Builder.createOr(AnyNaN, HasNaN) : HasNaN;
?
Can also have versions of createOr() etc. that accept a null operand - returning the non-null one, effectively treating null as "all true", as done for mask operands.
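A minimal sketch of such a null-tolerant variant (a hypothetical helper, assuming only VPBuilder::createOr as used in the patch):

VPValue *createOrWithOptionalOperand(VPBuilder &Builder, VPValue *LHS,
                                     VPValue *RHS) {
  // Treat a null operand as "no condition accumulated yet" and return the
  // other operand instead of emitting an Or.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;
  return Builder.createOr(LHS, RHS);
}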
Thanks, updated to use ?
// If we exit early due to NaNs, compute the final reduction result based
// on the reduction phi at the beginning of the last vector iteration.
auto *RdxResult = find_singleton<VPSingleDefRecipe>(
    RedPhiR->getBackedgeValue()->users(),
Previously this looked over the users of RedPhiR rather than those of its backedge value?
Yep, this was changed per @Mel-Chen's comment. Using the backedge value to look up the outside users seems more direct, even though ComputeReductionResult currently has the phi itself as an operand as well.
Better applied separately, being independent of single vs. multiple reductions?
I restored the original code for now, will do the update separately, thanks.
  return false;

VPValue *IsNaN = Builder.createFCmp(CmpInst::FCMP_UNO, MinMaxOp, MinMaxOp);
VPValue *HasNaN = Builder.createNaryOp(VPInstruction::AnyOf, {IsNaN});
The original AnyNaN indicates if any lane is NaN, better rename it AnyNaNLane rather than HasNaN?
The new AnyNaN indicates if any (lane of any) MinMaxNum reduction is NaN, better call it AnyNaNReduction rather than redefining AnyNaN?
Thanks, renamed, although things moved around a bit.
auto *NewSel =
    MiddleBuilder.createSelect(HasNaN, RedPhiR, RdxResult->getOperand(1));
RdxResult->setOperand(1, NewSel);
RdxResults.insert(RdxResult);
assert RdxResult is not already in RdxResults?
added, thanks
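One way to express that (a sketch; SmallPtrSet::insert returns whether the element was newly inserted):

bool Inserted = RdxResults.insert(RdxResult).second;
assert(Inserted && "ComputeReductionResult handled more than once");
(void)Inserted;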
; CHECK-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP18]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <4 x i1> poison, i1 [[TMP19]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT7]], <4 x i1> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = or i1 [[TMP13]], [[TMP19]]
Might it be better to vector-or TMP12 with TMP18 feeding a single reduce.or, rather than applying two reduce.or's feeding this scalar-or?
Yes, updated, thanks. This required restructuring the code to first iterate over all reductions and create the wide OR across the different reductions, followed by creating the AnyOf, as well as another loop that creates the selects outside the loop.
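Roughly, the restructured first loop has this shape (a sketch assembled from the snippets quoted later in the thread, using the renamed variables; not the verbatim patch):

VPValue *AllNaNLanes = nullptr;
for (const auto &[RedPhiR, MinMaxOp] : MinMaxNumReductionsToHandle) {
  // One wide NaN mask per reduction, OR'ed together in the latch ...
  VPValue *RedNaNLanes =
      LatchBuilder.createFCmp(CmpInst::FCMP_UNO, MinMaxOp, MinMaxOp);
  AllNaNLanes = AllNaNLanes ? LatchBuilder.createOr(AllNaNLanes, RedNaNLanes)
                            : RedNaNLanes;
}
// ... reduced once by a single AnyOf feeding the exit condition.
VPValue *AnyNaN =
    LatchBuilder.createNaryOp(VPInstruction::AnyOf, {AllNaNLanes});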
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP13]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
Might be better to sink this broadcasting down to the middle block?
This is an independent issue, will check why we don't materialize the broadcast outside the vector loop
// If we exit early due to NaNs, compute the final reduction result based
// on the reduction phi at the beginning of the last vector iteration.
auto *RdxResult = find_singleton<VPSingleDefRecipe>(
    RedPhiR->getBackedgeValue()->users(),
Better applied separately, being independent of single vs. multiple reductions?
auto *LatchExitingBranch = cast<VPInstruction>(LatchVPBB->getTerminator());
assert(LatchExitingBranch->getOpcode() == VPInstruction::BranchOnCount &&
VPBuilder LatchBuilder(LatchVPBB->getTerminator());
VPValue *IsNaNLane = nullptr;
- VPValue *IsNaNLane = nullptr;
+ VPValue *AllNaNLanes = nullptr;
?
Updated naming, thanks
assert(RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(
           RedPhiR->getRecurrenceKind()) &&
       "unsupported reduction");
RedPhiR used only by assert.
Thanks, I moved the assert to the second loop where only RedPhiR is used
LatchBuilder.createNaryOp(VPInstruction::AnyOf, {IsNaNLane});
VPBasicBlock *MiddleVPBB = Plan.getMiddleBlock();
VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->begin());
for (const auto &[RedPhiR, MinMaxOp] : MinMaxNumReductionsToHandle) {
MinMaxOp unused.
removed thanks
        RedPhiR->getRecurrenceKind()) &&
    "unsupported reduction");

VPValue *IsNaN =
- VPValue *IsNaN =
+ VPValue *RedNaNLanes =
?
Updated, thanks
auto *VPI = dyn_cast<VPInstruction>(U);
if (VPI && VPI->getOpcode() == VPInstruction::ComputeReductionResult)
nit: could use match?
Yep, will introduce a matcher and will also update this here, together with changing to iterating over the backedge value.
; CHECK-NEXT: [[TMP14:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP15:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD4]]
TODO - Would be nice to fold pairs together into:
- ; CHECK-NEXT: [[TMP14:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]]
- ; CHECK-NEXT: [[TMP15:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD4]]
+ ; CHECK-NEXT: [[TMP15:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
Added, thanks! I think on AArch64 it will still expand to 2 separate compares, but X86 has a dedicated instruction it seems: https://llvm.godbolt.org/z/E6vYaY1vT
The question is where best to do it? Perhaps as a simplification after unrolling?
Unrolling may indeed expose such opportunity, so may multiple reductions?
Yep, both should be fold-able: the multiple-reduction case using the OR as root, the single-reduction case the AnyOf, I think. It's probably better to handle them as independent folds, rather than complicating the logic and handling only the multiple-reduction case here?
Sure. The initial comment was just to note it as a TODO for now.
Generalize handleMaxMinNumReductions to handle any number of F(Max|Min)Num reductions by collecting a vector of reductions to convert. We then add NaN checks for all of them, followed by adjusting the branch controlling the vector loop region, and updating the resume phis. Addresses a TODO from llvm/llvm-project#148239 PR: llvm/llvm-project#161735
Fold or (fcmp uno %A, %A), (fcmp uno %B, %B), ... -> or (fcmp uno %A, %B), ... This pattern is generated to check if any vector lane is NaN, and combining multiple compares is beneficial on architectures that have dedicated instructions. Alive2 Proof: https://alive2.llvm.org/ce/z/vA_aoM Combine suggested as part of #161735 PR: #167251
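For illustration, the fold's shape in llvm::PatternMatch terms could look like the sketch below (an assumption about the implementation, not the actual InstCombine code; foldNaNChecks is a hypothetical name):

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static Value *foldNaNChecks(BinaryOperator &Or, IRBuilderBase &Builder) {
  FCmpInst::Predicate P0, P1;
  Value *A, *B;
  // or (fcmp uno A, A), (fcmp uno B, B) -> fcmp uno A, B, when types match.
  if (match(&Or, m_c_Or(m_FCmp(P0, m_Value(A), m_Deferred(A)),
                        m_FCmp(P1, m_Value(B), m_Deferred(B)))) &&
      P0 == FCmpInst::FCMP_UNO && P1 == FCmpInst::FCMP_UNO &&
      A->getType() == B->getType())
    return Builder.CreateFCmpUNO(A, B);
  return nullptr;
}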