@@ -69,6 +69,39 @@ static const Intrinsic::ID ScalableVlsegIntrIds[] = {
     Intrinsic::riscv_vlseg6_mask, Intrinsic::riscv_vlseg7_mask,
     Intrinsic::riscv_vlseg8_mask};
 
+static const Intrinsic::ID FixedVssegIntrIds[] = {
+    Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
+    Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
+    Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
+    Intrinsic::riscv_seg8_store_mask};
+
+static const Intrinsic::ID ScalableVssegIntrIds[] = {
+    Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
+    Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
+    Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
+    Intrinsic::riscv_vsseg8_mask};
+
+static bool isMultipleOfN(const Value *V, const DataLayout &DL, unsigned N) {
+  assert(N);
+  if (N == 1)
+    return true;
+
+  using namespace PatternMatch;
+  // Right now we're only recognizing the simplest pattern.
+  uint64_t C;
+  if (match(V, m_CombineOr(m_ConstantInt(C),
+                           m_NUWMul(m_Value(), m_ConstantInt(C)))) &&
+      C && C % N == 0)
+    return true;
+
+  if (isPowerOf2_32(N)) {
+    KnownBits KB = llvm::computeKnownBits(V, DL);
+    return KB.countMinTrailingZeros() >= Log2_32(N);
+  }
+
+  return false;
+}
+
 /// Lower an interleaved load into a vlsegN intrinsic.
 ///
 /// E.g. Lower an interleaved load (Factor = 2):
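The hoisted `isMultipleOfN` helper accepts three proofs that `V` is a multiple of `N`: `N == 1` is trivially true; a value matching a constant, or an `nuw` multiply by a constant, is accepted when that constant is a nonzero multiple of `N`; otherwise, for power-of-two `N`, it falls back to `computeKnownBits` and requires at least `log2(N)` known trailing zero bits. A minimal sketch of the kind of call site such a helper enables when splitting a wide vector length by the interleave factor; the wrapper `computeSegmentVL` and the names `WideEVL`, `Factor`, and `XLenTy` are illustrative, not part of this change:

```cpp
// Sketch only: divide a wide EVL by the interleave factor, but only after
// isMultipleOfN has proven the division is exact. Everything here other than
// isMultipleOfN is hypothetical.
static Value *computeSegmentVL(IRBuilderBase &Builder, const DataLayout &DL,
                               Value *WideEVL, unsigned Factor, Type *XLenTy) {
  // Bail out (nullptr) if divisibility cannot be proven; otherwise trailing
  // elements could be dropped by the transform.
  if (!isMultipleOfN(WideEVL, DL, Factor))
    return nullptr;
  Value *SegEVL = Builder.CreateExactUDiv(
      WideEVL, ConstantInt::get(WideEVL->getType(), Factor));
  return Builder.CreateZExt(SegEVL, XLenTy);
}
```

Returning `nullptr` lets a caller fall back to leaving the original wide access untouched rather than emitting a lossy segmented form.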
@@ -134,18 +167,6 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
   return true;
 }
 
-static const Intrinsic::ID FixedVssegIntrIds[] = {
-    Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
-    Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
-    Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
-    Intrinsic::riscv_seg8_store_mask};
-
-static const Intrinsic::ID ScalableVssegIntrIds[] = {
-    Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
-    Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
-    Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
-    Intrinsic::riscv_vsseg8_mask};
-
 /// Lower an interleaved store into a vssegN intrinsic.
 ///
 /// E.g. Lower an interleaved store (Factor = 3):
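Both intrinsic-ID tables are laid out so that an interleave factor in [2, 8] maps to index `Factor - 2`, with the fixed-length table used for fixed vector types and the scalable table for scalable ones. A small sketch of that selection step; the wrapper `getSegStoreIntrinsicID` is illustrative, not part of this change:

```cpp
// Sketch only: pick the masked segment-store intrinsic for a given factor.
// The two tables are the ones hoisted earlier in this diff; the wrapper
// itself is hypothetical.
static Intrinsic::ID getSegStoreIntrinsicID(unsigned Factor,
                                            bool IsFixedVector) {
  assert(Factor >= 2 && Factor <= 8 && "segment factor must be in [2, 8]");
  return IsFixedVector ? FixedVssegIntrIds[Factor - 2]
                       : ScalableVssegIntrIds[Factor - 2];
}
```

With factor 2 at index 0, `riscv_vsseg8_mask` lands at index 6, matching the seven entries in each table.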
@@ -235,27 +256,6 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   return true;
 }
 
-static bool isMultipleOfN(const Value *V, const DataLayout &DL, unsigned N) {
-  assert(N);
-  if (N == 1)
-    return true;
-
-  using namespace PatternMatch;
-  // Right now we're only recognizing the simplest pattern.
-  uint64_t C;
-  if (match(V, m_CombineOr(m_ConstantInt(C),
-                           m_NUWMul(m_Value(), m_ConstantInt(C)))) &&
-      C && C % N == 0)
-    return true;
-
-  if (isPowerOf2_32(N)) {
-    KnownBits KB = llvm::computeKnownBits(V, DL);
-    return KB.countMinTrailingZeros() >= Log2_32(N);
-  }
-
-  return false;
-}
-
 bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
     Instruction *Load, Value *Mask, IntrinsicInst *DI) const {
   const unsigned Factor = getDeinterleaveIntrinsicFactor(DI->getIntrinsicID());
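The `m_NUWMul` arm of `isMultipleOfN` means a vector length built as a no-unsigned-wrap multiply by the factor is recognized directly, without any known-bits reasoning. A hedged producer-side sketch, assuming `NumSegments` is not a constant the builder folds away; `buildWideEVL` and `NumSegments` are illustrative names, not part of this change:

```cpp
// Sketch only: construct a wide EVL that isMultipleOfN can recognize via its
// m_NUWMul(m_Value(), m_ConstantInt(C)) arm (C == Factor here). NumSegments
// and the wrapper are hypothetical.
static Value *buildWideEVL(IRBuilderBase &Builder, Value *NumSegments,
                           unsigned Factor) {
  Value *FactorC = ConstantInt::get(NumSegments->getType(), Factor);
  // The nuw flag is what lets the pattern match prove divisibility; a plain
  // mul would fall through to the known-bits check (power-of-two N only).
  return Builder.CreateNUWMul(NumSegments, FactorC);
}
```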