@@ -190,6 +190,64 @@ entry:
  ret <vscale x 1 x i64> %y
}

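+; Presumably this checks that the scalar move (vfmv.s.f writes only element 0,
+; so any nonzero VL suffices) reuses the e64/m1 state set by the preceding
+; vsetvlimax, leaving a single vsetvli in the output.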
+define <vscale x 1 x double> @test10(<vscale x 1 x double> %a, double %b) nounwind {
+; CHECK-LABEL: test10:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = tail call i64 @llvm.riscv.vsetvlimax(i64 3, i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %a, double %b, i64 1)
+  ret <vscale x 1 x double> %y
+}
+
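+; Presumably the same property as test10 but with a known AVL of 6, which fits
+; vsetivli's 5-bit immediate; the scalar move only needs a nonzero VL, so the
+; single vsetivli serves both intrinsics.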
+define <vscale x 1 x double> @test11(<vscale x 1 x double> %a, double %b) nounwind {
+; CHECK-LABEL: test11:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetivli a0, 6, e64, m1, tu, mu
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %a, double %b, i64 2)
+  ret <vscale x 1 x double> %y
+}
+
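+; Presumably checks that the state set for the masked vfadd (AVL 9) also
+; satisfies the scalar move consuming its result, so no second vsetvli is
+; inserted between the two instructions.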
+define <vscale x 1 x double> @test12(<vscale x 1 x double> %a, double %b, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: test12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetivli zero, 9, e64, m1, tu, mu
+; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x i1> %mask,
+    i64 9,
+    i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %x, double %b, i64 2)
+  ret <vscale x 1 x double> %y
+}
+
declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
@@ -198,10 +256,24 @@ declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
  i64,
  i64);

+declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(
  <vscale x 1 x i64>,
  i64,
  i64);
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+  <vscale x 1 x double>,
+  double,
+  i64)
+
declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>* nocapture, i64)
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)