|
; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

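; The CHECK lines below expect vp.select on i1 mask vectors to be lowered to
; mask-logic instructions computing (%b & %a) | (%c & ~%a), with the EVL in a0
; applied via vsetvli.
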
declare <vscale x 1 x i1> @llvm.vp.select.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i1> @select_nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmandn.mm v9, v9, v0
; CHECK-NEXT: vmand.mm v8, v8, v0
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i1> @llvm.vp.select.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i1> %c, i32 %evl)
  ret <vscale x 1 x i1> %v
}

declare <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @select_nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, <vscale x 2 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmandn.mm v9, v9, v0
; CHECK-NEXT: vmand.mm v8, v8, v0
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, <vscale x 2 x i1> %c, i32 %evl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 4 x i1> @llvm.vp.select.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i1> @select_nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, <vscale x 4 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmandn.mm v9, v9, v0
; CHECK-NEXT: vmand.mm v8, v8, v0
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i1> @llvm.vp.select.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, <vscale x 4 x i1> %c, i32 %evl)
  ret <vscale x 4 x i1> %v
}

declare <vscale x 8 x i1> @llvm.vp.select.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i1> @select_nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, <vscale x 8 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmandn.mm v9, v9, v0
; CHECK-NEXT: vmand.mm v8, v8, v0
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i1> @llvm.vp.select.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, <vscale x 8 x i1> %c, i32 %evl)
  ret <vscale x 8 x i1> %v
}

declare <vscale x 16 x i1> @llvm.vp.select.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i1> @select_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmandn.mm v9, v9, v0
; CHECK-NEXT: vmand.mm v8, v8, v0
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 16 x i1> @llvm.vp.select.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, i32 %evl)
  ret <vscale x 16 x i1> %v
}

declare <vscale x 32 x i1> @llvm.vp.select.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i1> @select_nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, <vscale x 32 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmandn.mm v9, v9, v0
; CHECK-NEXT: vmand.mm v8, v8, v0
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 32 x i1> @llvm.vp.select.nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, <vscale x 32 x i1> %c, i32 %evl)
  ret <vscale x 32 x i1> %v
}

declare <vscale x 64 x i1> @llvm.vp.select.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i1> @select_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, <vscale x 64 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmandn.mm v9, v9, v0
; CHECK-NEXT: vmand.mm v8, v8, v0
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 64 x i1> @llvm.vp.select.nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, <vscale x 64 x i1> %c, i32 %evl)
  ret <vscale x 64 x i1> %v
}

declare <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

define <vscale x 1 x i8> @select_nxv1i8(<vscale x 1 x i1> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, i32 zeroext %evl) {