1 parent 2f8a888 commit b919f2e
llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1782,6 +1782,12 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI);
+
+      if (Fold.isImm() && tryConstantFoldOp(Fold.UseMI)) {
+        LLVM_DEBUG(dbgs() << "Constant folded " << *Fold.UseMI);
+        Changed = true;
+      }
+
    } else if (Fold.Commuted) {
      // Restoring instruction's original operand order if fold has failed.
      TII->commuteInstruction(*Fold.UseMI, false);
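For context on the change above: once an immediate has been folded into a use, the new block calls tryConstantFoldOp on that use so instructions whose operands have now become constants (or identities such as OR with 0) are simplified further, which is what the test updates below check. Below is a minimal, standalone C++ sketch of the all-constant case only; it is not the actual SIFoldOperands code, and BinOp and foldBinaryImm are illustrative names, not LLVM APIs.

// Illustrative sketch (not the real tryConstantFoldOp): fold a 32-bit binary
// op into a single constant when both of its operands are known immediates.
#include <cstdint>
#include <optional>

enum class BinOp { And32, Or32, Xor32 };

// An operand is either a known 32-bit immediate or an unknown register value.
using Operand = std::optional<std::uint32_t>;

// Returns the folded constant, or nullopt if any operand is not an immediate.
std::optional<std::uint32_t> foldBinaryImm(BinOp Op, Operand LHS, Operand RHS) {
  if (!LHS || !RHS)
    return std::nullopt; // Not all operands are constants; nothing to fold.
  switch (Op) {
  case BinOp::And32: return *LHS & *RHS;
  case BinOp::Or32:  return *LHS | *RHS;
  case BinOp::Xor32: return *LHS ^ *RHS;
  }
  return std::nullopt;
}

The real pass also handles identity operands, which is why V_OR_B32_e64 0, reg degenerates into a plain COPY in constant-fold-imm-immreg.mir below, while V_AND_B32_e64 65535, 0 folds to a V_MOV_B32 of 0 exactly as in the sketch.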
llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
@@ -105,9 +105,8 @@ define i64 @v_xor_i64_known_i32_from_range_use_out_of_block(i64 %x) {
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
; CHECK-NEXT: ; %bb.1: ; %inc
; CHECK-NEXT: v_not_b32_e32 v2, v4
-; CHECK-NEXT: v_not_b32_e32 v3, 0
; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
+; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
; CHECK-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: v_mov_b32_e32 v0, v2
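The CHECK update above boils down to a bit-pattern identity: NOT of 0 is all ones, which is the two's-complement encoding of -1, so the separately materialized v_not_b32_e32 v3, 0 disappears and the add-with-carry takes the inline constant -1 directly. A standalone C++ check of that identity (not part of the commit):

// NOT of zero is the all-ones 32-bit pattern, i.e. the same bits as -1,
// so "x + ~0 + carry" computes the same value as "x + (-1) + carry".
#include <cstdint>

constexpr std::uint32_t NotZero = ~std::uint32_t{0};
static_assert(NotZero == 0xFFFFFFFFu, "NOT of zero is all ones");
static_assert(NotZero == static_cast<std::uint32_t>(-1), "same bits as -1");

int main() { return 0; }

The same rewrite shows up again in the sdiv64, srem64, and udiv64 tests further down.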
llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -973,7 +973,7 @@ body: |
; GCN: liveins: $vgpr0, $vgpr1
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 0, [[COPY]], implicit $exec
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
; GCN-NEXT: S_ENDPGM 0, implicit [[COPY1]]
%0:vgpr_32 = COPY $vgpr0
%1:vgpr_32 = COPY $vgpr1
llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -43,8 +43,7 @@ body: |
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[DEF]], %subreg.sub0, killed [[V_MOV_B32_e32_]], %subreg.sub1
- ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 0, [[DEF1]], implicit $exec
- ; GCN-NEXT: [[V_XOR_B32_e32_1:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
+ ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
%0:vgpr_32 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
@@ -8,8 +8,8 @@ body: |
; CHECK-LABEL: name: test_tryFoldZeroHighBits_skips_nonreg
; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1
- ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, 0, implicit $exec
- ; CHECK-NEXT: S_NOP 0, implicit [[V_AND_B32_e64_]]
+ ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]]
%0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
%1:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
%2:vgpr_32 = V_AND_B32_e64 65535, %1.sub0, implicit $exec
llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -404,12 +404,11 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, -1, v0
; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v5, v10
+; GCN-IR-NEXT: v_not_b32_e32 v4, v10
; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT: v_not_b32_e32 v4, 0
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, v5, v11
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, v4, v11
; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v4, vcc
+; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -380,12 +380,11 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
380
381
; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, -1, v2
382
; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, -1, v3, vcc
383
-; GCN-IR-NEXT: v_not_b32_e32 v7, v12
+; GCN-IR-NEXT: v_not_b32_e32 v6, v12
384
; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v8
385
-; GCN-IR-NEXT: v_not_b32_e32 v6, 0
386
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v6, v13
387
; GCN-IR-NEXT: v_mov_b32_e32 v12, 0
388
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v6, vcc
+; GCN-IR-NEXT: v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
389
390
; GCN-IR-NEXT: v_mov_b32_e32 v13, 0
391
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -348,10 +348,9 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v10
; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v3, vcc
; GCN-IR-NEXT: v_not_b32_e32 v0, v14
-; GCN-IR-NEXT: v_not_b32_e32 v1, 0
; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v15
-; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -355,12 +355,11 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v2
; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc