Skip to content

Commit

Permalink
[X86] Add XOR(X, MIN_SIGNED_VALUE) -> ADD(X, MIN_SIGNED_VALUE) isel patterns (PR52267)
Browse files Browse the repository at this point in the history

Improve chances of folding to LEA patterns

Differential Revision: https://reviews.llvm.org/D123043
  • Loading branch information
RKSimon committed Apr 4, 2022
1 parent e394c4e commit ffe0cc8
Show file tree
Hide file tree
Showing 11 changed files with 97 additions and 82 deletions.
15 changes: 15 additions & 0 deletions llvm/lib/Target/X86/X86InstrCompiler.td
Original file line number Diff line number Diff line change
Expand Up @@ -1514,6 +1514,21 @@ def ADD64ri32_DB : I<0, Pseudo,
}
} // AddedComplexity, SchedRW

//===----------------------------------------------------------------------===//
// Pattern match XOR as ADD
//===----------------------------------------------------------------------===//

// Prefer to pattern match XOR with min_signed_value as ADD at isel time.
// ADD can be 3-addressified into an LEA instruction to avoid copies.
let AddedComplexity = 5 in {
def : Pat<(xor GR8:$src1, -128),
(ADD8ri GR8:$src1, -128)>;
def : Pat<(xor GR16:$src1, -32768),
(ADD16ri GR16:$src1, -32768)>;
def : Pat<(xor GR32:$src1, -2147483648),
(ADD32ri GR32:$src1, -2147483648)>;
}

//===----------------------------------------------------------------------===//
// Pattern match SUB as XOR
//===----------------------------------------------------------------------===//
Expand Down
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/avx512-cmp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
; ALL-LABEL: test8:
; ALL: ## %bb.0:
; ALL-NEXT: notl %edi
; ALL-NEXT: xorl $-2147483648, %esi ## imm = 0x80000000
; ALL-NEXT: addl $-2147483648, %esi ## imm = 0x80000000
; ALL-NEXT: testl %edx, %edx
; ALL-NEXT: movl $1, %eax
; ALL-NEXT: cmovel %eax, %edx
Expand Down
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/combine-add-ssat.ll
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ define i32 @combine_constant_i32(i32 %a0) {
; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: sarl $31, %eax
; CHECK-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
; CHECK-NEXT: addl $-2147483648, %eax # imm = 0x80000000
; CHECK-NEXT: incl %edi
; CHECK-NEXT: cmovnol %edi, %eax
; CHECK-NEXT: retq
Expand Down Expand Up @@ -130,7 +130,7 @@ define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
; CHECK-NEXT: shrl $16, %esi
; CHECK-NEXT: leal (%rdi,%rsi), %eax
; CHECK-NEXT: sarl $31, %eax
; CHECK-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
; CHECK-NEXT: addl $-2147483648, %eax # imm = 0x80000000
; CHECK-NEXT: addl %edi, %esi
; CHECK-NEXT: cmovnol %esi, %eax
; CHECK-NEXT: retq
Expand Down
52 changes: 26 additions & 26 deletions llvm/test/CodeGen/X86/horizontal-reduce-smin.ll
Original file line number Diff line number Diff line change
Expand Up @@ -313,7 +313,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
; X86-SSE42-NEXT: xorb $-128, %al
; X86-SSE42-NEXT: addb $-128, %al
; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
Expand All @@ -324,7 +324,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
; X86-AVX-NEXT: xorb $-128, %al
; X86-AVX-NEXT: addb $-128, %al
; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX-NEXT: retl
;
Expand Down Expand Up @@ -368,7 +368,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
; X64-SSE42-NEXT: xorb $-128, %al
; X64-SSE42-NEXT: addb $-128, %al
; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
Expand All @@ -379,7 +379,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
; X64-AVX-NEXT: xorb $-128, %al
; X64-AVX-NEXT: addb $-128, %al
; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
Expand Down Expand Up @@ -881,7 +881,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
; X86-SSE42-NEXT: xorb $-128, %al
; X86-SSE42-NEXT: addb $-128, %al
; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
Expand All @@ -894,7 +894,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
; X86-AVX1-NEXT: xorb $-128, %al
; X86-AVX1-NEXT: addb $-128, %al
; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
Expand All @@ -908,7 +908,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
; X86-AVX2-NEXT: xorb $-128, %al
; X86-AVX2-NEXT: addb $-128, %al
; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
Expand Down Expand Up @@ -959,7 +959,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
; X64-SSE42-NEXT: xorb $-128, %al
; X64-SSE42-NEXT: addb $-128, %al
; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
Expand All @@ -972,7 +972,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
; X64-AVX1-NEXT: xorb $-128, %al
; X64-AVX1-NEXT: addb $-128, %al
; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
Expand All @@ -986,7 +986,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
; X64-AVX2-NEXT: xorb $-128, %al
; X64-AVX2-NEXT: addb $-128, %al
; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
Expand All @@ -1000,7 +1000,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
; X64-AVX512-NEXT: xorb $-128, %al
; X64-AVX512-NEXT: addb $-128, %al
; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
Expand Down Expand Up @@ -1667,7 +1667,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
; X86-SSE42-NEXT: xorb $-128, %al
; X86-SSE42-NEXT: addb $-128, %al
; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
Expand All @@ -1683,7 +1683,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
; X86-AVX1-NEXT: xorb $-128, %al
; X86-AVX1-NEXT: addb $-128, %al
; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
Expand All @@ -1698,7 +1698,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
; X86-AVX2-NEXT: xorb $-128, %al
; X86-AVX2-NEXT: addb $-128, %al
; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
Expand Down Expand Up @@ -1761,7 +1761,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
; X64-SSE42-NEXT: xorb $-128, %al
; X64-SSE42-NEXT: addb $-128, %al
; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
Expand All @@ -1777,7 +1777,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
; X64-AVX1-NEXT: xorb $-128, %al
; X64-AVX1-NEXT: addb $-128, %al
; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
Expand All @@ -1792,7 +1792,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
; X64-AVX2-NEXT: xorb $-128, %al
; X64-AVX2-NEXT: addb $-128, %al
; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
Expand All @@ -1808,7 +1808,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
; X64-AVX512-NEXT: xorb $-128, %al
; X64-AVX512-NEXT: addb $-128, %al
; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
Expand Down Expand Up @@ -2033,7 +2033,7 @@ define i8 @test_reduce_v32i8_v16i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
; X86-SSE42-NEXT: xorb $-128, %al
; X86-SSE42-NEXT: addb $-128, %al
; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
Expand All @@ -2044,7 +2044,7 @@ define i8 @test_reduce_v32i8_v16i8(<32 x i8> %a0) {
; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
; X86-AVX-NEXT: xorb $-128, %al
; X86-AVX-NEXT: addb $-128, %al
; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl
Expand Down Expand Up @@ -2089,7 +2089,7 @@ define i8 @test_reduce_v32i8_v16i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
; X64-SSE42-NEXT: xorb $-128, %al
; X64-SSE42-NEXT: addb $-128, %al
; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
Expand All @@ -2100,7 +2100,7 @@ define i8 @test_reduce_v32i8_v16i8(<32 x i8> %a0) {
; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
; X64-AVX-NEXT: xorb $-128, %al
; X64-AVX-NEXT: addb $-128, %al
; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
Expand Down Expand Up @@ -2161,7 +2161,7 @@ define i8 @test_reduce_v64i8_v16i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
; X86-SSE42-NEXT: xorb $-128, %al
; X86-SSE42-NEXT: addb $-128, %al
; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
Expand All @@ -2172,7 +2172,7 @@ define i8 @test_reduce_v64i8_v16i8(<64 x i8> %a0) {
; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
; X86-AVX-NEXT: xorb $-128, %al
; X86-AVX-NEXT: addb $-128, %al
; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl
Expand Down Expand Up @@ -2217,7 +2217,7 @@ define i8 @test_reduce_v64i8_v16i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
; X64-SSE42-NEXT: xorb $-128, %al
; X64-SSE42-NEXT: addb $-128, %al
; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
Expand All @@ -2228,7 +2228,7 @@ define i8 @test_reduce_v64i8_v16i8(<64 x i8> %a0) {
; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
; X64-AVX-NEXT: xorb $-128, %al
; X64-AVX-NEXT: addb $-128, %al
; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
Expand Down
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/X86/sadd_sat.ll
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ define i32 @func(i32 %x, i32 %y) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%eax,%ecx), %edx
; X86-NEXT: sarl $31, %edx
; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: cmovol %edx, %eax
; X86-NEXT: retl
Expand All @@ -27,7 +27,7 @@ define i32 @func(i32 %x, i32 %y) nounwind {
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rsi), %eax
; X64-NEXT: sarl $31, %eax
; X64-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
; X64-NEXT: addl $-2147483648, %eax # imm = 0x80000000
; X64-NEXT: addl %esi, %edi
; X64-NEXT: cmovnol %edi, %eax
; X64-NEXT: retq
Expand All @@ -48,7 +48,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
; X86-NEXT: sarl $31, %edx
; X86-NEXT: testb %bl, %bl
; X86-NEXT: cmovnel %edx, %eax
; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
; X86-NEXT: testb %bl, %bl
; X86-NEXT: cmovel %ecx, %edx
; X86-NEXT: popl %ebx
Expand Down Expand Up @@ -106,7 +106,7 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
; X86-NEXT: movl %eax, %edx
; X86-NEXT: addb %cl, %dl
; X86-NEXT: sarb $7, %dl
; X86-NEXT: xorb $-128, %dl
; X86-NEXT: addb $-128, %dl
; X86-NEXT: addb %cl, %al
; X86-NEXT: movzbl %al, %ecx
; X86-NEXT: movzbl %dl, %eax
Expand All @@ -120,7 +120,7 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rsi), %eax
; X64-NEXT: sarb $7, %al
; X64-NEXT: xorb $-128, %al
; X64-NEXT: addb $-128, %al
; X64-NEXT: addb %sil, %dil
; X64-NEXT: movzbl %dil, %ecx
; X64-NEXT: movzbl %al, %eax
Expand Down Expand Up @@ -172,28 +172,28 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%ecx,%eax), %esi
; X86-NEXT: sarl $31, %esi
; X86-NEXT: xorl $-2147483648, %esi # imm = 0x80000000
; X86-NEXT: addl $-2147483648, %esi # imm = 0x80000000
; X86-NEXT: addl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: cmovol %esi, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%edx,%eax), %edi
; X86-NEXT: sarl $31, %edi
; X86-NEXT: xorl $-2147483648, %edi # imm = 0x80000000
; X86-NEXT: addl $-2147483648, %edi # imm = 0x80000000
; X86-NEXT: addl %eax, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: cmovol %edi, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%esi,%eax), %ebx
; X86-NEXT: sarl $31, %ebx
; X86-NEXT: xorl $-2147483648, %ebx # imm = 0x80000000
; X86-NEXT: addl $-2147483648, %ebx # imm = 0x80000000
; X86-NEXT: addl %eax, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: cmovol %ebx, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%edi,%eax), %ebx
; X86-NEXT: sarl $31, %ebx
; X86-NEXT: xorl $-2147483648, %ebx # imm = 0x80000000
; X86-NEXT: addl $-2147483648, %ebx # imm = 0x80000000
; X86-NEXT: addl %eax, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmovol %ebx, %edi
Expand Down
Loading

0 comments on commit ffe0cc8

Please sign in to comment.