From ffe0cc82dbaecde4fd1bb2bca5c6aea59be698a6 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 4 Apr 2022 19:47:06 +0100
Subject: [PATCH] [X86] Add XOR(X, MIN_SIGNED_VALUE) -> ADD(X, MIN_SIGNED_VALUE) isel patterns (PR52267)

Improve chances of folding to LEA patterns

Differential Revision: https://reviews.llvm.org/D123043
---
 llvm/lib/Target/X86/X86InstrCompiler.td     | 15 ++++
 llvm/test/CodeGen/X86/avx512-cmp.ll         |  2 +-
 llvm/test/CodeGen/X86/combine-add-ssat.ll   |  4 +-
 .../CodeGen/X86/horizontal-reduce-smin.ll   | 52 +++++++++----------
 llvm/test/CodeGen/X86/sadd_sat.ll           | 18 +++----
 llvm/test/CodeGen/X86/sadd_sat_plus.ll      | 10 ++--
 llvm/test/CodeGen/X86/sadd_sat_vec.ll       |  4 +-
 llvm/test/CodeGen/X86/ssub_sat.ll           |  2 +-
 llvm/test/CodeGen/X86/ssub_sat_plus.ll      |  2 +-
 llvm/test/CodeGen/X86/vector-reduce-smin.ll | 30 +++++------
 llvm/test/CodeGen/X86/xor-lea.ll            | 40 +++++++-------
 11 files changed, 97 insertions(+), 82 deletions(-)

diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 4b2b7a947d04e5..39f27312c8ce00 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1514,6 +1514,21 @@ def ADD64ri32_DB : I<0, Pseudo,
 }
 } // AddedComplexity, SchedRW
 
+//===----------------------------------------------------------------------===//
+// Pattern match XOR as ADD
+//===----------------------------------------------------------------------===//
+
+// Prefer to pattern match XOR with min_signed_value as ADD at isel time.
+// ADD can be 3-addressified into an LEA instruction to avoid copies.
+let AddedComplexity = 5 in {
+def : Pat<(xor GR8:$src1, -128),
+          (ADD8ri GR8:$src1, -128)>;
+def : Pat<(xor GR16:$src1, -32768),
+          (ADD16ri GR16:$src1, -32768)>;
+def : Pat<(xor GR32:$src1, -2147483648),
+          (ADD32ri GR32:$src1, -2147483648)>;
+}
+
 //===----------------------------------------------------------------------===//
 // Pattern match SUB as XOR
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/X86/avx512-cmp.ll b/llvm/test/CodeGen/X86/avx512-cmp.ll
index c9f15724fa115f..06da8f7631d492 100644
--- a/llvm/test/CodeGen/X86/avx512-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-cmp.ll
@@ -116,7 +116,7 @@ define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
 ; ALL-LABEL: test8:
 ; ALL: ## %bb.0:
 ; ALL-NEXT: notl %edi
-; ALL-NEXT: xorl $-2147483648, %esi ## imm = 0x80000000
+; ALL-NEXT: addl $-2147483648, %esi ## imm = 0x80000000
 ; ALL-NEXT: testl %edx, %edx
 ; ALL-NEXT: movl $1, %eax
 ; ALL-NEXT: cmovel %eax, %edx
diff --git a/llvm/test/CodeGen/X86/combine-add-ssat.ll b/llvm/test/CodeGen/X86/combine-add-ssat.ll
index be52956a81065d..277a685131bf85 100644
--- a/llvm/test/CodeGen/X86/combine-add-ssat.ll
+++ b/llvm/test/CodeGen/X86/combine-add-ssat.ll
@@ -80,7 +80,7 @@ define i32 @combine_constant_i32(i32 %a0) {
 ; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: leal 1(%rdi), %eax
 ; CHECK-NEXT: sarl $31, %eax
-; CHECK-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; CHECK-NEXT: addl $-2147483648, %eax # imm = 0x80000000
 ; CHECK-NEXT: incl %edi
 ; CHECK-NEXT: cmovnol %edi, %eax
 ; CHECK-NEXT: retq
@@ -130,7 +130,7 @@ define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
 ; CHECK-NEXT: shrl $16, %esi
 ; CHECK-NEXT: leal (%rdi,%rsi), %eax
 ; CHECK-NEXT: sarl $31, %eax
-; CHECK-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; CHECK-NEXT: addl $-2147483648, %eax # imm = 0x80000000
 ; CHECK-NEXT: addl %edi, %esi
 ; CHECK-NEXT: cmovnol %esi, %eax
 ; CHECK-NEXT: retq
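
Background on the rewrite above: for i8/i16/i32, XOR with the minimum signed value only flips the sign bit, and ADD of the same constant computes exactly the same result, because the constant's lower bits are zero (so nothing carries into the sign bit) and the carry out of the top bit wraps away. Once the node is an ADD, isel can pick LEA, which writes a separate destination register; that is why the xor-lea.ll checks further down drop the "movl %edi, %eax" copy in favour of a single "leal -128(%rdi), %eax". The following standalone C++ sketch (illustrative only, not part of the patch; it uses no LLVM APIs, and the file name is hypothetical) checks the identity the new patterns rely on:

    // xor_as_add_identity.cpp (hypothetical name): verify that
    // x ^ MIN_SIGNED == x + MIN_SIGNED for the 8/16/32-bit widths covered
    // by the new ADD8ri/ADD16ri/ADD32ri patterns.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // 8-bit and 16-bit: check exhaustively.
      for (uint32_t X = 0; X <= UINT8_MAX; ++X) {
        uint8_t V = static_cast<uint8_t>(X);
        assert(static_cast<uint8_t>(V ^ 0x80u) ==
               static_cast<uint8_t>(V + 0x80u));
      }
      for (uint32_t X = 0; X <= UINT16_MAX; ++X) {
        uint16_t V = static_cast<uint16_t>(X);
        assert(static_cast<uint16_t>(V ^ 0x8000u) ==
               static_cast<uint16_t>(V + 0x8000u));
      }
      // 32-bit: spot checks; the argument is the same (only bit 31 changes,
      // and the carry out of bit 31 is discarded by the unsigned wrap).
      const uint32_t Samples[] = {0u, 1u, 0x7fffffffu, 0x80000000u,
                                  0xdeadbeefu, 0xffffffffu};
      for (uint32_t V : Samples)
        assert((V ^ 0x80000000u) == V + 0x80000000u);
      std::puts("xor and add with the sign-bit constant agree");
      return 0;
    }

Note that only GR8/GR16/GR32 patterns are added; the i64 sign-bit constant would not fit a 32-bit immediate, so presumably no GR64 variant is worthwhile.
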
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-smin.ll b/llvm/test/CodeGen/X86/horizontal-reduce-smin.ll
index 8e5bc10fb62561..1cfa043a8252f6 100644
--- a/llvm/test/CodeGen/X86/horizontal-reduce-smin.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-smin.ll
@@ -313,7 +313,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
 ; X86-SSE42-NEXT: pminub %xmm0, %xmm1
 ; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: xorb $-128, %al
+; X86-SSE42-NEXT: addb $-128, %al
 ; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT: retl
 ;
@@ -324,7 +324,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
 ; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: xorb $-128, %al
+; X86-AVX-NEXT: addb $-128, %al
 ; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX-NEXT: retl
 ;
@@ -368,7 +368,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
 ; X64-SSE42-NEXT: pminub %xmm0, %xmm1
 ; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: xorb $-128, %al
+; X64-SSE42-NEXT: addb $-128, %al
 ; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT: retq
 ;
@@ -379,7 +379,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
 ; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: xorb $-128, %al
+; X64-AVX-NEXT: addb $-128, %al
 ; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX-NEXT: retq
 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32>
@@ -881,7 +881,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X86-SSE42-NEXT: pminub %xmm0, %xmm1
 ; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: xorb $-128, %al
+; X86-SSE42-NEXT: addb $-128, %al
 ; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT: retl
 ;
@@ -894,7 +894,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: xorb $-128, %al
+; X86-AVX1-NEXT: addb $-128, %al
 ; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT: vzeroupper
 ; X86-AVX1-NEXT: retl
@@ -908,7 +908,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X86-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: xorb $-128, %al
+; X86-AVX2-NEXT: addb $-128, %al
 ; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT: vzeroupper
 ; X86-AVX2-NEXT: retl
@@ -959,7 +959,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X64-SSE42-NEXT: pminub %xmm0, %xmm1
 ; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: xorb $-128, %al
+; X64-SSE42-NEXT: addb $-128, %al
 ; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT: retq
 ;
@@ -972,7 +972,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: xorb $-128, %al
+; X64-AVX1-NEXT: addb $-128, %al
 ; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT: vzeroupper
 ; X64-AVX1-NEXT: retq
@@ -986,7 +986,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X64-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: xorb $-128, %al
+; X64-AVX2-NEXT: addb $-128, %al
 ; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
@@ -1000,7 +1000,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X64-AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: xorb $-128, %al
+; X64-AVX512-NEXT: addb $-128, %al
 ; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT: vzeroupper
 ; X64-AVX512-NEXT: retq
@@ -1667,7 +1667,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X86-SSE42-NEXT: pminub %xmm1, %xmm0
 ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: xorb $-128, %al
+; X86-SSE42-NEXT: addb $-128, %al
 ; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT: retl
 ;
@@ -1683,7 +1683,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: xorb $-128, %al
+; X86-AVX1-NEXT: addb $-128, %al
 ; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT: vzeroupper
 ; X86-AVX1-NEXT: retl
@@ -1698,7 +1698,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X86-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: xorb $-128, %al
+; X86-AVX2-NEXT: addb $-128, %al
 ; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT: vzeroupper
 ; X86-AVX2-NEXT: retl
@@ -1761,7 +1761,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X64-SSE42-NEXT: pminub %xmm1, %xmm0
 ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: xorb $-128, %al
+; X64-SSE42-NEXT: addb $-128, %al
 ; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT: retq
 ;
@@ -1777,7 +1777,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: xorb $-128, %al
+; X64-AVX1-NEXT: addb $-128, %al
 ; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT: vzeroupper
 ; X64-AVX1-NEXT: retq
@@ -1792,7 +1792,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X64-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: xorb $-128, %al
+; X64-AVX2-NEXT: addb $-128, %al
 ; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
@@ -1808,7 +1808,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X64-AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: xorb $-128, %al
+; X64-AVX512-NEXT: addb $-128, %al
 ; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT: vzeroupper
 ; X64-AVX512-NEXT: retq
@@ -2033,7 +2033,7 @@ define i8 @test_reduce_v32i8_v16i8(<32 x i8> %a0) {
 ; X86-SSE42-NEXT: pminub %xmm0, %xmm1
 ; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: xorb $-128, %al
+; X86-SSE42-NEXT: addb $-128, %al
 ; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT: retl
 ;
@@ -2044,7 +2044,7 @@ define i8 @test_reduce_v32i8_v16i8(<32 x i8> %a0) {
 ; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: xorb $-128, %al
+; X86-AVX-NEXT: addb $-128, %al
 ; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX-NEXT: vzeroupper
 ; X86-AVX-NEXT: retl
@@ -2089,7 +2089,7 @@ define i8 @test_reduce_v32i8_v16i8(<32 x i8> %a0) {
 ; X64-SSE42-NEXT: pminub %xmm0, %xmm1
 ; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: xorb $-128, %al
+; X64-SSE42-NEXT: addb $-128, %al
 ; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT: retq
 ;
@@ -2100,7 +2100,7 @@ define i8 @test_reduce_v32i8_v16i8(<32 x i8> %a0) {
 ; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: xorb $-128, %al
+; X64-AVX-NEXT: addb $-128, %al
 ; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX-NEXT: vzeroupper
 ; X64-AVX-NEXT: retq
@@ -2161,7 +2161,7 @@ define i8 @test_reduce_v64i8_v16i8(<64 x i8> %a0) {
 ; X86-SSE42-NEXT: pminub %xmm0, %xmm1
 ; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
 ; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: xorb $-128, %al
+; X86-SSE42-NEXT: addb $-128, %al
 ; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT: retl
 ;
@@ -2172,7 +2172,7 @@ define i8 @test_reduce_v64i8_v16i8(<64 x i8> %a0) {
 ; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: xorb $-128, %al
+; X86-AVX-NEXT: addb $-128, %al
 ; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX-NEXT: vzeroupper
 ; X86-AVX-NEXT: retl
@@ -2217,7 +2217,7 @@ define i8 @test_reduce_v64i8_v16i8(<64 x i8> %a0) {
 ; X64-SSE42-NEXT: pminub %xmm0, %xmm1
 ; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
 ; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: xorb $-128, %al
+; X64-SSE42-NEXT: addb $-128, %al
 ; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT: retq
 ;
@@ -2228,7 +2228,7 @@ define i8 @test_reduce_v64i8_v16i8(<64 x i8> %a0) {
 ; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: xorb $-128, %al
+; X64-AVX-NEXT: addb $-128, %al
 ; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX-NEXT: vzeroupper
 ; X64-AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/sadd_sat.ll b/llvm/test/CodeGen/X86/sadd_sat.ll
index a27988d787159a..30ccd08b52cf01 100644
--- a/llvm/test/CodeGen/X86/sadd_sat.ll
+++ b/llvm/test/CodeGen/X86/sadd_sat.ll
@@ -16,7 +16,7 @@ define i32 @func(i32 %x, i32 %y) nounwind {
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: leal (%eax,%ecx), %edx
 ; X86-NEXT: sarl $31, %edx
-; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT: addl %ecx, %eax
 ; X86-NEXT: cmovol %edx, %eax
 ; X86-NEXT: retl
@@ -27,7 +27,7 @@ define i32 @func(i32 %x, i32 %y) nounwind {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: leal (%rdi,%rsi), %eax
 ; X64-NEXT: sarl $31, %eax
-; X64-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT: addl $-2147483648, %eax # imm = 0x80000000
 ; X64-NEXT: addl %esi, %edi
 ; X64-NEXT: cmovnol %edi, %eax
 ; X64-NEXT: retq
@@ -48,7 +48,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; X86-NEXT: sarl $31, %edx
 ; X86-NEXT: testb %bl, %bl
 ; X86-NEXT: cmovnel %edx, %eax
-; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT: testb %bl, %bl
 ; X86-NEXT: cmovel %ecx, %edx
 ; X86-NEXT: popl %ebx
@@ -106,7 +106,7 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
 ; X86-NEXT: movl %eax, %edx
 ; X86-NEXT: addb %cl, %dl
 ; X86-NEXT: sarb $7, %dl
-; X86-NEXT: xorb $-128, %dl
+; X86-NEXT: addb $-128, %dl
 ; X86-NEXT: addb %cl, %al
 ; X86-NEXT: movzbl %al, %ecx
 ; X86-NEXT: movzbl %dl, %eax
@@ -120,7 +120,7 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: leal (%rdi,%rsi), %eax
 ; X64-NEXT: sarb $7, %al
-; X64-NEXT: xorb $-128, %al
+; X64-NEXT: addb $-128, %al
 ; X64-NEXT: addb %sil, %dil
 ; X64-NEXT: movzbl %dil, %ecx
 ; X64-NEXT: movzbl %al, %eax
@@ -172,28 +172,28 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: leal (%ecx,%eax), %esi
 ; X86-NEXT: sarl $31, %esi
-; X86-NEXT: xorl $-2147483648, %esi # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %esi # imm = 0x80000000
 ; X86-NEXT: addl %eax, %ecx
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT: cmovol %esi, %ecx
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: leal (%edx,%eax), %edi
 ; X86-NEXT: sarl $31, %edi
-; X86-NEXT: xorl $-2147483648, %edi # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edi # imm = 0x80000000
 ; X86-NEXT: addl %eax, %edx
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT: cmovol %edi, %edx
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: leal (%esi,%eax), %ebx
 ; X86-NEXT: sarl $31, %ebx
-; X86-NEXT: xorl $-2147483648, %ebx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %ebx # imm = 0x80000000
 ; X86-NEXT: addl %eax, %esi
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X86-NEXT: cmovol %ebx, %esi
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: leal (%edi,%eax), %ebx
 ; X86-NEXT: sarl $31, %ebx
-; X86-NEXT: xorl $-2147483648, %ebx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %ebx # imm = 0x80000000
 ; X86-NEXT: addl %eax, %edi
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: cmovol %ebx, %edi
diff --git a/llvm/test/CodeGen/X86/sadd_sat_plus.ll b/llvm/test/CodeGen/X86/sadd_sat_plus.ll
index 06799bd8862f4f..abb928421afa4f 100644
--- a/llvm/test/CodeGen/X86/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/X86/sadd_sat_plus.ll
@@ -16,7 +16,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: leal (%eax,%ecx), %edx
 ; X86-NEXT: sarl $31, %edx
-; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT: addl %ecx, %eax
 ; X86-NEXT: cmovol %edx, %eax
 ; X86-NEXT: retl
@@ -28,7 +28,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; X64-NEXT: imull %edx, %esi
 ; X64-NEXT: leal (%rdi,%rsi), %eax
 ; X64-NEXT: sarl $31, %eax
-; X64-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT: addl $-2147483648, %eax # imm = 0x80000000
 ; X64-NEXT: addl %edi, %esi
 ; X64-NEXT: cmovnol %esi, %eax
 ; X64-NEXT: retq
@@ -50,7 +50,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; X86-NEXT: sarl $31, %edx
 ; X86-NEXT: testb %bl, %bl
 ; X86-NEXT: cmovnel %edx, %eax
-; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT: testb %bl, %bl
 ; X86-NEXT: cmovel %ecx, %edx
 ; X86-NEXT: popl %ebx
@@ -113,7 +113,7 @@ define signext i8 @func8(i8 signext %x, i8 signext %y, i8 signext %z) nounwind {
 ; X86-NEXT: movl %eax, %edx
 ; X86-NEXT: addb %cl, %dl
 ; X86-NEXT: sarb $7, %dl
-; X86-NEXT: xorb $-128, %dl
+; X86-NEXT: addb $-128, %dl
 ; X86-NEXT: addb %cl, %al
 ; X86-NEXT: movzbl %al, %ecx
 ; X86-NEXT: movzbl %dl, %eax
@@ -130,7 +130,7 @@ define signext i8 @func8(i8 signext %x, i8 signext %y, i8 signext %z) nounwind {
 ; X64-NEXT: # kill: def $al killed $al def $rax
 ; X64-NEXT: leal (%rdi,%rax), %ecx
 ; X64-NEXT: sarb $7, %cl
-; X64-NEXT: xorb $-128, %cl
+; X64-NEXT: addb $-128, %cl
 ; X64-NEXT: addb %al, %dil
 ; X64-NEXT: movzbl %dil, %edx
 ; X64-NEXT: movzbl %cl, %eax
diff --git a/llvm/test/CodeGen/X86/sadd_sat_vec.ll b/llvm/test/CodeGen/X86/sadd_sat_vec.ll
index e83209a870e28b..025ddcd48cf343 100644
--- a/llvm/test/CodeGen/X86/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/sadd_sat_vec.ll
@@ -433,7 +433,7 @@ define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
 ; SSE-NEXT: movb (%rsi), %cl
 ; SSE-NEXT: leal (%rax,%rcx), %esi
 ; SSE-NEXT: sarb $7, %sil
-; SSE-NEXT: xorb $-128, %sil
+; SSE-NEXT: addb $-128, %sil
 ; SSE-NEXT: addb %cl, %al
 ; SSE-NEXT: movzbl %al, %eax
 ; SSE-NEXT: movzbl %sil, %ecx
@@ -447,7 +447,7 @@ define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
 ; AVX-NEXT: movb (%rsi), %cl
 ; AVX-NEXT: leal (%rax,%rcx), %esi
 ; AVX-NEXT: sarb $7, %sil
-; AVX-NEXT: xorb $-128, %sil
+; AVX-NEXT: addb $-128, %sil
 ; AVX-NEXT: addb %cl, %al
 ; AVX-NEXT: movzbl %al, %eax
 ; AVX-NEXT: movzbl %sil, %ecx
diff --git a/llvm/test/CodeGen/X86/ssub_sat.ll b/llvm/test/CodeGen/X86/ssub_sat.ll
index 78474ff179856a..be8926b5d948aa 100644
--- a/llvm/test/CodeGen/X86/ssub_sat.ll
+++ b/llvm/test/CodeGen/X86/ssub_sat.ll
@@ -48,7 +48,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; X86-NEXT: sarl $31, %edx
 ; X86-NEXT: testb %bl, %bl
 ; X86-NEXT: cmovnel %edx, %eax
-; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT: testb %bl, %bl
 ; X86-NEXT: cmovel %ecx, %edx
 ; X86-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/ssub_sat_plus.ll b/llvm/test/CodeGen/X86/ssub_sat_plus.ll
index 51cd25c7885a8a..45e01e5610afdc 100644
--- a/llvm/test/CodeGen/X86/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/X86/ssub_sat_plus.ll
@@ -50,7 +50,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; X86-NEXT: sarl $31, %edx
 ; X86-NEXT: testb %bl, %bl
 ; X86-NEXT: cmovnel %edx, %eax
-; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT: testb %bl, %bl
 ; X86-NEXT: cmovel %ecx, %edx
 ; X86-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/vector-reduce-smin.ll b/llvm/test/CodeGen/X86/vector-reduce-smin.ll
index 16d58b7742ab22..b69def9247c1e5 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-smin.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-smin.ll
@@ -1660,7 +1660,7 @@ define i8 @test_v16i8(<16 x i8> %a0) {
 ; SSE4-NEXT: pminub %xmm0, %xmm1
 ; SSE4-NEXT: phminposuw %xmm1, %xmm0
 ; SSE4-NEXT: movd %xmm0, %eax
-; SSE4-NEXT: xorb $-128, %al
+; SSE4-NEXT: addb $-128, %al
 ; SSE4-NEXT: # kill: def $al killed $al killed $eax
 ; SSE4-NEXT: retq
 ;
@@ -1671,7 +1671,7 @@ define i8 @test_v16i8(<16 x i8> %a0) {
 ; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: xorb $-128, %al
+; AVX-NEXT: addb $-128, %al
 ; AVX-NEXT: # kill: def $al killed $al killed $eax
 ; AVX-NEXT: retq
 ;
@@ -1682,7 +1682,7 @@ define i8 @test_v16i8(<16 x i8> %a0) {
 ; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorb $-128, %al
+; AVX512-NEXT: addb $-128, %al
 ; AVX512-NEXT: # kill: def $al killed $al killed $eax
 ; AVX512-NEXT: retq
 %1 = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %a0)
@@ -1736,7 +1736,7 @@ define i8 @test_v32i8(<32 x i8> %a0) {
 ; SSE4-NEXT: pminub %xmm0, %xmm1
 ; SSE4-NEXT: phminposuw %xmm1, %xmm0
 ; SSE4-NEXT: movd %xmm0, %eax
-; SSE4-NEXT: xorb $-128, %al
+; SSE4-NEXT: addb $-128, %al
 ; SSE4-NEXT: # kill: def $al killed $al killed $eax
 ; SSE4-NEXT: retq
 ;
@@ -1749,7 +1749,7 @@ define i8 @test_v32i8(<32 x i8> %a0) {
 ; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorb $-128, %al
+; AVX1-NEXT: addb $-128, %al
 ; AVX1-NEXT: # kill: def $al killed $al killed $eax
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -1763,7 +1763,7 @@ define i8 @test_v32i8(<32 x i8> %a0) {
 ; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorb $-128, %al
+; AVX2-NEXT: addb $-128, %al
 ; AVX2-NEXT: # kill: def $al killed $al killed $eax
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -1777,7 +1777,7 @@ define i8 @test_v32i8(<32 x i8> %a0) {
 ; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorb $-128, %al
+; AVX512-NEXT: addb $-128, %al
 ; AVX512-NEXT: # kill: def $al killed $al killed $eax
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
@@ -1844,7 +1844,7 @@ define i8 @test_v64i8(<64 x i8> %a0) {
 ; SSE4-NEXT: pminub %xmm1, %xmm0
 ; SSE4-NEXT: phminposuw %xmm0, %xmm0
 ; SSE4-NEXT: movd %xmm0, %eax
-; SSE4-NEXT: xorb $-128, %al
+; SSE4-NEXT: addb $-128, %al
 ; SSE4-NEXT: # kill: def $al killed $al killed $eax
 ; SSE4-NEXT: retq
 ;
@@ -1860,7 +1860,7 @@ define i8 @test_v64i8(<64 x i8> %a0) {
 ; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorb $-128, %al
+; AVX1-NEXT: addb $-128, %al
 ; AVX1-NEXT: # kill: def $al killed $al killed $eax
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -1875,7 +1875,7 @@ define i8 @test_v64i8(<64 x i8> %a0) {
 ; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorb $-128, %al
+; AVX2-NEXT: addb $-128, %al
 ; AVX2-NEXT: # kill: def $al killed $al killed $eax
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -1891,7 +1891,7 @@ define i8 @test_v64i8(<64 x i8> %a0) {
 ; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorb $-128, %al
+; AVX512-NEXT: addb $-128, %al
 ; AVX512-NEXT: # kill: def $al killed $al killed $eax
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
@@ -1982,7 +1982,7 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; SSE4-NEXT: pminub %xmm2, %xmm0
 ; SSE4-NEXT: phminposuw %xmm0, %xmm0
 ; SSE4-NEXT: movd %xmm0, %eax
-; SSE4-NEXT: xorb $-128, %al
+; SSE4-NEXT: addb $-128, %al
 ; SSE4-NEXT: # kill: def $al killed $al killed $eax
 ; SSE4-NEXT: retq
 ;
@@ -2004,7 +2004,7 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorb $-128, %al
+; AVX1-NEXT: addb $-128, %al
 ; AVX1-NEXT: # kill: def $al killed $al killed $eax
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -2021,7 +2021,7 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorb $-128, %al
+; AVX2-NEXT: addb $-128, %al
 ; AVX2-NEXT: # kill: def $al killed $al killed $eax
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -2038,7 +2038,7 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorb $-128, %al
+; AVX512-NEXT: addb $-128, %al
 ; AVX512-NEXT: # kill: def $al killed $al killed $eax
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/xor-lea.ll b/llvm/test/CodeGen/X86/xor-lea.ll
index a4d59207a58f58..d0e3e1a5221832 100644
--- a/llvm/test/CodeGen/X86/xor-lea.ll
+++ b/llvm/test/CodeGen/X86/xor-lea.ll
@@ -16,13 +16,13 @@ define i8 @xor_sminval_i8(i8 %x) {
 ; X86-LABEL: xor_sminval_i8:
 ; X86: # %bb.0:
 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: xorb $-128, %al
+; X86-NEXT: addb $-128, %al
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_sminval_i8:
 ; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: xorb $-128, %al
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -128(%rdi), %eax
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %r = xor i8 %x, 128
@@ -74,8 +74,8 @@ define i32 @xor_sminval_i32(i32 %x) {
 ;
 ; X64-LABEL: xor_sminval_i32:
 ; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -2147483648(%rdi), %eax
 ; X64-NEXT: retq
 %r = xor i32 %x, 2147483648
 ret i32 %r
@@ -124,7 +124,7 @@ define i8 @xor_add_sminval_i8(i8 %x, i8 %y) {
 ; X86: # %bb.0:
 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT: addb {{[0-9]+}}(%esp), %al
-; X86-NEXT: xorb $-128, %al
+; X86-NEXT: addb $-128, %al
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_add_sminval_i8:
@@ -132,7 +132,7 @@ define i8 @xor_add_sminval_i8(i8 %x, i8 %y) {
 ; X64-NEXT: # kill: def $esi killed $esi def $rsi
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: leal (%rdi,%rsi), %eax
-; X64-NEXT: xorb $-128, %al
+; X64-NEXT: addb $-128, %al
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %s = add i8 %x, %y
@@ -166,14 +166,14 @@ define i32 @xor_add_sminval_i32(i32 %x) {
 ; X86: # %bb.0:
 ; X86-NEXT: movl $512, %eax # imm = 0x200
 ; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %eax # imm = 0x80000000
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_add_sminval_i32:
 ; X64: # %bb.0:
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: leal 512(%rdi), %eax
-; X64-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT: addl $-2147483648, %eax # imm = 0x80000000
 ; X64-NEXT: retq
 %s = add i32 %x, 512
 %r = xor i32 %s, 2147483648
@@ -187,7 +187,7 @@ define i64 @xor_add_sminval_i64(i64 %x, i64 %y) {
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_add_sminval_i64:
@@ -209,14 +209,14 @@ define i8 @sub_xor_sminval_i8(i8 %x, i8 %y) {
 ; X86-LABEL: sub_xor_sminval_i8:
 ; X86: # %bb.0:
 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: xorb $-128, %al
+; X86-NEXT: addb $-128, %al
 ; X86-NEXT: subb {{[0-9]+}}(%esp), %al
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: sub_xor_sminval_i8:
 ; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: xorb $-128, %al
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -128(%rdi), %eax
 ; X64-NEXT: subb %sil, %al
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
@@ -295,14 +295,14 @@ define i8 @xor_shl_sminval_i8(i8 %x) {
 ; X86: # %bb.0:
 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT: addb %al, %al
-; X86-NEXT: xorb $-128, %al
+; X86-NEXT: addb $-128, %al
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_shl_sminval_i8:
 ; X64: # %bb.0:
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: leal (%rdi,%rdi), %eax
-; X64-NEXT: xorb $-128, %al
+; X64-NEXT: addb $-128, %al
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %s = shl i8 %x, 1
@@ -354,14 +354,14 @@ define i32 @xor_bigshl_sminval_i32(i32 %x) {
 ; X86: # %bb.0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: shll $8, %eax
-; X86-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %eax # imm = 0x80000000
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_bigshl_sminval_i32:
 ; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: shll $8, %eax
-; X64-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: shll $8, %edi
+; X64-NEXT: leal -2147483648(%rdi), %eax
 ; X64-NEXT: retq
 %s = shl i32 %x, 8
 %r = xor i32 %s, 2147483648
@@ -375,7 +375,7 @@ define i64 @xor_shl_sminval_i64(i64 %x) {
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT: shldl $2, %eax, %edx
 ; X86-NEXT: shll $2, %eax
-; X86-NEXT: xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT: addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_shl_sminval_i64: