AMDGPU: Move bf16 copysign tests to separate file #142114
Merged: arsenm merged 2 commits into main from users/arsenm/amdgpu/move-bf16-copysign-tests-separate-file on May 30, 2025
Conversation
@llvm/pr-subscribers-backend-amdgpu
Author: Matt Arsenault (arsenm)

Changes: Make symmetric with other copysign tests.

Patch is 35.67 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/142114.diff

1 file affected:
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
new file mode 100644
index 0000000000000..4fcce8a6d623f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
@@ -0,0 +1,959 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=amdgcn | FileCheck %s -check-prefixes=GCN
+; RUN: llc < %s -mtriple=amdgcn -mcpu=hawaii | FileCheck %s -check-prefixes=GFX7
+; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck %s -check-prefixes=GFX8
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx900 | FileCheck %s -check-prefixes=GFX9
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1010 | FileCheck %s -check-prefixes=GFX10
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 | FileCheck %s -check-prefixes=GFX11,GFX11TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 | FileCheck %s -check-prefixes=GFX11,GFX11FAKE16
+
+declare bfloat @llvm.copysign.bf16(bfloat, bfloat)
+
+define bfloat @v_copysign_bf16_bf16(bfloat %mag, bfloat %sign) {
+; GCN-LABEL: v_copysign_bf16_bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0x80000000, v1
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, v0, v1
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_copysign_bf16_bf16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0x80000000, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_copysign_bf16_bf16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_copysign_bf16_bf16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_copysign_bf16_bf16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_copysign_bf16_bf16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign)
+ ret bfloat %op
+}
+
+define bfloat @v_copysign_bf16_s_bf16(bfloat %mag, bfloat inreg %sign) {
+; GCN-LABEL: v_copysign_bf16_s_bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: s_and_b32 s4, s16, 0x80000000
+; GCN-NEXT: s_lshr_b32 s4, s4, 16
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, s4, v0
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_copysign_bf16_s_bf16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: s_and_b32 s4, s16, 0x80000000
+; GFX7-NEXT: s_lshr_b32 s4, s4, 16
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_copysign_bf16_s_bf16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_mov_b32_e32 v1, s16
+; GFX8-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_copysign_bf16_s_bf16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_mov_b32_e32 v1, s16
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_copysign_bf16_s_bf16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, s16
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_copysign_bf16_s_bf16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign)
+ ret bfloat %op
+}
+
+define bfloat @v_copysign_s_bf16_bf16(bfloat inreg %mag, bfloat %sign) {
+; GCN-LABEL: v_copysign_s_bf16_bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e64 v1, 1.0, s16
+; GCN-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GCN-NEXT: v_bfe_u32 v1, v1, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, v1, v0
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_copysign_s_bf16_bf16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e64 v1, 1.0, s16
+; GFX7-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_copysign_s_bf16_bf16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_mov_b32_e32 v1, s16
+; GFX8-NEXT: v_bfi_b32 v0, s4, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_copysign_s_bf16_bf16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_mov_b32_e32 v1, s16
+; GFX9-NEXT: v_bfi_b32 v0, s4, v1, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_copysign_s_bf16_bf16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, s16, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_copysign_s_bf16_bf16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, s0, v0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign)
+ ret bfloat %op
+}
+
+define bfloat @v_copysign_bf16_f32(bfloat %mag, float %sign.f32) {
+; GCN-LABEL: v_copysign_bf16_f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0x80000000, v1
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, v0, v1
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_copysign_bf16_f32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0x80000000, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_copysign_bf16_f32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_copysign_bf16_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_copysign_bf16_f32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_copysign_bf16_f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %sign = fptrunc float %sign.f32 to bfloat
+ %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign)
+ ret bfloat %op
+}
+
+define bfloat @v_copysign_bf16_f64(bfloat %mag, double %sign.f64) {
+; GCN-LABEL: v_copysign_bf16_f64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0x80000000, v2
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, v0, v1
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_copysign_bf16_f64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0x80000000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_copysign_bf16_f64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_copysign_bf16_f64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_copysign_bf16_f64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_copysign_bf16_f64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %sign = fptrunc double %sign.f64 to bfloat
+ %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign)
+ ret bfloat %op
+}
+
+define bfloat @v_copysign_bf16_f16(bfloat %mag, half %sign.f16) {
+; GCN-LABEL: v_copysign_bf16_f16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0x8000, v1
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, v0, v1
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_copysign_bf16_f16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_and_b32_e32 v1, 0x8000, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_copysign_bf16_f16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_copysign_bf16_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_copysign_bf16_f16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_copysign_bf16_f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %sign = bitcast half %sign.f16 to bfloat
+ %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign)
+ ret bfloat %op
+}
+
+define amdgpu_ps i32 @s_copysign_bf16_bf16(bfloat inreg %mag, bfloat inreg %sign) {
+; GCN-LABEL: s_copysign_bf16_bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GCN-NEXT: s_and_b32 s0, s1, 0x80000000
+; GCN-NEXT: s_lshr_b32 s0, s0, 16
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, s0, v0
+; GCN-NEXT: v_readfirstlane_b32 s0, v0
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX7-LABEL: s_copysign_bf16_bf16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GFX7-NEXT: s_and_b32 s0, s1, 0x80000000
+; GFX7-NEXT: s_lshr_b32 s0, s0, 16
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, s0, v0
+; GFX7-NEXT: v_readfirstlane_b32 s0, v0
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_copysign_bf16_bf16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_movk_i32 s2, 0x7fff
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: v_bfi_b32 v0, s2, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_copysign_bf16_bf16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_movk_i32 s2, 0x7fff
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: v_bfi_b32 v0, s2, v0, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_copysign_bf16_bf16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v0, s1
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, s0, v0
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: s_copysign_bf16_bf16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v0, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, s0, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v0
+; GFX11-NEXT: ; return to shader part epilog
+ %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign)
+ %cast = bitcast bfloat %op to i16
+ %zext = zext i16 %cast to i32
+ %readlane = call i32 @llvm.amdgcn.readfirstlane(i32 %zext)
+ ret i32 %readlane
+}
+
+define amdgpu_ps i32 @s_copysign_bf16_f32(bfloat inreg %mag, float inreg %sign.f32) {
+; GCN-LABEL: s_copysign_bf16_f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GCN-NEXT: s_and_b32 s0, s1, 0x80000000
+; GCN-NEXT: s_lshr_b32 s0, s0, 16
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, s0, v0
+; GCN-NEXT: v_readfirstlane_b32 s0, v0
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX7-LABEL: s_copysign_bf16_f32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GFX7-NEXT: s_and_b32 s0, s1, 0x80000000
+; GFX7-NEXT: s_lshr_b32 s0, s0, 16
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, s0, v0
+; GFX7-NEXT: v_readfirstlane_b32 s0, v0
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_copysign_bf16_f32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_lshrrev_b32_e64 v0, 16, s1
+; GFX8-NEXT: s_movk_i32 s1, 0x7fff
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_bfi_b32 v0, s1, v1, v0
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_copysign_bf16_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshrrev_b32_e64 v0, 16, s1
+; GFX9-NEXT: s_movk_i32 s1, 0x7fff
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_bfi_b32 v0, s1, v1, v0
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_copysign_bf16_f32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_lshrrev_b32_e64 v0, 16, s1
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, s0, v0
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: s_copysign_bf16_f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_lshrrev_b32_e64 v0, 16, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, s0, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v0
+; GFX11-NEXT: ; return to shader part epilog
+ %sign = fptrunc float %sign.f32 to bfloat
+ %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign)
+ %cast = bitcast bfloat %op to i16
+ %zext = zext i16 %cast to i32
+ %readlane = call i32 @llvm.amdgcn.readfirstlane(i32 %zext)
+ ret i32 %readlane
+}
+
+define amdgpu_ps i32 @s_copysign_bf16_f64(bfloat inreg %mag, double inreg %sign.f64) {
+; GCN-LABEL: s_copysign_bf16_f64:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GCN-NEXT: s_and_b32 s0, s2, 0x80000000
+; GCN-NEXT: s_lshr_b32 s0, s0, 16
+; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GCN-NEXT: v_or_b32_e32 v0, s0, v0
+; GCN-NEXT: v_readfirstlane_b32 s0, v0
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX7-LABEL: s_copysign_bf16_f64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GFX7-NEXT: s_and_b32 s0, s2, 0x80000000
+; GFX7-NEXT: s_lshr_b32 s0, s0, 16
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, s0, v0
+; GFX7-NEXT: v_readfirstlane_b32 s0, v0
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_copysign_bf16_f64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_lshrrev_b32_e64 v0, 16, s2
+; GFX8-NEXT: s_movk_i32 s1, 0x7fff
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_bfi_b32 v0, s1, v1, v0
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_copysign_bf16_f64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshrrev_b32_e64 v0, 16, s2
+; GFX9-NEXT: s_movk_i32 s1, 0x7fff
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_bfi_b32 v0, s1, v1, v0
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_copysign_bf16_f64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_lshrrev_b32_e64 v0, 16, s2
+; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, s0, v0
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: s_copysign_bf16_f64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_lshrrev_b32_e64 v0, 16, s2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, s0, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v0
+; GFX11-NEXT: ; return to shader part epilog
+ %s...
[truncated]
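For readers who want to extend or regenerate these checks locally, the pattern is the same throughout the file: add a function calling the overloaded llvm.copysign intrinsic, then rerun utils/update_llc_test_checks.py (referenced in the NOTE line at the top of the file) to refresh the CHECK lines. Below is a minimal sketch; the v2bf16 case is purely illustrative and is not claimed to match the truncated portion of the patch.

; NOTE: illustrative sketch only -- regenerate CHECK lines with
;   utils/update_llc_test_checks.py fcopysign.bf16.ll
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx900 | FileCheck %s -check-prefixes=GFX9

declare <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat>, <2 x bfloat>)

define <2 x bfloat> @v_copysign_v2bf16(<2 x bfloat> %mag, <2 x bfloat> %sign) {
  %op = call <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat> %mag, <2 x bfloat> %sign)
  ret <2 x bfloat> %op
}

As the checks above show for the scalar case, GFX9 and later lower bf16 copysign to a single v_bfi_b32 with the mask 0x7fff, while pre-GFX8 targets expand it with shifts, masks, and an or.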
Is it a "move" or does it add new tests? There doesn't seem to be any delete.
rampitec approved these changes on May 30, 2025.
Force-pushed from 8bfa014 to 9538c1f.
Base automatically changed from users/arsenm/amdgpu/avoid-kernels-fcopysign-f16-test to main on May 30, 2025 17:49.
Commit: Make symmetric with other copysign tests
Force-pushed from 5069193 to 90a16d1.