| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,WAVE64 %s |
| ; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1031 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,WAVE32 %s |
| |
; End-to-end tests for scalar vs. vector boolean legalization strategies.
| |
; Select whose condition is an i1 truncated from a uniform (SGPR) argument but
; whose operands are divergent (VGPR). The generated code shows the vector
; (per-lane mask) boolean strategy: bit 0 is isolated with s_and_b32, then
; compared against zero into vcc (wave64) / vcc_lo (wave32) to feed
; v_cndmask_b32.
define amdgpu_ps float @select_vgpr_sgpr_trunc_cond(i32 inreg %a, i32 %b, i32 %c) {
; WAVE64-LABEL: select_vgpr_sgpr_trunc_cond:
; WAVE64: ; %bb.0:
; WAVE64-NEXT: s_and_b32 s0, 1, s0
; WAVE64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
; WAVE64-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; WAVE64-NEXT: ; return to shader part epilog
;
; WAVE32-LABEL: select_vgpr_sgpr_trunc_cond:
; WAVE32: ; %bb.0:
; WAVE32-NEXT: s_and_b32 s0, 1, s0
; WAVE32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
; WAVE32-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; WAVE32-NEXT: ; return to shader part epilog
; Only bit 0 of %a is meaningful after the trunc.
%cc = trunc i32 %a to i1
%r = select i1 %cc, i32 %b, i32 %c
; Bitcast so the selected i32 can be returned as the float shader result.
%r.f = bitcast i32 %r to float
ret float %r.f
}
| |
; Same as above, but the i1 condition is the AND of two truncated uniform
; inputs. The checks show the AND is done as a 32-bit scalar op (s_and_b32)
; before the single bit-0 mask and the compare into vcc/vcc_lo for the
; VGPR select.
define amdgpu_ps float @select_vgpr_sgpr_trunc_and_cond(i32 inreg %a.0, i32 inreg %a.1, i32 %b, i32 %c) {
; WAVE64-LABEL: select_vgpr_sgpr_trunc_and_cond:
; WAVE64: ; %bb.0:
; WAVE64-NEXT: s_and_b32 s0, s0, s1
; WAVE64-NEXT: s_and_b32 s0, 1, s0
; WAVE64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
; WAVE64-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; WAVE64-NEXT: ; return to shader part epilog
;
; WAVE32-LABEL: select_vgpr_sgpr_trunc_and_cond:
; WAVE32: ; %bb.0:
; WAVE32-NEXT: s_and_b32 s0, s0, s1
; WAVE32-NEXT: s_and_b32 s0, 1, s0
; WAVE32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
; WAVE32-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; WAVE32-NEXT: ; return to shader part epilog
%cc.0 = trunc i32 %a.0 to i1
%cc.1 = trunc i32 %a.1 to i1
%and = and i1 %cc.0, %cc.1
%r = select i1 %and, i32 %b, i32 %c
; Bitcast so the selected i32 can be returned as the float shader result.
%r.f = bitcast i32 %r to float
ret float %r.f
}
| |
; Fully uniform case: condition and both select operands are SGPRs, so the
; scalar boolean strategy is used — s_cmp_lg_u32 sets SCC and s_cselect_b32
; picks the result. The sequence is identical for wave64 and wave32, hence
; the shared GCN check prefix.
define amdgpu_ps i32 @select_sgpr_trunc_and_cond(i32 inreg %a.0, i32 inreg %a.1, i32 inreg %b, i32 inreg %c) {
; GCN-LABEL: select_sgpr_trunc_and_cond:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, s0, s1
; GCN-NEXT: s_and_b32 s0, s0, 1
; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_cselect_b32 s0, s2, s3
; GCN-NEXT: ; return to shader part epilog
%cc.0 = trunc i32 %a.0 to i1
%cc.1 = trunc i32 %a.1 to i1
%and = and i1 %cc.0, %cc.1
%r = select i1 %and, i32 %b, i32 %c
ret i32 %r
}
| |
; Conditional branch on an i1 truncated from a uniform kernel argument.
; The checks show a scalar lowering: the condition bit is inverted
; (s_xor_b32 ... 1), masked to bit 0, and compared so s_cbranch_scc1 skips
; bb0 and falls through into it when the original condition is true.
; Both successors end in unreachable after a volatile store, which keeps
; each block live.
define amdgpu_kernel void @sgpr_trunc_brcond(i32 %cond) {
; WAVE64-LABEL: sgpr_trunc_brcond:
; WAVE64: ; %bb.0: ; %entry
; WAVE64-NEXT: s_load_dword s0, s[4:5], 0x24
; WAVE64-NEXT: s_waitcnt lgkmcnt(0)
; WAVE64-NEXT: s_xor_b32 s0, s0, 1
; WAVE64-NEXT: s_and_b32 s0, s0, 1
; WAVE64-NEXT: s_cmp_lg_u32 s0, 0
; WAVE64-NEXT: s_cbranch_scc1 .LBB3_2
; WAVE64-NEXT: ; %bb.1: ; %bb0
; WAVE64-NEXT: v_mov_b32_e32 v0, 0
; WAVE64-NEXT: global_store_dword v[0:1], v0, off
; WAVE64-NEXT: s_waitcnt vmcnt(0)
; WAVE64-NEXT: .LBB3_2: ; %bb1
; WAVE64-NEXT: v_mov_b32_e32 v0, 1
; WAVE64-NEXT: global_store_dword v[0:1], v0, off
; WAVE64-NEXT: s_waitcnt vmcnt(0)
;
; WAVE32-LABEL: sgpr_trunc_brcond:
; WAVE32: ; %bb.0: ; %entry
; WAVE32-NEXT: s_load_dword s0, s[4:5], 0x24
; WAVE32-NEXT: s_waitcnt lgkmcnt(0)
; WAVE32-NEXT: s_xor_b32 s0, s0, 1
; WAVE32-NEXT: s_and_b32 s0, s0, 1
; WAVE32-NEXT: s_cmp_lg_u32 s0, 0
; WAVE32-NEXT: s_cbranch_scc1 .LBB3_2
; WAVE32-NEXT: ; %bb.1: ; %bb0
; WAVE32-NEXT: v_mov_b32_e32 v0, 0
; WAVE32-NEXT: global_store_dword v[0:1], v0, off
; WAVE32-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE32-NEXT: .LBB3_2: ; %bb1
; WAVE32-NEXT: v_mov_b32_e32 v0, 1
; WAVE32-NEXT: global_store_dword v[0:1], v0, off
; WAVE32-NEXT: s_waitcnt_vscnt null, 0x0
entry:
%trunc = trunc i32 %cond to i1
br i1 %trunc, label %bb0, label %bb1

bb0:
; Volatile store + unreachable so the block cannot be optimized away.
store volatile i32 0, ptr addrspace(1) undef
unreachable

bb1:
store volatile i32 1, ptr addrspace(1) undef
unreachable
}
| |
; Conditional branch on the AND of two truncated uniform kernel arguments.
; As in sgpr_trunc_brcond, the checks show a fully scalar lowering: the AND
; is a 32-bit s_and_b32, then the combined bit is inverted, masked, and
; branched on via SCC. Both successors end in unreachable after a volatile
; store, which keeps each block live.
define amdgpu_kernel void @brcond_sgpr_trunc_and(i32 %cond0, i32 %cond1) {
; WAVE64-LABEL: brcond_sgpr_trunc_and:
; WAVE64: ; %bb.0: ; %entry
; WAVE64-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; WAVE64-NEXT: s_waitcnt lgkmcnt(0)
; WAVE64-NEXT: s_and_b32 s0, s0, s1
; WAVE64-NEXT: s_xor_b32 s0, s0, 1
; WAVE64-NEXT: s_and_b32 s0, s0, 1
; WAVE64-NEXT: s_cmp_lg_u32 s0, 0
; WAVE64-NEXT: s_cbranch_scc1 .LBB4_2
; WAVE64-NEXT: ; %bb.1: ; %bb0
; WAVE64-NEXT: v_mov_b32_e32 v0, 0
; WAVE64-NEXT: global_store_dword v[0:1], v0, off
; WAVE64-NEXT: s_waitcnt vmcnt(0)
; WAVE64-NEXT: .LBB4_2: ; %bb1
; WAVE64-NEXT: v_mov_b32_e32 v0, 1
; WAVE64-NEXT: global_store_dword v[0:1], v0, off
; WAVE64-NEXT: s_waitcnt vmcnt(0)
;
; WAVE32-LABEL: brcond_sgpr_trunc_and:
; WAVE32: ; %bb.0: ; %entry
; WAVE32-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; WAVE32-NEXT: s_waitcnt lgkmcnt(0)
; WAVE32-NEXT: s_and_b32 s0, s0, s1
; WAVE32-NEXT: s_xor_b32 s0, s0, 1
; WAVE32-NEXT: s_and_b32 s0, s0, 1
; WAVE32-NEXT: s_cmp_lg_u32 s0, 0
; WAVE32-NEXT: s_cbranch_scc1 .LBB4_2
; WAVE32-NEXT: ; %bb.1: ; %bb0
; WAVE32-NEXT: v_mov_b32_e32 v0, 0
; WAVE32-NEXT: global_store_dword v[0:1], v0, off
; WAVE32-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE32-NEXT: .LBB4_2: ; %bb1
; WAVE32-NEXT: v_mov_b32_e32 v0, 1
; WAVE32-NEXT: global_store_dword v[0:1], v0, off
; WAVE32-NEXT: s_waitcnt_vscnt null, 0x0
entry:
%trunc0 = trunc i32 %cond0 to i1
%trunc1 = trunc i32 %cond1 to i1
%and = and i1 %trunc0, %trunc1
br i1 %and, label %bb0, label %bb1

bb0:
; Volatile store + unreachable so the block cannot be optimized away.
store volatile i32 0, ptr addrspace(1) undef
unreachable

bb1:
store volatile i32 1, ptr addrspace(1) undef
unreachable
}