| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | FileCheck --check-prefixes=CHECK-PTX64 %s
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 --nvptx-short-ptr | FileCheck --check-prefixes=CHECK-PTX-SHARED32 %s
; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | %ptxas-verify -arch=sm_90 %}
; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 --nvptx-short-ptr | %ptxas-verify -arch=sm_90 %}
| |
| target triple = "nvptx64-nvidia-cuda" |
| |
| declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1) |
| declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1), ptr addrspace(3), i32, i64, i1) |
| declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32) |
| declare void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1), i32, i64, i1) |
| |
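; Check that the global-to-shared::cluster bulk copy intrinsic lowers to
; cp.async.bulk.shared::cluster.global with mbarrier::complete_tx::bytes,
; covering the plain form plus the L2::cache_hint, multicast::cluster, and
; combined variants selected by the trailing i1 flag operands.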
| define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(3) %dst, i32 %size, i16 %mc, i64 %ch) { |
| ; CHECK-PTX64-LABEL: cp_async_bulk_g2s( |
| ; CHECK-PTX64: { |
| ; CHECK-PTX64-NEXT: .reg .b16 %rs<2>; |
| ; CHECK-PTX64-NEXT: .reg .b32 %r<2>; |
| ; CHECK-PTX64-NEXT: .reg .b64 %rd<5>; |
| ; CHECK-PTX64-EMPTY: |
| ; CHECK-PTX64-NEXT: // %bb.0: |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd1, [cp_async_bulk_g2s_param_0]; |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd2, [cp_async_bulk_g2s_param_1]; |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd3, [cp_async_bulk_g2s_param_2]; |
| ; CHECK-PTX64-NEXT: ld.param.u32 %r1, [cp_async_bulk_g2s_param_3]; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%rd3], [%rd1], %r1, [%rd2]; |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd4, [cp_async_bulk_g2s_param_5]; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint [%rd3], [%rd1], %r1, [%rd2], %rd4; |
| ; CHECK-PTX64-NEXT: ld.param.u16 %rs1, [cp_async_bulk_g2s_param_4]; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%rd3], [%rd1], %r1, [%rd2], %rs1; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%rd3], [%rd1], %r1, [%rd2], %rs1, %rd4; |
| ; CHECK-PTX64-NEXT: ret; |
| ; |
| ; CHECK-PTX-SHARED32-LABEL: cp_async_bulk_g2s( |
| ; CHECK-PTX-SHARED32: { |
| ; CHECK-PTX-SHARED32-NEXT: .reg .b16 %rs<2>; |
| ; CHECK-PTX-SHARED32-NEXT: .reg .b32 %r<4>; |
| ; CHECK-PTX-SHARED32-NEXT: .reg .b64 %rd<3>; |
| ; CHECK-PTX-SHARED32-EMPTY: |
| ; CHECK-PTX-SHARED32-NEXT: // %bb.0: |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u64 %rd1, [cp_async_bulk_g2s_param_0]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r1, [cp_async_bulk_g2s_param_1]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r2, [cp_async_bulk_g2s_param_2]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r3, [cp_async_bulk_g2s_param_3]; |
| ; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%r2], [%rd1], %r3, [%r1]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u64 %rd2, [cp_async_bulk_g2s_param_5]; |
| ; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rd2; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u16 %rs1, [cp_async_bulk_g2s_param_4]; |
| ; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%r2], [%rd1], %r3, [%r1], %rs1; |
| ; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rs1, %rd2; |
| ; CHECK-PTX-SHARED32-NEXT: ret; |
| tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 0, i1 0) |
| tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 %ch, i1 0, i1 1) |
| tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 0, i1 1, i1 0) |
| tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 1) |
| ret void |
| } |
| |
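; Check that the shared::cta-to-global bulk copy intrinsic lowers to
; cp.async.bulk.global.shared::cta.bulk_group, with and without the
; L2::cache_hint operand.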
| define void @cp_async_bulk_s2g(ptr addrspace(3) %src, ptr addrspace(1) %dst, i32 %size, i64 %ch) { |
| ; CHECK-PTX64-LABEL: cp_async_bulk_s2g( |
| ; CHECK-PTX64: { |
| ; CHECK-PTX64-NEXT: .reg .b32 %r<2>; |
| ; CHECK-PTX64-NEXT: .reg .b64 %rd<4>; |
| ; CHECK-PTX64-EMPTY: |
| ; CHECK-PTX64-NEXT: // %bb.0: |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd1, [cp_async_bulk_s2g_param_0]; |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd2, [cp_async_bulk_s2g_param_1]; |
| ; CHECK-PTX64-NEXT: ld.param.u32 %r1, [cp_async_bulk_s2g_param_2]; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.global.shared::cta.bulk_group [%rd2], [%rd1], %r1; |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd3, [cp_async_bulk_s2g_param_3]; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.global.shared::cta.bulk_group.L2::cache_hint [%rd2], [%rd1], %r1, %rd3; |
| ; CHECK-PTX64-NEXT: ret; |
| ; |
| ; CHECK-PTX-SHARED32-LABEL: cp_async_bulk_s2g( |
| ; CHECK-PTX-SHARED32: { |
| ; CHECK-PTX-SHARED32-NEXT: .reg .b32 %r<3>; |
| ; CHECK-PTX-SHARED32-NEXT: .reg .b64 %rd<3>; |
| ; CHECK-PTX-SHARED32-EMPTY: |
| ; CHECK-PTX-SHARED32-NEXT: // %bb.0: |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r1, [cp_async_bulk_s2g_param_0]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u64 %rd1, [cp_async_bulk_s2g_param_1]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r2, [cp_async_bulk_s2g_param_2]; |
| ; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.global.shared::cta.bulk_group [%rd1], [%r1], %r2; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u64 %rd2, [cp_async_bulk_s2g_param_3]; |
| ; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.global.shared::cta.bulk_group.L2::cache_hint [%rd1], [%r1], %r2, %rd2; |
| ; CHECK-PTX-SHARED32-NEXT: ret; |
| tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1) %dst, ptr addrspace(3) %src, i32 %size, i64 0, i1 0) |
| tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1) %dst, ptr addrspace(3) %src, i32 %size, i64 %ch, i1 1) |
| ret void |
| } |
| |
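; Check that the shared::cta-to-shared::cluster bulk copy intrinsic lowers to
; cp.async.bulk.shared::cluster.shared::cta with mbarrier::complete_tx::bytes.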
| define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3) %bar, ptr addrspace(3) %dst, i32 %size) { |
| ; CHECK-PTX64-LABEL: cp_async_bulk_cta_to_cluster( |
| ; CHECK-PTX64: { |
| ; CHECK-PTX64-NEXT: .reg .b32 %r<2>; |
| ; CHECK-PTX64-NEXT: .reg .b64 %rd<4>; |
| ; CHECK-PTX64-EMPTY: |
| ; CHECK-PTX64-NEXT: // %bb.0: |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd1, [cp_async_bulk_cta_to_cluster_param_0]; |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd2, [cp_async_bulk_cta_to_cluster_param_1]; |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd3, [cp_async_bulk_cta_to_cluster_param_2]; |
| ; CHECK-PTX64-NEXT: ld.param.u32 %r1, [cp_async_bulk_cta_to_cluster_param_3]; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.shared::cta.mbarrier::complete_tx::bytes [%rd3], [%rd1], %r1, [%rd2]; |
| ; CHECK-PTX64-NEXT: ret; |
| ; |
| ; CHECK-PTX-SHARED32-LABEL: cp_async_bulk_cta_to_cluster( |
| ; CHECK-PTX-SHARED32: { |
| ; CHECK-PTX-SHARED32-NEXT: .reg .b32 %r<5>; |
| ; CHECK-PTX-SHARED32-EMPTY: |
| ; CHECK-PTX-SHARED32-NEXT: // %bb.0: |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r1, [cp_async_bulk_cta_to_cluster_param_0]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r2, [cp_async_bulk_cta_to_cluster_param_1]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r3, [cp_async_bulk_cta_to_cluster_param_2]; |
| ; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r4, [cp_async_bulk_cta_to_cluster_param_3]; |
| ; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.shared::cta.mbarrier::complete_tx::bytes [%r3], [%r1], %r4, [%r2]; |
| ; CHECK-PTX-SHARED32-NEXT: ret; |
| tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size) |
| ret void |
| } |
| |
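; Check that the bulk prefetch intrinsic lowers to cp.async.bulk.prefetch.L2.global,
; with and without the L2::cache_hint operand.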
| define void @cp_async_bulk_prefetch(ptr addrspace(1) %src, i32 %size, i64 %ch) { |
| ; CHECK-PTX64-LABEL: cp_async_bulk_prefetch( |
| ; CHECK-PTX64: { |
| ; CHECK-PTX64-NEXT: .reg .b32 %r<2>; |
| ; CHECK-PTX64-NEXT: .reg .b64 %rd<3>; |
| ; CHECK-PTX64-EMPTY: |
| ; CHECK-PTX64-NEXT: // %bb.0: |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd1, [cp_async_bulk_prefetch_param_0]; |
| ; CHECK-PTX64-NEXT: ld.param.u32 %r1, [cp_async_bulk_prefetch_param_1]; |
| ; CHECK-PTX64-NEXT: ld.param.u64 %rd2, [cp_async_bulk_prefetch_param_2]; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.prefetch.L2.global.L2::cache_hint [%rd1], %r1, %rd2; |
| ; CHECK-PTX64-NEXT: cp.async.bulk.prefetch.L2.global [%rd1], %r1; |
| ; CHECK-PTX64-NEXT: ret; |
| tail call void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1) %src, i32 %size, i64 %ch, i1 1) |
| tail call void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1) %src, i32 %size, i64 0, i1 0) |
| ret void |
| } |