; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s

; Simple "R" (wide scalar / register-pair) constraint: an i64 in/out on
; riscv32. The CHECK lines show the asm operands assigned to the a2/a3 pair
; and the i64 result moved into the a0/a1 return pair.
; CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
define i64 @test_Pr_wide_scalar_simple(i64 noundef %0) nounwind {
; CHECK-LABEL: test_Pr_wide_scalar_simple:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: #APP
; CHECK-NEXT: # a2 <- a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: ret
entry:
  %1 = call i64 asm sideeffect "/* $0 <- $1 */", "=&R,R"(i64 %0)
  ret i64 %1
}

; "R" constraint with surrounding arithmetic: an i32 is widened to i64,
; duplicated into both halves, passed through the asm, then the two halves
; are OR-ed back together. Exercises coalescing of the pair halves around
; the inline asm (the final `or a0, a2, a3` combines both result registers).
; CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
define i32 @test_Pr_wide_scalar_with_ops(i32 noundef %0) nounwind {
; CHECK-LABEL: test_Pr_wide_scalar_with_ops:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: #APP
; CHECK-NEXT: # a2 <- a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: or a0, a2, a3
; CHECK-NEXT: ret
entry:
  %1 = zext i32 %0 to i64
  %2 = shl i64 %1, 32
  %3 = or i64 %1, %2
  %4 = call i64 asm sideeffect "/* $0 <- $1 */", "=&R,R"(i64 %3)
  %5 = trunc i64 %4 to i32
  %6 = lshr i64 %4, 32
  %7 = trunc i64 %6 to i32
  %8 = or i32 %5, %7
  ret i32 %8
}

; "R" constraint used as a tied in/out operand ("=R" tied to input 1) in a
; multi-result asm alongside a plain "r" pointer operand, with both values
; round-tripped through stack slots (alloca/store/load kept alive by the
; side-effecting asm). Checks that the tied register pair (a2/a3) survives
; the struct extract/re-store sequence.
; CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
define i64 @test_Pr_wide_scalar_inout(ptr %0, i64 noundef %1) nounwind {
; CHECK-LABEL: test_Pr_wide_scalar_inout:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: sw a1, 0(sp)
; CHECK-NEXT: sw a3, 4(sp)
; CHECK-NEXT: #APP
; CHECK-NEXT: # a0; a2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: sw a2, 0(sp)
; CHECK-NEXT: sw a3, 4(sp)
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
entry:
  %2 = alloca ptr, align 4
  %3 = alloca i64, align 8
  store ptr %0, ptr %2, align 4
  store i64 %1, ptr %3, align 8
  %4 = load ptr, ptr %2, align 4
  %5 = load i64, ptr %3, align 8
  %6 = call { ptr, i64 } asm sideeffect "/* $0; $1 */", "=r,=R,0,1"(ptr %4, i64 %5)
  %7 = extractvalue { ptr, i64 } %6, 0
  %8 = extractvalue { ptr, i64 } %6, 1
  store ptr %7, ptr %2, align 4
  store i64 %8, ptr %3, align 8
  %9 = load i64, ptr %3, align 8
  ret i64 %9
}

; Same shape as test_Pr_wide_scalar_simple but using the "^cR" constraint
; form; the CHECK lines show identical codegen to the plain "R" variant
; (asm operands in the a2/a3 pair, result moved to a0/a1).
; CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
define i64 @test_cR_wide_scalar_simple(i64 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_simple:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: #APP
; CHECK-NEXT: # a2 <- a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: ret
entry:
  %1 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i64 %0)
  ret i64 %1
}

; "^cR" variant of test_Pr_wide_scalar_with_ops: i32 widened and duplicated
; into both i64 halves, passed through the asm, halves OR-ed back together.
; Expected codegen matches the plain "R" version.
; CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
define i32 @test_cR_wide_scalar_with_ops(i32 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_with_ops:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: #APP
; CHECK-NEXT: # a2 <- a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: or a0, a2, a3
; CHECK-NEXT: ret
entry:
  %1 = zext i32 %0 to i64
  %2 = shl i64 %1, 32
  %3 = or i64 %1, %2
  %4 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i64 %3)
  %5 = trunc i64 %4 to i32
  %6 = lshr i64 %4, 32
  %7 = trunc i64 %6 to i32
  %8 = or i32 %5, %7
  ret i32 %8
}

; "^cR" variant of test_Pr_wide_scalar_inout: tied in/out wide-scalar
; operand ("=^cR" tied to input 1) plus a plain "r" pointer, with both
; values round-tripped through stack slots. Expected codegen matches the
; plain "R" version.
; CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
define i64 @test_cR_wide_scalar_inout(ptr %0, i64 noundef %1) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_inout:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: sw a1, 0(sp)
; CHECK-NEXT: sw a3, 4(sp)
; CHECK-NEXT: #APP
; CHECK-NEXT: # a0; a2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: sw a2, 0(sp)
; CHECK-NEXT: sw a3, 4(sp)
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
entry:
  %2 = alloca ptr, align 4
  %3 = alloca i64, align 8
  store ptr %0, ptr %2, align 4
  store i64 %1, ptr %3, align 8
  %4 = load ptr, ptr %2, align 4
  %5 = load i64, ptr %3, align 8
  %6 = call { ptr, i64 } asm sideeffect "/* $0; $1 */", "=r,=^cR,0,1"(ptr %4, i64 %5)
  %7 = extractvalue { ptr, i64 } %6, 0
  %8 = extractvalue { ptr, i64 } %6, 1
  store ptr %7, ptr %2, align 4
  store i64 %8, ptr %3, align 8
  %9 = load i64, ptr %3, align 8
  ret i64 %9
}
