| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck %s |
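;
; This test mixes RVV load/store, arithmetic, fixed-point, comparison, and
; merge intrinsics on <vscale x 32 x i8>, all at vl=2: the two 2-byte
; constants below are loaded with vle, combined via vmul.vx, vmv.x.s,
; vssra, and vmsleu, and the vmerge result is stored through %var_99.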
| @__const.test.var_45 = private unnamed_addr constant [2 x i8] c"\D1S", align 1 |
| @__const.test.var_101 = private unnamed_addr constant [2 x i8] c"\830", align 1 |
| |
| ; Function Attrs: nounwind vscale_range(2,1024) |
define dso_local void @test(ptr nocapture noundef %var_99) #0 {
| ; CHECK-LABEL: test: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: lui a1, %hi(.L__const.test.var_45) |
| ; CHECK-NEXT: addi a1, a1, %lo(.L__const.test.var_45) |
| ; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, ma |
| ; CHECK-NEXT: vle8.v v8, (a1) |
| ; CHECK-NEXT: li a1, 1 |
| ; CHECK-NEXT: vmul.vx v12, v8, a1 |
| ; CHECK-NEXT: lui a1, %hi(.L__const.test.var_101) |
| ; CHECK-NEXT: addi a1, a1, %lo(.L__const.test.var_101) |
| ; CHECK-NEXT: vle8.v v16, (a1) |
| ; CHECK-NEXT: vmv.x.s a1, v12 |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vmsleu.vx v0, v8, a1 |
| ; CHECK-NEXT: vssra.vv v8, v16, v8 |
| ; CHECK-NEXT: vmerge.vvm v8, v8, v8, v0 |
| ; CHECK-NEXT: vse8.v v8, (a0) |
| ; CHECK-NEXT: ret |
| entry: |
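; Every intrinsic below operates at vl=2 on <vscale x 32 x i8> (e8/m4 given
; the 128-bit minimum VLEN from the RUN line).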
| %0 = tail call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> undef, ptr nonnull @__const.test.var_45, i64 2) |
| %1 = tail call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> %0, i8 1, i64 2) |
| %2 = tail call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> undef, ptr nonnull @__const.test.var_101, i64 2) |
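; The vsetvli result %3 is dead; the call appears to be kept only to
; exercise the backend's VSETVLI insertion.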
| %3 = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2) |
| %4 = tail call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %1) |
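; vssra takes rounding mode 0 (rnu) as its fourth operand, which is what
; materializes the "csrwi vxrm, 0" in the checked output.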
| %5 = tail call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> %2, <vscale x 32 x i8> %0, i64 0, i64 2) |
| %6 = tail call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> %0, i8 %4, i64 2) |
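; vmerge with identical true/false operands: %6 selects between two copies
; of %5, so the stored value is %5 regardless of the mask.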
| %7 = tail call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %5, <vscale x 32 x i8> %5, <vscale x 32 x i1> %6, i64 2) |
| tail call void @llvm.riscv.vse.nxv32i8.i64(<vscale x 32 x i8> %7, ptr %var_99, i64 2) |
| ret void |
| } |
| |
| declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8>, ptr nocapture, i64) #1 |
| declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i8, i64) #2 |
| declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3 |
| declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>) #2 |
| declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64, i64) #3 |
| declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8>, i8, i64) #2 |
| declare <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i64) #2 |
| declare void @llvm.riscv.vse.nxv32i8.i64(<vscale x 32 x i8>, ptr nocapture, i64) #4 |
| |
attributes #0 = { nounwind vscale_range(2,1024) }
attributes #1 = { nofree nounwind memory(read) }
| attributes #2 = { nofree nosync nounwind memory(none) } |
| attributes #3 = { nounwind } |
| attributes #4 = { nounwind memory(write) } |