; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s

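; The lowering checked below builds an index vector with vid.v, reverses it with
; vrsub.vx against EVL-1, and permutes the source with vrgather.vv (or
; vrgatherei16.vv for i8 elements); the masked variants carry v0.t on each step.

; LMUL = 1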
define <vscale x 1 x i64> @test_vp_reverse_nxv1i64_masked(<vscale x 1 x i64> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv1i64_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vid.v v9, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%dst = call <vscale x 1 x i64> @llvm.experimental.vp.reverse.nxv1i64(<vscale x 1 x i64> %src, <vscale x 1 x i1> %mask, i32 %evl)
ret <vscale x 1 x i64> %dst
}

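; With an all-true mask the same sequence is expected, just without the v0.t operand.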
define <vscale x 1 x i64> @test_vp_reverse_nxv1i64(<vscale x 1 x i64> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a1
; CHECK-NEXT: vrgather.vv v9, v8, v10
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret

%dst = call <vscale x 1 x i64> @llvm.experimental.vp.reverse.nxv1i64(<vscale x 1 x i64> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
ret <vscale x 1 x i64> %dst
}

define <vscale x 2 x i32> @test_vp_reverse_nxv2i32_masked(<vscale x 2 x i32> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2i32_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vid.v v9, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%dst = call <vscale x 2 x i32> @llvm.experimental.vp.reverse.nxv2i32(<vscale x 2 x i32> %src, <vscale x 2 x i1> %mask, i32 %evl)
ret <vscale x 2 x i32> %dst
}

define <vscale x 2 x i32> @test_vp_reverse_nxv2i32(<vscale x 2 x i32> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a1
; CHECK-NEXT: vrgather.vv v9, v8, v10
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret

%dst = call <vscale x 2 x i32> @llvm.experimental.vp.reverse.nxv2i32(<vscale x 2 x i32> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
ret <vscale x 2 x i32> %dst
}

define <vscale x 4 x i16> @test_vp_reverse_nxv4i16_masked(<vscale x 4 x i16> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4i16_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vid.v v9, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%dst = call <vscale x 4 x i16> @llvm.experimental.vp.reverse.nxv4i16(<vscale x 4 x i16> %src, <vscale x 4 x i1> %mask, i32 %evl)
ret <vscale x 4 x i16> %dst
}

define <vscale x 4 x i16> @test_vp_reverse_nxv4i16(<vscale x 4 x i16> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a1
; CHECK-NEXT: vrgather.vv v9, v8, v10
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret

%dst = call <vscale x 4 x i16> @llvm.experimental.vp.reverse.nxv4i16(<vscale x 4 x i16> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
ret <vscale x 4 x i16> %dst
}

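; For i8 elements the index vector is built at e16 (twice the data LMUL) and the
; gather uses vrgatherei16.vv, since the reversed indices may not fit in 8 bits.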
define <vscale x 8 x i8> @test_vp_reverse_nxv8i8_masked(<vscale x 8 x i8> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i8_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v9, v8, v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%dst = call <vscale x 8 x i8> @llvm.experimental.vp.reverse.nxv8i8(<vscale x 8 x i8> %src, <vscale x 8 x i1> %mask, i32 %evl)
ret <vscale x 8 x i8> %dst
}

define <vscale x 8 x i8> @test_vp_reverse_nxv8i8(<vscale x 8 x i8> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: vrsub.vx v10, v10, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v9, v8, v10
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret

%dst = call <vscale x 8 x i8> @llvm.experimental.vp.reverse.nxv8i8(<vscale x 8 x i8> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
ret <vscale x 8 x i8> %dst
}

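; LMUL = 2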
define <vscale x 2 x i64> @test_vp_reverse_nxv2i64_masked(<vscale x 2 x i64> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2i64_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v12, v10, a0, v0.t
; CHECK-NEXT: vrgather.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%dst = call <vscale x 2 x i64> @llvm.experimental.vp.reverse.nxv2i64(<vscale x 2 x i64> %src, <vscale x 2 x i1> %mask, i32 %evl)
ret <vscale x 2 x i64> %dst
}

define <vscale x 2 x i64> @test_vp_reverse_nxv2i64(<vscale x 2 x i64> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: vrsub.vx v12, v10, a1
; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret

%dst = call <vscale x 2 x i64> @llvm.experimental.vp.reverse.nxv2i64(<vscale x 2 x i64> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
ret <vscale x 2 x i64> %dst
}

define <vscale x 4 x i32> @test_vp_reverse_nxv4i32_masked(<vscale x 4 x i32> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4i32_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v12, v10, a0, v0.t
; CHECK-NEXT: vrgather.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%dst = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> %src, <vscale x 4 x i1> %mask, i32 %evl)
ret <vscale x 4 x i32> %dst
}

define <vscale x 4 x i32> @test_vp_reverse_nxv4i32(<vscale x 4 x i32> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: vrsub.vx v12, v10, a1
; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret

%dst = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
ret <vscale x 4 x i32> %dst
}

define <vscale x 8 x i16> @test_vp_reverse_nxv8i16_masked(<vscale x 8 x i16> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i16_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v12, v10, a0, v0.t
; CHECK-NEXT: vrgather.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%dst = call <vscale x 8 x i16> @llvm.experimental.vp.reverse.nxv8i16(<vscale x 8 x i16> %src, <vscale x 8 x i1> %mask, i32 %evl)
ret <vscale x 8 x i16> %dst
}

define <vscale x 8 x i16> @test_vp_reverse_nxv8i16(<vscale x 8 x i16> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: vrsub.vx v12, v10, a1
; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret

%dst = call <vscale x 8 x i16> @llvm.experimental.vp.reverse.nxv8i16(<vscale x 8 x i16> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
ret <vscale x 8 x i16> %dst
}

define <vscale x 16 x i8> @test_vp_reverse_nxv16i8_masked(<vscale x 16 x i8> %src, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16i8_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vid.v v12, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v12, v12, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT: vrgatherei16.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%dst = call <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8> %src, <vscale x 16 x i1> %mask, i32 %evl)
ret <vscale x 16 x i8> %dst
}

define <vscale x 16 x i8> @test_vp_reverse_nxv16i8(<vscale x 16 x i8> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vx v12, v12, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret

%dst = call <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
ret <vscale x 16 x i8> %dst
}

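; LMUL = 4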
define <vscale x 4 x i64> @test_vp_reverse_nxv4i64_masked(<vscale x 4 x i64> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4i64_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vid.v v12, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v16, v12, a0, v0.t
; CHECK-NEXT: vrgather.vv v12, v8, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%dst = call <vscale x 4 x i64> @llvm.experimental.vp.reverse.nxv4i64(<vscale x 4 x i64> %src, <vscale x 4 x i1> %mask, i32 %evl)
ret <vscale x 4 x i64> %dst
}

define <vscale x 4 x i64> @test_vp_reverse_nxv4i64(<vscale x 4 x i64> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vx v16, v12, a1
; CHECK-NEXT: vrgather.vv v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret

%dst = call <vscale x 4 x i64> @llvm.experimental.vp.reverse.nxv4i64(<vscale x 4 x i64> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
ret <vscale x 4 x i64> %dst
}

define <vscale x 8 x i32> @test_vp_reverse_nxv8i32_masked(<vscale x 8 x i32> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i32_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vid.v v12, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v16, v12, a0, v0.t
; CHECK-NEXT: vrgather.vv v12, v8, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%dst = call <vscale x 8 x i32> @llvm.experimental.vp.reverse.nxv8i32(<vscale x 8 x i32> %src, <vscale x 8 x i1> %mask, i32 %evl)
ret <vscale x 8 x i32> %dst
}

define <vscale x 8 x i32> @test_vp_reverse_nxv8i32(<vscale x 8 x i32> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vx v16, v12, a1
; CHECK-NEXT: vrgather.vv v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret

%dst = call <vscale x 8 x i32> @llvm.experimental.vp.reverse.nxv8i32(<vscale x 8 x i32> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
ret <vscale x 8 x i32> %dst
}

define <vscale x 16 x i16> @test_vp_reverse_nxv16i16_masked(<vscale x 16 x i16> %src, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16i16_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vid.v v12, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v16, v12, a0, v0.t
; CHECK-NEXT: vrgather.vv v12, v8, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%dst = call <vscale x 16 x i16> @llvm.experimental.vp.reverse.nxv16i16(<vscale x 16 x i16> %src, <vscale x 16 x i1> %mask, i32 %evl)
ret <vscale x 16 x i16> %dst
}

define <vscale x 16 x i16> @test_vp_reverse_nxv16i16(<vscale x 16 x i16> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vx v16, v12, a1
; CHECK-NEXT: vrgather.vv v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret

%dst = call <vscale x 16 x i16> @llvm.experimental.vp.reverse.nxv16i16(<vscale x 16 x i16> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
ret <vscale x 16 x i16> %dst
}

define <vscale x 32 x i8> @test_vp_reverse_nxv32i8_masked(<vscale x 32 x i8> %src, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv32i8_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v16, v16, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%dst = call <vscale x 32 x i8> @llvm.experimental.vp.reverse.nxv32i8(<vscale x 32 x i8> %src, <vscale x 32 x i1> %mask, i32 %evl)
ret <vscale x 32 x i8> %dst
}

define <vscale x 32 x i8> @test_vp_reverse_nxv32i8(<vscale x 32 x i8> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vrsub.vx v16, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret

%dst = call <vscale x 32 x i8> @llvm.experimental.vp.reverse.nxv32i8(<vscale x 32 x i8> %src, <vscale x 32 x i1> splat (i1 1), i32 %evl)
ret <vscale x 32 x i8> %dst
}

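; LMUL = 8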
define <vscale x 8 x i64> @test_vp_reverse_nxv8i64_masked(<vscale x 8 x i64> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i64_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vid.v v16, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v24, v16, a0, v0.t
; CHECK-NEXT: vrgather.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%dst = call <vscale x 8 x i64> @llvm.experimental.vp.reverse.nxv8i64(<vscale x 8 x i64> %src, <vscale x 8 x i1> %mask, i32 %evl)
ret <vscale x 8 x i64> %dst
}

define <vscale x 8 x i64> @test_vp_reverse_nxv8i64(<vscale x 8 x i64> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vrsub.vx v24, v16, a1
; CHECK-NEXT: vrgather.vv v16, v8, v24
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret

%dst = call <vscale x 8 x i64> @llvm.experimental.vp.reverse.nxv8i64(<vscale x 8 x i64> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
ret <vscale x 8 x i64> %dst
}

define <vscale x 16 x i32> @test_vp_reverse_nxv16i32_masked(<vscale x 16 x i32> %src, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16i32_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vid.v v16, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v24, v16, a0, v0.t
; CHECK-NEXT: vrgather.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%dst = call <vscale x 16 x i32> @llvm.experimental.vp.reverse.nxv16i32(<vscale x 16 x i32> %src, <vscale x 16 x i1> %mask, i32 %evl)
ret <vscale x 16 x i32> %dst
}

define <vscale x 16 x i32> @test_vp_reverse_nxv16i32(<vscale x 16 x i32> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vrsub.vx v24, v16, a1
; CHECK-NEXT: vrgather.vv v16, v8, v24
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret

%dst = call <vscale x 16 x i32> @llvm.experimental.vp.reverse.nxv16i32(<vscale x 16 x i32> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
ret <vscale x 16 x i32> %dst
}

define <vscale x 32 x i16> @test_vp_reverse_nxv32i16_masked(<vscale x 32 x i16> %src, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv32i16_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v24, v16, a0, v0.t
; CHECK-NEXT: vrgather.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%dst = call <vscale x 32 x i16> @llvm.experimental.vp.reverse.nxv32i16(<vscale x 32 x i16> %src, <vscale x 32 x i1> %mask, i32 %evl)
ret <vscale x 32 x i16> %dst
}

define <vscale x 32 x i16> @test_vp_reverse_nxv32i16(<vscale x 32 x i16> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vrsub.vx v24, v16, a1
; CHECK-NEXT: vrgather.vv v16, v8, v24
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret

%dst = call <vscale x 32 x i16> @llvm.experimental.vp.reverse.nxv32i16(<vscale x 32 x i16> %src, <vscale x 32 x i1> splat (i1 1), i32 %evl)
ret <vscale x 32 x i16> %dst
}

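; At e8/m8 a 16-bit index vector for the whole group would need LMUL=16, so each
; m1 register of the source is reversed separately with vrgatherei16.vv into the
; destination group in reverse register order, and the VLMAX-element reverse is
; then shifted into place with vslidedown.vx by VLMAX-EVL.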
define <vscale x 64 x i8> @test_vp_reverse_nxv64i8_masked(<vscale x 64 x i8> %src, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv64i8_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: addi a2, a1, -1
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: vrsub.vx v24, v16, a2
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v23, v8, v24
; CHECK-NEXT: vrgatherei16.vv v22, v9, v24
; CHECK-NEXT: vrgatherei16.vv v21, v10, v24
; CHECK-NEXT: vrgatherei16.vv v20, v11, v24
; CHECK-NEXT: vrgatherei16.vv v19, v12, v24
; CHECK-NEXT: vrgatherei16.vv v18, v13, v24
; CHECK-NEXT: vrgatherei16.vv v17, v14, v24
; CHECK-NEXT: vrgatherei16.vv v16, v15, v24
; CHECK-NEXT: sub a1, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v16, a1, v0.t
; CHECK-NEXT: ret
%dst = call <vscale x 64 x i8> @llvm.experimental.vp.reverse.nxv64i8(<vscale x 64 x i8> %src, <vscale x 64 x i1> %mask, i32 %evl)
ret <vscale x 64 x i8> %dst
}

define <vscale x 64 x i8> @test_vp_reverse_nxv64i8(<vscale x 64 x i8> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: addi a2, a1, -1
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: vrsub.vx v24, v16, a2
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v23, v8, v24
; CHECK-NEXT: vrgatherei16.vv v22, v9, v24
; CHECK-NEXT: vrgatherei16.vv v21, v10, v24
; CHECK-NEXT: vrgatherei16.vv v20, v11, v24
; CHECK-NEXT: vrgatherei16.vv v19, v12, v24
; CHECK-NEXT: vrgatherei16.vv v18, v13, v24
; CHECK-NEXT: vrgatherei16.vv v17, v14, v24
; CHECK-NEXT: vrgatherei16.vv v16, v15, v24
; CHECK-NEXT: sub a1, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v16, a1
; CHECK-NEXT: ret

%dst = call <vscale x 64 x i8> @llvm.experimental.vp.reverse.nxv64i8(<vscale x 64 x i8> %src, <vscale x 64 x i1> splat (i1 1), i32 %evl)
ret <vscale x 64 x i8> %dst
}

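; nxv128i8 does not fit in a single register group, so the reverse is expected to
; go through a stack temporary: both halves are stored backwards with a -1 stride
; (vsse8.v) starting at element EVL-1, and the result is reloaded contiguously.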
define <vscale x 128 x i8> @test_vp_reverse_nxv128i8(<vscale x 128 x i8> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv128i8:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB32_2:
; CHECK-NEXT: addi sp, sp, -80
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
; CHECK-NEXT: .cfi_offset s0, -16
; CHECK-NEXT: addi s0, sp, 80
; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a3, a3, 4
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: andi sp, sp, -64
; CHECK-NEXT: addi a3, sp, 64
; CHECK-NEXT: li a4, -1
; CHECK-NEXT: sub a5, a0, a2
; CHECK-NEXT: add a6, a0, a3
; CHECK-NEXT: sltu a0, a0, a5
; CHECK-NEXT: add a2, a3, a2
; CHECK-NEXT: addi a6, a6, -1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsse8.v v8, (a6), a4
; CHECK-NEXT: sub a6, a6, a1
; CHECK-NEXT: and a0, a0, a5
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsse8.v v16, (a6), a4
; CHECK-NEXT: vle8.v v16, (a2)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a3)
; CHECK-NEXT: addi sp, s0, -80
; CHECK-NEXT: .cfi_def_cfa sp, 80
; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: .cfi_restore s0
; CHECK-NEXT: addi sp, sp, 80
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret

%dst = call <vscale x 128 x i8> @llvm.experimental.vp.reverse.nxv128i8(<vscale x 128 x i8> %src, <vscale x 128 x i1> splat (i1 1), i32 %evl)
ret <vscale x 128 x i8> %dst
}

; LMUL = 1
declare <vscale x 1 x i64> @llvm.experimental.vp.reverse.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i1>,i32)
declare <vscale x 2 x i32> @llvm.experimental.vp.reverse.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i1>,i32)
declare <vscale x 4 x i16> @llvm.experimental.vp.reverse.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1>,i32)
declare <vscale x 8 x i8> @llvm.experimental.vp.reverse.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i1>,i32)

; LMUL = 2
declare <vscale x 2 x i64> @llvm.experimental.vp.reverse.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i1>,i32)
declare <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i1>,i32)
declare <vscale x 8 x i16> @llvm.experimental.vp.reverse.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i1>,i32)
declare <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i1>,i32)

; LMUL = 4
declare <vscale x 4 x i64> @llvm.experimental.vp.reverse.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i1>,i32)
declare <vscale x 8 x i32> @llvm.experimental.vp.reverse.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i1>,i32)
declare <vscale x 16 x i16> @llvm.experimental.vp.reverse.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i1>,i32)
declare <vscale x 32 x i8> @llvm.experimental.vp.reverse.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i1>,i32)

; LMUL = 8
declare <vscale x 8 x i64> @llvm.experimental.vp.reverse.nxv8i64(<vscale x 8 x i64>,<vscale x 8 x i1>,i32)
declare <vscale x 16 x i32> @llvm.experimental.vp.reverse.nxv16i32(<vscale x 16 x i32>,<vscale x 16 x i1>,i32)
declare <vscale x 32 x i16> @llvm.experimental.vp.reverse.nxv32i16(<vscale x 32 x i16>,<vscale x 32 x i1>,i32)
declare <vscale x 64 x i8> @llvm.experimental.vp.reverse.nxv64i8(<vscale x 64 x i8>,<vscale x 64 x i1>,i32)

declare <vscale x 128 x i8> @llvm.experimental.vp.reverse.nxv128i8(<vscale x 128 x i8>,<vscale x 128 x i1>,i32)