| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+v,+zvfh \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV32 |
| ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+v,+zvfh \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV64 |
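
; Check that @llvm.experimental.vp.strided.load lowers to a single
; vlse{8,16,32,64}.v: the EVL operand becomes the AVL of the vsetvli and the
; mask operand becomes v0.t. The first tests cover strides narrower and wider
; than XLEN (i8, i16, i64) before the i32 stride used everywhere else.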
| |
| declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr, i8, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i8: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i8: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x i8> %load |
| } |
| |
| declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i16(ptr, i16, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x i8> @strided_vpload_nxv1i8_i16(ptr %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i16(ptr %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x i8> %load |
| } |
| |
| declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr, i64, <vscale x 1 x i1>, i32) |
| |
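; On RV32 the i64 stride occupies the a1/a2 register pair, so the EVL arrives
; in a3 rather than a2; only the low XLEN bits of the stride (a1) feed the
; vlse8.v.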
| define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64(ptr %ptr, i64 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr %ptr, i64 %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x i8> %load |
| } |
| |
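; An all-ones mask is recognized as "unmasked", so the v0.t operand is dropped.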
| define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64_allones_mask(ptr %ptr, i64 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer |
| %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr %ptr, i64 %stride, <vscale x 1 x i1> %b, i32 %evl) |
| ret <vscale x 1 x i8> %load |
| } |
| |
| declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr, i32, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x i8> @strided_vpload_nxv1i8(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i8: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i8: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x i8> %load |
| } |
| |
| define <vscale x 1 x i8> @strided_vpload_nxv1i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i8_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer |
| %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl) |
| ret <vscale x 1 x i8> %load |
| } |
| |
| declare <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0.i32(ptr, i32, <vscale x 2 x i1>, i32) |
| |
| define <vscale x 2 x i8> @strided_vpload_nxv2i8(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2i8: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2i8: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl) |
| ret <vscale x 2 x i8> %load |
| } |
| |
| declare <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0.i32(ptr, i32, <vscale x 4 x i1>, i32) |
| |
| define <vscale x 4 x i8> @strided_vpload_nxv4i8(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4i8: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4i8: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl) |
| ret <vscale x 4 x i8> %load |
| } |
| |
| declare <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr, i32, <vscale x 8 x i1>, i32) |
| |
| define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8i8: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8i8: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl) |
| ret <vscale x 8 x i8> %load |
| } |
| |
| define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8i8_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8i8_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer |
| %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl) |
| ret <vscale x 8 x i8> %load |
| } |
| |
| declare <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0.i32(ptr, i32, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x i16> @strided_vpload_nxv1i16(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x i16> %load |
| } |
| |
| declare <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr, i32, <vscale x 2 x i1>, i32) |
| |
| define <vscale x 2 x i16> @strided_vpload_nxv2i16(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2i16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2i16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl) |
| ret <vscale x 2 x i16> %load |
| } |
| |
| define <vscale x 2 x i16> @strided_vpload_nxv2i16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2i16_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2i16_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer |
| %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl) |
| ret <vscale x 2 x i16> %load |
| } |
| |
| declare <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr, i32, <vscale x 4 x i1>, i32) |
| |
| define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4i16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4i16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl) |
| ret <vscale x 4 x i16> %load |
| } |
| |
| declare <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32) |
| |
| define <vscale x 8 x i16> @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8i16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8i16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl) |
| ret <vscale x 8 x i16> %load |
| } |
| |
| declare <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0.i32(ptr, i32, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x i32> @strided_vpload_nxv1i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i32: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i32: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x i32> %load |
| } |
| |
| declare <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr, i32, <vscale x 2 x i1>, i32) |
| |
| define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2i32: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2i32: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl) |
| ret <vscale x 2 x i32> %load |
| } |
| |
| declare <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32) |
| |
| define <vscale x 4 x i32> @strided_vpload_nxv4i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4i32: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4i32: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl) |
| ret <vscale x 4 x i32> %load |
| } |
| |
| define <vscale x 4 x i32> @strided_vpload_nxv4i32_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4i32_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4i32_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer |
| %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl) |
| ret <vscale x 4 x i32> %load |
| } |
| |
| declare <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0.i32(ptr, i32, <vscale x 8 x i1>, i32) |
| |
| define <vscale x 8 x i32> @strided_vpload_nxv8i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8i32: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8i32: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl) |
| ret <vscale x 8 x i32> %load |
| } |
| |
| declare <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr, i32, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x i64> @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x i64> %load |
| } |
| |
| define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1i64_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1i64_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer |
| %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl) |
| ret <vscale x 1 x i64> %load |
| } |
| |
| declare <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i32(ptr, i32, <vscale x 2 x i1>, i32) |
| |
| define <vscale x 2 x i64> @strided_vpload_nxv2i64(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2i64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2i64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl) |
| ret <vscale x 2 x i64> %load |
| } |
| |
| declare <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0.i32(ptr, i32, <vscale x 4 x i1>, i32) |
| |
| define <vscale x 4 x i64> @strided_vpload_nxv4i64(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4i64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4i64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl) |
| ret <vscale x 4 x i64> %load |
| } |
| |
| declare <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0.i32(ptr, i32, <vscale x 8 x i1>, i32) |
| |
| define <vscale x 8 x i64> @strided_vpload_nxv8i64(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8i64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8i64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl) |
| ret <vscale x 8 x i64> %load |
| } |
| |
| declare <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0.i32(ptr, i32, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x half> @strided_vpload_nxv1f16(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1f16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1f16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x half> %load |
| } |
| |
| declare <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr, i32, <vscale x 2 x i1>, i32) |
| |
| define <vscale x 2 x half> @strided_vpload_nxv2f16(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2f16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2f16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl) |
| ret <vscale x 2 x half> %load |
| } |
| |
| define <vscale x 2 x half> @strided_vpload_nxv2f16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2f16_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2f16_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer |
| %load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl) |
| ret <vscale x 2 x half> %load |
| } |
| |
| declare <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr, i32, <vscale x 4 x i1>, i32) |
| |
| define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4f16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4f16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl) |
| ret <vscale x 4 x half> %load |
| } |
| |
| declare <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32) |
| |
| define <vscale x 8 x half> @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8f16: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma |
| ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8f16: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma |
| ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl) |
| ret <vscale x 8 x half> %load |
| } |
| |
| declare <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0.i32(ptr, i32, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x float> @strided_vpload_nxv1f32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1f32: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1f32: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x float> %load |
| } |
| |
| declare <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr, i32, <vscale x 2 x i1>, i32) |
| |
| define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2f32: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2f32: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl) |
| ret <vscale x 2 x float> %load |
| } |
| |
| declare <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32) |
| |
| define <vscale x 4 x float> @strided_vpload_nxv4f32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4f32: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4f32: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl) |
| ret <vscale x 4 x float> %load |
| } |
| |
| declare <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr, i32, <vscale x 8 x i1>, i32) |
| |
| define <vscale x 8 x float> @strided_vpload_nxv8f32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8f32: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8f32: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl) |
| ret <vscale x 8 x float> %load |
| } |
| |
| define <vscale x 8 x float> @strided_vpload_nxv8f32_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8f32_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma |
| ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8f32_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma |
| ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer |
| %load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl) |
| ret <vscale x 8 x float> %load |
| } |
| |
| declare <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr, i32, <vscale x 1 x i1>, i32) |
| |
| define <vscale x 1 x double> @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv1f64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv1f64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x double> %load |
| } |
| |
| declare <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr, i32, <vscale x 2 x i1>, i32) |
| |
| define <vscale x 2 x double> @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv2f64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv2f64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl) |
| ret <vscale x 2 x double> %load |
| } |
| |
| declare <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr, i32, <vscale x 4 x i1>, i32) |
| |
| define <vscale x 4 x double> @strided_vpload_nxv4f64(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4f64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4f64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl) |
| ret <vscale x 4 x double> %load |
| } |
| |
| define <vscale x 4 x double> @strided_vpload_nxv4f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv4f64_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv4f64_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0 |
| %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer |
| %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl) |
| ret <vscale x 4 x double> %load |
| } |
| |
| declare <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0.i32(ptr, i32, <vscale x 8 x i1>, i32) |
| |
| define <vscale x 8 x double> @strided_vpload_nxv8f64(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv8f64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv8f64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %load = call <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl) |
| ret <vscale x 8 x double> %load |
| } |
| |
| ; Widening |
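; <vscale x 3 x double> is not a legal result type; the load is widened to the
; containing legal type <vscale x 4 x double> (hence the m4 vsetvli) while the
; original EVL still bounds the number of elements loaded.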
| define <vscale x 3 x double> @strided_vpload_nxv3f64(ptr %ptr, i32 signext %stride, <vscale x 3 x i1> %mask, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv3f64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv3f64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr %ptr, i32 %stride, <vscale x 3 x i1> %mask, i32 %evl) |
| ret <vscale x 3 x double> %v |
| } |
| |
| define <vscale x 3 x double> @strided_vpload_nxv3f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_vpload_nxv3f64_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_vpload_nxv3f64_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %one = insertelement <vscale x 3 x i1> poison, i1 true, i32 0 |
| %allones = shufflevector <vscale x 3 x i1> %one, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer |
| %v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr %ptr, i32 %stride, <vscale x 3 x i1> %allones, i32 %evl) |
| ret <vscale x 3 x double> %v |
| } |
| |
| declare <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr, i32, <vscale x 3 x i1>, i32) |
| |
| ; Splitting |
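; <vscale x 16 x double> needs two LMUL=8 register groups, so the load is
; split. With e64/m8 one group holds exactly vlenb elements, so the low half
; loads min(evl, vlenb) elements and the high half loads the remainder
; (computed branchlessly via the sltu/addi/and sequence) from
; ptr + lo_evl * stride, with its mask slid down out of v0.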
| define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_load_nxv16f64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: vmv1r.v v8, v0 |
| ; CHECK-RV32-NEXT: csrr a4, vlenb |
| ; CHECK-RV32-NEXT: sub a2, a3, a4 |
| ; CHECK-RV32-NEXT: sltu a5, a3, a2 |
| ; CHECK-RV32-NEXT: addi a5, a5, -1 |
| ; CHECK-RV32-NEXT: and a2, a5, a2 |
| ; CHECK-RV32-NEXT: srli a5, a4, 3 |
| ; CHECK-RV32-NEXT: vsetvli a6, zero, e8, mf4, ta, ma |
| ; CHECK-RV32-NEXT: vslidedown.vx v0, v0, a5 |
| ; CHECK-RV32-NEXT: bltu a3, a4, .LBB42_2 |
| ; CHECK-RV32-NEXT: # %bb.1: |
| ; CHECK-RV32-NEXT: mv a3, a4 |
| ; CHECK-RV32-NEXT: .LBB42_2: |
| ; CHECK-RV32-NEXT: mul a4, a3, a1 |
| ; CHECK-RV32-NEXT: add a4, a0, a4 |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1, v0.t |
| ; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vmv1r.v v0, v8 |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_load_nxv16f64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: vmv1r.v v8, v0 |
| ; CHECK-RV64-NEXT: csrr a4, vlenb |
| ; CHECK-RV64-NEXT: sub a3, a2, a4 |
| ; CHECK-RV64-NEXT: sltu a5, a2, a3 |
| ; CHECK-RV64-NEXT: addi a5, a5, -1 |
| ; CHECK-RV64-NEXT: and a3, a5, a3 |
| ; CHECK-RV64-NEXT: srli a5, a4, 3 |
| ; CHECK-RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma |
| ; CHECK-RV64-NEXT: vslidedown.vx v0, v0, a5 |
| ; CHECK-RV64-NEXT: bltu a2, a4, .LBB42_2 |
| ; CHECK-RV64-NEXT: # %bb.1: |
| ; CHECK-RV64-NEXT: mv a2, a4 |
| ; CHECK-RV64-NEXT: .LBB42_2: |
| ; CHECK-RV64-NEXT: mul a4, a2, a1 |
| ; CHECK-RV64-NEXT: add a4, a0, a4 |
| ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vmv1r.v v0, v8 |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: ret |
| %v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 %evl) |
| ret <vscale x 16 x double> %v |
| } |
| |
| define <vscale x 16 x double> @strided_load_nxv16f64_allones_mask(ptr %ptr, i64 %stride, i32 zeroext %evl) { |
| ; CHECK-RV32-LABEL: strided_load_nxv16f64_allones_mask: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: csrr a4, vlenb |
| ; CHECK-RV32-NEXT: sub a2, a3, a4 |
| ; CHECK-RV32-NEXT: sltu a5, a3, a2 |
| ; CHECK-RV32-NEXT: addi a5, a5, -1 |
| ; CHECK-RV32-NEXT: and a2, a5, a2 |
| ; CHECK-RV32-NEXT: bltu a3, a4, .LBB43_2 |
| ; CHECK-RV32-NEXT: # %bb.1: |
| ; CHECK-RV32-NEXT: mv a3, a4 |
| ; CHECK-RV32-NEXT: .LBB43_2: |
| ; CHECK-RV32-NEXT: mul a4, a3, a1 |
| ; CHECK-RV32-NEXT: add a4, a0, a4 |
| ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1 |
| ; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_load_nxv16f64_allones_mask: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: csrr a4, vlenb |
| ; CHECK-RV64-NEXT: sub a3, a2, a4 |
| ; CHECK-RV64-NEXT: sltu a5, a2, a3 |
| ; CHECK-RV64-NEXT: addi a5, a5, -1 |
| ; CHECK-RV64-NEXT: and a3, a5, a3 |
| ; CHECK-RV64-NEXT: bltu a2, a4, .LBB43_2 |
| ; CHECK-RV64-NEXT: # %bb.1: |
| ; CHECK-RV64-NEXT: mv a2, a4 |
| ; CHECK-RV64-NEXT: .LBB43_2: |
| ; CHECK-RV64-NEXT: mul a4, a2, a1 |
| ; CHECK-RV64-NEXT: add a4, a0, a4 |
| ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1 |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 |
| ; CHECK-RV64-NEXT: ret |
| %one = insertelement <vscale x 16 x i1> poison, i1 true, i32 0 |
| %allones = shufflevector <vscale x 16 x i1> %one, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer |
| %v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %allones, i32 %evl) |
| ret <vscale x 16 x double> %v |
| } |
| |
| declare <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr, i64, <vscale x 16 x i1>, i32) |
| |
| ; Widening + splitting (with HiIsEmpty == true) |
| ; NOTE: We can't return <vscale x 17 x double> as that introduces a vector |
| ; store that can't yet be legalized through widening. In order to test purely |
| ; the vp.strided.load legalization, we manually split it. |
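; The three masked vlse64.v below are the two m8 halves of the low
; <vscale x 16 x double> part plus the <vscale x 1 x double> tail, which is
; written out to %hi_ptr with vs1r.v.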
define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 zeroext %evl, ptr %hi_ptr) {
| ; CHECK-RV32-LABEL: strided_load_nxv17f64: |
| ; CHECK-RV32: # %bb.0: |
| ; CHECK-RV32-NEXT: csrr a5, vlenb |
| ; CHECK-RV32-NEXT: slli a7, a5, 1 |
| ; CHECK-RV32-NEXT: vmv1r.v v8, v0 |
| ; CHECK-RV32-NEXT: mv a2, a3 |
| ; CHECK-RV32-NEXT: bltu a3, a7, .LBB44_2 |
| ; CHECK-RV32-NEXT: # %bb.1: |
| ; CHECK-RV32-NEXT: mv a2, a7 |
| ; CHECK-RV32-NEXT: .LBB44_2: |
| ; CHECK-RV32-NEXT: sub a6, a2, a5 |
| ; CHECK-RV32-NEXT: sltu t0, a2, a6 |
| ; CHECK-RV32-NEXT: addi t0, t0, -1 |
| ; CHECK-RV32-NEXT: and t0, t0, a6 |
| ; CHECK-RV32-NEXT: srli a6, a5, 3 |
| ; CHECK-RV32-NEXT: vsetvli t1, zero, e8, mf4, ta, ma |
| ; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a6 |
| ; CHECK-RV32-NEXT: mv a6, a2 |
| ; CHECK-RV32-NEXT: bltu a2, a5, .LBB44_4 |
| ; CHECK-RV32-NEXT: # %bb.3: |
| ; CHECK-RV32-NEXT: mv a6, a5 |
| ; CHECK-RV32-NEXT: .LBB44_4: |
| ; CHECK-RV32-NEXT: mul t1, a6, a1 |
| ; CHECK-RV32-NEXT: add t1, a0, t1 |
| ; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v16, (t1), a1, v0.t |
| ; CHECK-RV32-NEXT: sub a7, a3, a7 |
| ; CHECK-RV32-NEXT: sltu a3, a3, a7 |
| ; CHECK-RV32-NEXT: addi a3, a3, -1 |
| ; CHECK-RV32-NEXT: and a3, a3, a7 |
| ; CHECK-RV32-NEXT: bltu a3, a5, .LBB44_6 |
| ; CHECK-RV32-NEXT: # %bb.5: |
| ; CHECK-RV32-NEXT: mv a3, a5 |
| ; CHECK-RV32-NEXT: .LBB44_6: |
| ; CHECK-RV32-NEXT: srli a5, a5, 2 |
| ; CHECK-RV32-NEXT: vsetvli a7, zero, e8, mf2, ta, ma |
| ; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a5 |
| ; CHECK-RV32-NEXT: mul a2, a2, a1 |
| ; CHECK-RV32-NEXT: add a2, a0, a2 |
| ; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vlse64.v v24, (a2), a1, v0.t |
| ; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, ma |
| ; CHECK-RV32-NEXT: vmv1r.v v0, v8 |
| ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV32-NEXT: vs1r.v v24, (a4) |
| ; CHECK-RV32-NEXT: ret |
| ; |
| ; CHECK-RV64-LABEL: strided_load_nxv17f64: |
| ; CHECK-RV64: # %bb.0: |
| ; CHECK-RV64-NEXT: csrr a5, vlenb |
| ; CHECK-RV64-NEXT: slli a7, a5, 1 |
| ; CHECK-RV64-NEXT: vmv1r.v v8, v0 |
| ; CHECK-RV64-NEXT: mv a4, a2 |
| ; CHECK-RV64-NEXT: bltu a2, a7, .LBB44_2 |
| ; CHECK-RV64-NEXT: # %bb.1: |
| ; CHECK-RV64-NEXT: mv a4, a7 |
| ; CHECK-RV64-NEXT: .LBB44_2: |
| ; CHECK-RV64-NEXT: sub a6, a4, a5 |
| ; CHECK-RV64-NEXT: sltu t0, a4, a6 |
| ; CHECK-RV64-NEXT: addi t0, t0, -1 |
| ; CHECK-RV64-NEXT: and t0, t0, a6 |
| ; CHECK-RV64-NEXT: srli a6, a5, 3 |
| ; CHECK-RV64-NEXT: vsetvli t1, zero, e8, mf4, ta, ma |
| ; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a6 |
| ; CHECK-RV64-NEXT: mv a6, a4 |
| ; CHECK-RV64-NEXT: bltu a4, a5, .LBB44_4 |
| ; CHECK-RV64-NEXT: # %bb.3: |
| ; CHECK-RV64-NEXT: mv a6, a5 |
| ; CHECK-RV64-NEXT: .LBB44_4: |
| ; CHECK-RV64-NEXT: mul t1, a6, a1 |
| ; CHECK-RV64-NEXT: add t1, a0, t1 |
| ; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v16, (t1), a1, v0.t |
| ; CHECK-RV64-NEXT: sub a7, a2, a7 |
| ; CHECK-RV64-NEXT: sltu a2, a2, a7 |
| ; CHECK-RV64-NEXT: addi a2, a2, -1 |
| ; CHECK-RV64-NEXT: and a2, a2, a7 |
| ; CHECK-RV64-NEXT: bltu a2, a5, .LBB44_6 |
| ; CHECK-RV64-NEXT: # %bb.5: |
| ; CHECK-RV64-NEXT: mv a2, a5 |
| ; CHECK-RV64-NEXT: .LBB44_6: |
| ; CHECK-RV64-NEXT: srli a5, a5, 2 |
| ; CHECK-RV64-NEXT: vsetvli a7, zero, e8, mf2, ta, ma |
| ; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a5 |
| ; CHECK-RV64-NEXT: mul a4, a4, a1 |
| ; CHECK-RV64-NEXT: add a4, a0, a4 |
| ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vlse64.v v24, (a4), a1, v0.t |
| ; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, ma |
| ; CHECK-RV64-NEXT: vmv1r.v v0, v8 |
| ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t |
| ; CHECK-RV64-NEXT: vs1r.v v24, (a3) |
| ; CHECK-RV64-NEXT: ret |
| %v = call <vscale x 17 x double> @llvm.experimental.vp.strided.load.nxv17f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 %evl) |
%lo = call <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %v, i64 0)
%hi = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %v, i64 16)
store <vscale x 1 x double> %hi, ptr %hi_ptr
| ret <vscale x 16 x double> %lo |
| } |
| |
| declare <vscale x 17 x double> @llvm.experimental.vp.strided.load.nxv17f64.p0.i64(ptr, i64, <vscale x 17 x i1>, i32) |
declare <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %vec, i64 %idx)
declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %vec, i64 %idx)