| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+zfh,+zvfh -target-abi=ilp32d \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s \ |
| ; RUN: --check-prefixes=CHECK,ZVFH,ZVFH32 |
| ; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+zfh,+zvfh -target-abi=lp64d \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s \ |
| ; RUN: --check-prefixes=CHECK,ZVFH,ZVFH64 |
| ; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+zfh,+zvfhmin -target-abi=ilp32d \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN32 |
| ; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+zfh,+zvfhmin -target-abi=lp64d \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s \ |
| ; RUN: --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64 |
| |
| declare <7 x i1> @llvm.vp.fcmp.v7f16(<7 x half>, <7 x half>, metadata, <7 x i1>, i32) |
| |
| ; oeq on the non-power-of-two <7 x half> type. ZVFH emits a single masked |
| ; vmfeq.vv at e16; ZVFHMIN widens both operands to f32 (vfwcvt at VL=8, the |
| ; container size) and does the masked compare at e32 with the caller's EVL. |
| define <7 x i1> @fcmp_oeq_vv_v7f16(<7 x half> %va, <7 x half> %vb, <7 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_oeq_vv_v7f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfeq.vv v0, v8, v9, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_oeq_vv_v7f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <7 x i1> @llvm.vp.fcmp.v7f16(<7 x half> %va, <7 x half> %vb, metadata !"oeq", <7 x i1> %m, i32 %evl) |
| ret <7 x i1> %v |
| } |
| |
| declare <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half>, <8 x half>, metadata, <8 x i1>, i32) |
| |
| ; oeq vector-vector. ZVFH compares natively at e16; ZVFHMIN promotes both |
| ; f16 sources to f32 via vfwcvt.f.f.v and compares masked at e32. |
| define <8 x i1> @fcmp_oeq_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_oeq_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfeq.vv v0, v8, v9, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_oeq_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"oeq", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; oeq vector-scalar (scalar splat as the RHS). ZVFH folds the splat into |
| ; vmfeq.vf with fa0; ZVFHMIN moves the half bits to a GPR (fmv.x.h), splats |
| ; with vmv.v.x, then widens both sides before the e32 masked compare. |
| define <8 x i1> @fcmp_oeq_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_oeq_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfeq.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_oeq_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"oeq", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; oeq with splat on the LHS. Equality is commutative, so ZVFH still emits |
| ; the same vmfeq.vf; ZVFHMIN compares the widened operands in swapped order. |
| define <8 x i1> @fcmp_oeq_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_oeq_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfeq.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_oeq_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"oeq", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ogt vector-vector. There is no vmfgt.vv, so a > b is lowered as |
| ; vmflt.vv with the operands swapped (b < a) on both configurations. |
| define <8 x i1> @fcmp_ogt_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ogt_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vv v0, v9, v8, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ogt_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ogt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ogt vector-scalar. ZVFH has a direct vmfgt.vf form; ZVFHMIN splats and |
| ; widens the scalar, then uses vmflt.vv with swapped operands. |
| define <8 x i1> @fcmp_ogt_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ogt_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfgt.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ogt_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ogt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; splat > va folds to va < splat, so ZVFH emits vmflt.vf on the vector |
| ; operand; ZVFHMIN compares the widened vector against the widened splat. |
| define <8 x i1> @fcmp_ogt_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ogt_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ogt_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"ogt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; oge vector-vector. Lowered as vmfle.vv with the operands swapped |
| ; (b <= a), since there is no vmfge.vv instruction. |
| define <8 x i1> @fcmp_oge_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_oge_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfle.vv v0, v9, v8, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_oge_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"oge", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; oge vector-scalar. ZVFH uses the direct vmfge.vf form; ZVFHMIN splats, |
| ; widens, and uses vmfle.vv with swapped operands. |
| define <8 x i1> @fcmp_oge_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_oge_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfge.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_oge_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"oge", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; splat >= va folds to va <= splat, so ZVFH emits vmfle.vf on the vector |
| ; operand; ZVFHMIN uses vmfle.vv on the widened values in that order. |
| define <8 x i1> @fcmp_oge_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_oge_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfle.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_oge_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"oge", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; olt vector-vector. Maps directly onto masked vmflt.vv (no operand swap). |
| define <8 x i1> @fcmp_olt_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_olt_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vv v0, v8, v9, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_olt_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"olt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; olt vector-scalar. ZVFH uses vmflt.vf directly; ZVFHMIN splats and widens |
| ; the scalar and compares widened-va < widened-splat. |
| define <8 x i1> @fcmp_olt_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_olt_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_olt_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"olt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; splat < va folds to va > splat, so ZVFH emits vmfgt.vf; ZVFHMIN uses |
| ; vmflt.vv with the widened splat as the left operand. |
| define <8 x i1> @fcmp_olt_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_olt_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfgt.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_olt_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"olt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ole vector-vector. Maps directly onto masked vmfle.vv. |
| define <8 x i1> @fcmp_ole_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ole_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfle.vv v0, v8, v9, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ole_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ole", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ole vector-scalar. ZVFH uses vmfle.vf directly; ZVFHMIN splats, widens, |
| ; and compares widened-va <= widened-splat with vmfle.vv. |
| define <8 x i1> @fcmp_ole_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ole_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfle.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ole_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ole", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; splat <= va folds to va >= splat, so ZVFH emits vmfge.vf; ZVFHMIN uses |
| ; vmfle.vv with the widened splat on the left. |
| define <8 x i1> @fcmp_ole_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ole_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfge.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ole_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"ole", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; one (ordered not-equal) has no single instruction; it is expanded as |
| ; (a < b) OR (b < a) — two masked vmflt compares combined with vmor.mm, |
| ; which is false when either operand is NaN, as required. |
| define <8 x i1> @fcmp_one_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_one_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vv v10, v8, v9, v0.t |
| ; ZVFH-NEXT: vmflt.vv v8, v9, v8, v0.t |
| ; ZVFH-NEXT: vmor.mm v0, v8, v10 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_one_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmflt.vv v9, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmor.mm v0, v9, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"one", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; one against a splat: ZVFH uses vmflt.vf plus vmfgt.vf then vmor.mm; |
| ; ZVFHMIN widens both sides and does two vmflt.vv compares before the OR. |
| define <8 x i1> @fcmp_one_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_one_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vf v9, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmfgt.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmor.mm v0, v8, v9 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_one_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vmv.v.x v8, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmflt.vv v9, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmor.mm v0, v9, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"one", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; one is symmetric, so the swapped-splat form produces the same two-compare |
| ; plus vmor.mm expansion with the compare directions exchanged. |
| define <8 x i1> @fcmp_one_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_one_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfgt.vf v9, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmflt.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmor.mm v0, v8, v9 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_one_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vmv.v.x v8, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmflt.vv v9, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmor.mm v0, v9, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"one", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ord (neither operand NaN) is expanded as a self-equality test on each |
| ; operand (vmfeq x,x is false only for NaN) ANDed together with vmand.mm. |
| ; ZVFHMIN needs separate vsetvli toggles because each widening conversion |
| ; runs at e16 while its compare runs at e32. |
| define <8 x i1> @fcmp_ord_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ord_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfeq.vv v9, v9, v9, v0.t |
| ; ZVFH-NEXT: vmfeq.vv v8, v8, v8, v0.t |
| ; ZVFH-NEXT: vmand.mm v0, v8, v9 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ord_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v9, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v8, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vmand.mm v0, v8, v9 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ord", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ord against a splat: the scalar is materialized as a vector (vfmv.v.f on |
| ; ZVFH, fmv.x.h + vmv.v.x on ZVFHMIN) so its NaN-ness can be tested with a |
| ; self-compare, then ANDed with the self-compare of %va. |
| define <8 x i1> @fcmp_ord_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ord_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFH-NEXT: vfmv.v.f v9, fa0 |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfeq.vf v9, v9, fa0, v0.t |
| ; ZVFH-NEXT: vmfeq.vv v8, v8, v8, v0.t |
| ; ZVFH-NEXT: vmand.mm v0, v8, v9 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ord_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vmv.v.x v8, a1 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v9, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v8, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vmand.mm v0, v9, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ord", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ord is symmetric; the swapped form differs from the non-swap variant only |
| ; in the operand order of the final vmand.mm. |
| define <8 x i1> @fcmp_ord_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ord_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFH-NEXT: vfmv.v.f v9, fa0 |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfeq.vf v9, v9, fa0, v0.t |
| ; ZVFH-NEXT: vmfeq.vv v8, v8, v8, v0.t |
| ; ZVFH-NEXT: vmand.mm v0, v9, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ord_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vmv.v.x v8, a1 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v9, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfeq.vv v8, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vmand.mm v0, v8, v9 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"ord", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ueq (unordered or equal) is the complement of one: NOT((a < b) OR |
| ; (b < a)), i.e. two masked vmflt compares combined with vmnor.mm. |
| define <8 x i1> @fcmp_ueq_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ueq_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vv v10, v8, v9, v0.t |
| ; ZVFH-NEXT: vmflt.vv v8, v9, v8, v0.t |
| ; ZVFH-NEXT: vmnor.mm v0, v8, v10 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ueq_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmflt.vv v9, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ueq", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ueq against a splat: same expansion as the one_vf case but combined with |
| ; vmnor.mm instead of vmor.mm to include the unordered (NaN) lanes. |
| define <8 x i1> @fcmp_ueq_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ueq_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vf v9, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmfgt.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnor.mm v0, v8, v9 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ueq_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vmv.v.x v8, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmflt.vv v9, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ueq", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ueq is symmetric; the swapped-splat form runs the same two compares in |
| ; the opposite order and combines them with vmnor.mm. |
| define <8 x i1> @fcmp_ueq_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ueq_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfgt.vf v9, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmflt.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnor.mm v0, v8, v9 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ueq_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vmv.v.x v8, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmflt.vv v9, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"ueq", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ugt (unordered or greater) is lowered as the complement of the ordered |
| ; inverse predicate: NOT(a <= b), i.e. masked vmfle.vv followed by vmnot.m. |
| define <8 x i1> @fcmp_ugt_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ugt_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfle.vv v8, v8, v9, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ugt_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ugt against a splat: NOT(va <= splat) — vmfle.vf (or widened vmfle.vv on |
| ; ZVFHMIN) then vmnot.m. |
| define <8 x i1> @fcmp_ugt_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ugt_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfle.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ugt_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; splat ugt va folds to NOT(va >= splat) — vmfge.vf then vmnot.m on ZVFH; |
| ; ZVFHMIN inverts a vmfle.vv with the widened splat on the left. |
| define <8 x i1> @fcmp_ugt_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ugt_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfge.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ugt_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"ugt", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; uge (unordered or greater-equal) is lowered as NOT(a < b): masked |
| ; vmflt.vv followed by vmnot.m. |
| define <8 x i1> @fcmp_uge_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_uge_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vv v8, v8, v9, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_uge_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; uge against a splat: NOT(va < splat) — vmflt.vf (widened vmflt.vv on |
| ; ZVFHMIN) then vmnot.m. |
| define <8 x i1> @fcmp_uge_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_uge_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_uge_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; splat uge va folds to NOT(va > splat) — vmfgt.vf then vmnot.m on ZVFH; |
| ; ZVFHMIN inverts a vmflt.vv with the widened splat on the left. |
| define <8 x i1> @fcmp_uge_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_uge_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfgt.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_uge_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"uge", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; ult (unordered or less) is lowered as NOT(b <= a): swapped-operand |
| ; masked vmfle.vv followed by vmnot.m. |
| define <8 x i1> @fcmp_ult_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ult_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfle.vv v8, v9, v8, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ult_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "ult" against a splatted scalar RHS: ult(va, splat) = NOT oge(va, splat), |
| ; so ZVFH emits vmfge.vf + vmnot; ZVFHMIN materializes the splat via fmv.x.h/vmv.v.x, |
| ; widens both sides to f32, and emits vmfle.vv (splat <= va) + vmnot. |
| define <8 x i1> @fcmp_ult_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ult_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfge.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ult_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "ult" with the splatted scalar as the LHS: ult(splat, va) = |
| ; NOT oge(splat, va) = NOT ole(va, splat), so ZVFH emits vmfle.vf + vmnot; |
| ; ZVFHMIN widens to f32 and emits vmfle.vv (va <= splat) + vmnot. |
| define <8 x i1> @fcmp_ult_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ult_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfle.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ult_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfle.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"ult", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "ule", vector-vector: ule(va, vb) = NOT ogt(va, vb), realized as |
| ; vmflt with operands swapped (vb < va) followed by vmnot; ZVFHMIN widens both |
| ; vectors to f32 first. |
| define <8 x i1> @fcmp_ule_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ule_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vv v8, v9, v8, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ule_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ule", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "ule" against a splatted scalar RHS: ule(va, splat) = NOT ogt(va, splat), |
| ; so ZVFH emits vmfgt.vf + vmnot; ZVFHMIN splats via fmv.x.h/vmv.v.x, widens to f32, |
| ; and emits vmflt.vv (splat < va) + vmnot. |
| define <8 x i1> @fcmp_ule_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ule_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfgt.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ule_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ule", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "ule" with the splatted scalar as the LHS: ule(splat, va) = |
| ; NOT ogt(splat, va) = NOT olt(va, splat), so ZVFH emits vmflt.vf + vmnot; |
| ; ZVFHMIN widens to f32 and emits vmflt.vv (va < splat) + vmnot. |
| define <8 x i1> @fcmp_ule_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_ule_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmflt.vf v8, v8, fa0, v0.t |
| ; ZVFH-NEXT: vmnot.m v0, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_ule_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmflt.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmnot.m v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"ule", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "une", vector-vector: maps directly to a single masked vmfne.vv |
| ; (no negation needed, since vmfne is itself an unordered-or-unequal compare); |
| ; ZVFHMIN widens both vectors to f32 and copies the result mask into v0. |
| define <8 x i1> @fcmp_une_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_une_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfne.vv v0, v8, v9, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_une_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"une", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "une" against a splatted scalar RHS: ZVFH folds the splat into a |
| ; single vmfne.vf; ZVFHMIN must materialize the splat (fmv.x.h/vmv.v.x), widen both |
| ; sides to f32, and use vmfne.vv. |
| define <8 x i1> @fcmp_une_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_une_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfne.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_une_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v8, v10, v12, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"une", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "une" with the splatted scalar as the LHS: une is symmetric, so |
| ; ZVFH emits the same single vmfne.vf as the unswapped variant; ZVFHMIN widens to |
| ; f32 and compares with the operand order reflecting the swapped IR call. |
| define <8 x i1> @fcmp_une_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_une_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfne.vf v0, v8, fa0, v0.t |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_une_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vmv.v.x v9, a1 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v8, v12, v10, v0.t |
| ; ZVFHMIN-NEXT: vmv1r.v v0, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"une", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "uno" (unordered), vector-vector: true when either element is NaN, |
| ; lowered as two masked self-compares (vmfne x, x is true only for NaN) combined |
| ; with vmor; ZVFHMIN interleaves the f32 widening of each operand with its compare. |
| define <8 x i1> @fcmp_uno_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_uno_vv_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfne.vv v9, v9, v9, v0.t |
| ; ZVFH-NEXT: vmfne.vv v8, v8, v8, v0.t |
| ; ZVFH-NEXT: vmor.mm v0, v8, v9 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_uno_vv_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v9, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v8, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vmor.mm v0, v8, v9 |
| ; ZVFHMIN-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"uno", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "uno" against a splatted scalar: NaN-checks both sides via |
| ; self-compares and ORs the masks. ZVFH splats with vfmv.v.f and checks the splat |
| ; with vmfne.vf; ZVFHMIN splats through integer moves and widens everything to f32. |
| define <8 x i1> @fcmp_uno_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_uno_vf_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFH-NEXT: vfmv.v.f v9, fa0 |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfne.vf v9, v9, fa0, v0.t |
| ; ZVFH-NEXT: vmfne.vv v8, v8, v8, v0.t |
| ; ZVFH-NEXT: vmor.mm v0, v8, v9 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_uno_vf_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vmv.v.x v8, a1 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v9, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v8, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vmor.mm v0, v9, v8 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"uno", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Masked vp.fcmp "uno" with the splatted scalar as the LHS: uno is symmetric, so |
| ; the lowering matches the unswapped variant except for the vmor operand order |
| ; (which mirrors the swapped IR operands). |
| define <8 x i1> @fcmp_uno_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_uno_vf_swap_v8f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFH-NEXT: vfmv.v.f v9, fa0 |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; ZVFH-NEXT: vmfne.vf v9, v9, fa0, v0.t |
| ; ZVFH-NEXT: vmfne.vv v8, v8, v8, v0.t |
| ; ZVFH-NEXT: vmor.mm v0, v9, v8 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN-LABEL: fcmp_uno_vf_swap_v8f16: |
| ; ZVFHMIN: # %bb.0: |
| ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vmv.v.x v8, a1 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v9, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 |
| ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; ZVFHMIN-NEXT: vmfne.vv v8, v10, v10, v0.t |
| ; ZVFHMIN-NEXT: vmor.mm v0, v8, v9 |
| ; ZVFHMIN-NEXT: ret |
| %elt.head = insertelement <8 x half> poison, half %b, i32 0 |
| %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %vb, <8 x half> %va, metadata !"uno", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| declare <128 x i1> @llvm.vp.fcmp.v128f16(<128 x half>, <128 x half>, metadata, <128 x i1>, i32) |
| |
| define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128 x i1> %m, i32 zeroext %evl) { |
| ; ZVFH-LABEL: fcmp_oeq_vv_v128f16: |
| ; ZVFH: # %bb.0: |
| ; ZVFH-NEXT: addi sp, sp, -16 |
| ; ZVFH-NEXT: .cfi_def_cfa_offset 16 |
| ; ZVFH-NEXT: csrr a1, vlenb |
| ; ZVFH-NEXT: slli a1, a1, 4 |
| ; ZVFH-NEXT: sub sp, sp, a1 |
| ; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb |
| ; ZVFH-NEXT: csrr a1, vlenb |
| ; ZVFH-NEXT: slli a1, a1, 3 |
| ; ZVFH-NEXT: add a1, sp, a1 |
| ; ZVFH-NEXT: addi a1, a1, 16 |
| ; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill |
| ; ZVFH-NEXT: addi a1, a0, 128 |
| ; ZVFH-NEXT: li a3, 64 |
| ; ZVFH-NEXT: vsetvli zero, a3, e16, m8, ta, ma |
| ; ZVFH-NEXT: vle16.v v16, (a1) |
| ; ZVFH-NEXT: addi a1, sp, 16 |
| ; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill |
| ; ZVFH-NEXT: vle16.v v16, (a0) |
| ; ZVFH-NEXT: mv a0, a2 |
| ; ZVFH-NEXT: vsetivli zero, 8, e8, m1, ta, ma |
| ; ZVFH-NEXT: vslidedown.vi v24, v0, 8 |
| ; ZVFH-NEXT: bltu a2, a3, .LBB43_2 |
| ; ZVFH-NEXT: # %bb.1: |
| ; ZVFH-NEXT: li a0, 64 |
| ; ZVFH-NEXT: .LBB43_2: |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma |
| ; ZVFH-NEXT: vmfeq.vv v7, v8, v16, v0.t |
| ; ZVFH-NEXT: addi a0, a2, -64 |
| ; ZVFH-NEXT: sltu a1, a2, a0 |
| ; ZVFH-NEXT: addi a1, a1, -1 |
| ; ZVFH-NEXT: and a0, a1, a0 |
| ; ZVFH-NEXT: vmv1r.v v0, v24 |
| ; ZVFH-NEXT: csrr a1, vlenb |
| ; ZVFH-NEXT: slli a1, a1, 3 |
| ; ZVFH-NEXT: add a1, sp, a1 |
| ; ZVFH-NEXT: addi a1, a1, 16 |
| ; ZVFH-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload |
| ; ZVFH-NEXT: addi a1, sp, 16 |
| ; ZVFH-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload |
| ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma |
| ; ZVFH-NEXT: vmfeq.vv v8, v16, v24, v0.t |
| ; ZVFH-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; ZVFH-NEXT: vslideup.vi v7, v8, 8 |
| ; ZVFH-NEXT: vmv.v.v v0, v7 |
| ; ZVFH-NEXT: csrr a0, vlenb |
| ; ZVFH-NEXT: slli a0, a0, 4 |
| ; ZVFH-NEXT: add sp, sp, a0 |
| ; ZVFH-NEXT: .cfi_def_cfa sp, 16 |
| ; ZVFH-NEXT: addi sp, sp, 16 |
| ; ZVFH-NEXT: .cfi_def_cfa_offset 0 |
| ; ZVFH-NEXT: ret |
| ; |
| ; ZVFHMIN32-LABEL: fcmp_oeq_vv_v128f16: |
| ; ZVFHMIN32: # %bb.0: |
| ; ZVFHMIN32-NEXT: addi sp, sp, -896 |
| ; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 896 |
| ; ZVFHMIN32-NEXT: sw ra, 892(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s0, 888(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s2, 884(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s3, 880(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s4, 876(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s5, 872(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s6, 868(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s7, 864(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s8, 860(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s9, 856(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s10, 852(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: sw s11, 848(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: .cfi_offset ra, -4 |
| ; ZVFHMIN32-NEXT: .cfi_offset s0, -8 |
| ; ZVFHMIN32-NEXT: .cfi_offset s2, -12 |
| ; ZVFHMIN32-NEXT: .cfi_offset s3, -16 |
| ; ZVFHMIN32-NEXT: .cfi_offset s4, -20 |
| ; ZVFHMIN32-NEXT: .cfi_offset s5, -24 |
| ; ZVFHMIN32-NEXT: .cfi_offset s6, -28 |
| ; ZVFHMIN32-NEXT: .cfi_offset s7, -32 |
| ; ZVFHMIN32-NEXT: .cfi_offset s8, -36 |
| ; ZVFHMIN32-NEXT: .cfi_offset s9, -40 |
| ; ZVFHMIN32-NEXT: .cfi_offset s10, -44 |
| ; ZVFHMIN32-NEXT: .cfi_offset s11, -48 |
| ; ZVFHMIN32-NEXT: addi s0, sp, 896 |
| ; ZVFHMIN32-NEXT: .cfi_def_cfa s0, 0 |
| ; ZVFHMIN32-NEXT: csrr a1, vlenb |
| ; ZVFHMIN32-NEXT: li a2, 30 |
| ; ZVFHMIN32-NEXT: mul a1, a1, a2 |
| ; ZVFHMIN32-NEXT: sub sp, sp, a1 |
| ; ZVFHMIN32-NEXT: andi sp, sp, -128 |
| ; ZVFHMIN32-NEXT: addi a1, a0, 128 |
| ; ZVFHMIN32-NEXT: li a2, 64 |
| ; ZVFHMIN32-NEXT: addi a3, sp, 640 |
| ; ZVFHMIN32-NEXT: addi a4, sp, 384 |
| ; ZVFHMIN32-NEXT: addi a5, sp, 512 |
| ; ZVFHMIN32-NEXT: vsetvli zero, a2, e16, m8, ta, ma |
| ; ZVFHMIN32-NEXT: vle16.v v0, (a0) |
| ; ZVFHMIN32-NEXT: addi a0, sp, 256 |
| ; ZVFHMIN32-NEXT: vle16.v v24, (a1) |
| ; ZVFHMIN32-NEXT: vse16.v v8, (a3) |
| ; ZVFHMIN32-NEXT: vse16.v v0, (a4) |
| ; ZVFHMIN32-NEXT: vse16.v v16, (a5) |
| ; ZVFHMIN32-NEXT: vse16.v v24, (a0) |
| ; ZVFHMIN32-NEXT: lh a0, 704(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 448(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 160(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 702(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 446(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 159(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 700(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 444(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 158(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 698(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 442(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 157(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 696(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 440(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 156(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 694(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 438(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 155(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 692(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 436(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 154(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 690(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 434(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 153(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 688(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 432(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 152(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 686(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 430(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 151(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 684(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 428(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 150(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 682(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 426(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 149(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 680(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 424(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 148(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 678(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 422(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 147(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 676(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 420(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a2, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 146(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 674(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 418(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a2 |
| ; ZVFHMIN32-NEXT: vmv.x.s a2, v0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa4, fa3 |
| ; ZVFHMIN32-NEXT: sb a0, 145(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 672(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 416(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a2, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a2, 128(sp) |
| ; ZVFHMIN32-NEXT: sb a0, 144(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 576(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 320(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 224(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 574(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 318(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 223(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 572(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 316(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 222(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 570(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 314(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 221(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 568(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 312(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 220(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 566(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 310(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 219(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 564(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 308(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 218(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 562(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 306(sp) |
| ; ZVFHMIN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v8, 7 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 29 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v8, 6 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 28 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v8, 5 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 27 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v8, 4 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 26 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v8, 3 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 25 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v8, 2 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 24 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v8, 1 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 23 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vsetivli zero, 1, e16, m2, ta, ma |
| ; ZVFHMIN32-NEXT: vslidedown.vi v26, v8, 15 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v20, v8, 14 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v28, v8, 13 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v8, 12 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: slli a2, a2, 1 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v4, v8, 11 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v2, v8, 10 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v30, v8, 9 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v22, v8, 8 |
| ; ZVFHMIN32-NEXT: vmv.x.s a4, v16 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 217(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 560(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 304(sp) |
| ; ZVFHMIN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; ZVFHMIN32-NEXT: vslidedown.vi v3, v16, 7 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v31, v16, 6 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v5, v16, 5 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v23, v16, 4 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v16, 3 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 21 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v16, 2 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 20 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v16, 1 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 22 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vsetivli zero, 1, e16, m2, ta, ma |
| ; ZVFHMIN32-NEXT: vslidedown.vi v18, v16, 15 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v14, v16, 14 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v16, 13 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v12, v16, 12 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v10, v16, 11 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v6, v16, 10 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 18 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v6, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v6, v16, 9 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 14 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v6, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v6, v16, 8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 216(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 558(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 302(sp) |
| ; ZVFHMIN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; ZVFHMIN32-NEXT: vslidedown.vi v13, v0, 7 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v29, v0, 6 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v11, v0, 5 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v7, v0, 4 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v9, v0, 3 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v21, v0, 2 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v27, v0, 1 |
| ; ZVFHMIN32-NEXT: vsetivli zero, 1, e16, m2, ta, ma |
| ; ZVFHMIN32-NEXT: vslidedown.vi v16, v0, 15 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: slli a2, a2, 2 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v16, v0, 14 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: slli a2, a2, 3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v16, v0, 13 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 6 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v16, v0, 12 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 12 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v16, v0, 11 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 10 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v16, v0, 10 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: slli a2, a2, 4 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vslidedown.vi v16, v0, 9 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v0, v0, 8 |
| ; ZVFHMIN32-NEXT: addi a2, sp, 848 |
| ; ZVFHMIN32-NEXT: vs2r.v v0, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN32-NEXT: vmv.x.s t4, v26 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 215(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 556(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 300(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s t3, v20 |
| ; ZVFHMIN32-NEXT: vmv.x.s t1, v28 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 214(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 554(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 298(sp) |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: slli a2, a2, 1 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v0, (a2) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s t2, v0 |
| ; ZVFHMIN32-NEXT: vmv.x.s t0, v4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 213(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 552(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 296(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a7, v2 |
| ; ZVFHMIN32-NEXT: vmv.x.s a6, v30 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 212(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 550(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 294(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a5, v22 |
| ; ZVFHMIN32-NEXT: vmv.x.s a2, v18 |
| ; ZVFHMIN32-NEXT: sw a2, 112(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 211(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 548(sp) |
| ; ZVFHMIN32-NEXT: lh t5, 292(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a0, v14 |
| ; ZVFHMIN32-NEXT: sw a0, 116(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: vmv.x.s a0, v8 |
| ; ZVFHMIN32-NEXT: sw a0, 124(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t5 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a1, 210(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 546(sp) |
| ; ZVFHMIN32-NEXT: lh t5, 290(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a4 |
| ; ZVFHMIN32-NEXT: vmv.x.s a4, v24 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, t5 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa4, fa3 |
| ; ZVFHMIN32-NEXT: sb a1, 209(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 544(sp) |
| ; ZVFHMIN32-NEXT: lh t5, 288(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a4 |
| ; ZVFHMIN32-NEXT: feq.h a4, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t5 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a4, 192(sp) |
| ; ZVFHMIN32-NEXT: sb a1, 208(sp) |
| ; ZVFHMIN32-NEXT: lh t5, 738(sp) |
| ; ZVFHMIN32-NEXT: lh t6, 482(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a0, v12 |
| ; ZVFHMIN32-NEXT: sw a0, 108(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: vmv.x.s a0, v10 |
| ; ZVFHMIN32-NEXT: sw a0, 120(sp) # 4-byte Folded Spill |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN32-NEXT: feq.h t5, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb t5, 177(sp) |
| ; ZVFHMIN32-NEXT: lh t5, 736(sp) |
| ; ZVFHMIN32-NEXT: lh t6, 480(sp) |
| ; ZVFHMIN32-NEXT: csrr a0, vlenb |
| ; ZVFHMIN32-NEXT: li a1, 29 |
| ; ZVFHMIN32-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN32-NEXT: add a0, sp, a0 |
| ; ZVFHMIN32-NEXT: lh s5, 848(a0) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: csrr a0, vlenb |
| ; ZVFHMIN32-NEXT: li a1, 28 |
| ; ZVFHMIN32-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN32-NEXT: add a0, sp, a0 |
| ; ZVFHMIN32-NEXT: lh s6, 848(a0) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN32-NEXT: feq.h t5, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb t5, 176(sp) |
| ; ZVFHMIN32-NEXT: lh t5, 734(sp) |
| ; ZVFHMIN32-NEXT: lh t6, 478(sp) |
| ; ZVFHMIN32-NEXT: csrr a0, vlenb |
| ; ZVFHMIN32-NEXT: li a1, 27 |
| ; ZVFHMIN32-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN32-NEXT: add a0, sp, a0 |
| ; ZVFHMIN32-NEXT: lh s7, 848(a0) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: csrr a0, vlenb |
| ; ZVFHMIN32-NEXT: li a1, 26 |
| ; ZVFHMIN32-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN32-NEXT: add a0, sp, a0 |
| ; ZVFHMIN32-NEXT: lh s8, 848(a0) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN32-NEXT: feq.h t5, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb t5, 175(sp) |
| ; ZVFHMIN32-NEXT: lh t5, 732(sp) |
| ; ZVFHMIN32-NEXT: lh t6, 476(sp) |
| ; ZVFHMIN32-NEXT: csrr a0, vlenb |
| ; ZVFHMIN32-NEXT: li a1, 25 |
| ; ZVFHMIN32-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN32-NEXT: add a0, sp, a0 |
| ; ZVFHMIN32-NEXT: lh s4, 848(a0) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: csrr a0, vlenb |
| ; ZVFHMIN32-NEXT: li a1, 24 |
| ; ZVFHMIN32-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN32-NEXT: add a0, sp, a0 |
| ; ZVFHMIN32-NEXT: lh s3, 848(a0) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN32-NEXT: feq.h t5, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb t5, 174(sp) |
| ; ZVFHMIN32-NEXT: lh t6, 730(sp) |
| ; ZVFHMIN32-NEXT: lh s9, 474(sp) |
| ; ZVFHMIN32-NEXT: csrr a0, vlenb |
| ; ZVFHMIN32-NEXT: li a1, 23 |
| ; ZVFHMIN32-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN32-NEXT: add a0, sp, a0 |
| ; ZVFHMIN32-NEXT: lh s2, 848(a0) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s t5, v3 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t6 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s9 |
| ; ZVFHMIN32-NEXT: feq.h t6, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb t6, 173(sp) |
| ; ZVFHMIN32-NEXT: lh s9, 728(sp) |
| ; ZVFHMIN32-NEXT: lh s10, 472(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s t6, v31 |
| ; ZVFHMIN32-NEXT: vmv.x.s ra, v13 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s9 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s10 |
| ; ZVFHMIN32-NEXT: feq.h s9, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb s9, 172(sp) |
| ; ZVFHMIN32-NEXT: lh s9, 726(sp) |
| ; ZVFHMIN32-NEXT: lh s10, 470(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a2, v29 |
| ; ZVFHMIN32-NEXT: vmv.x.s a3, v11 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s9 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s10 |
| ; ZVFHMIN32-NEXT: feq.h s9, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb s9, 171(sp) |
| ; ZVFHMIN32-NEXT: lh s10, 724(sp) |
| ; ZVFHMIN32-NEXT: lh s11, 468(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a4, v7 |
| ; ZVFHMIN32-NEXT: vmv.x.s s9, v9 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s10 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s11 |
| ; ZVFHMIN32-NEXT: feq.h s10, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb s10, 170(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 722(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 466(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s s10, v21 |
| ; ZVFHMIN32-NEXT: vmv.x.s s11, v27 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 169(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 720(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 464(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s5 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s6 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN32-NEXT: sb a0, 168(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 718(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 462(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, s7 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, s8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa1, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa0, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa1, fa0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa1, ra |
| ; ZVFHMIN32-NEXT: sb a0, 167(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 716(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa0, a2 |
| ; ZVFHMIN32-NEXT: lh a1, 460(sp) |
| ; ZVFHMIN32-NEXT: feq.h s5, fa5, fa1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa4, fa0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s4 |
| ; ZVFHMIN32-NEXT: sb a1, 166(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 714(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 458(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a3 |
| ; ZVFHMIN32-NEXT: feq.h a3, fa3, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a2 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa4, fa3 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s3 |
| ; ZVFHMIN32-NEXT: sb a1, 165(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 712(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 456(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a4 |
| ; ZVFHMIN32-NEXT: feq.h a4, fa2, fa3 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a2 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa3, fa2 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, s2 |
| ; ZVFHMIN32-NEXT: sb a1, 164(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 710(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 454(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, s9 |
| ; ZVFHMIN32-NEXT: feq.h s2, fa5, fa2 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a2 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa5, fa2 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s10 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, s11 |
| ; ZVFHMIN32-NEXT: sb a1, 163(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 708(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 452(sp) |
| ; ZVFHMIN32-NEXT: feq.h s3, fa4, fa5 |
| ; ZVFHMIN32-NEXT: feq.h s4, fa3, fa2 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a1, 162(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 706(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 450(sp) |
| ; ZVFHMIN32-NEXT: sb s4, 129(sp) |
| ; ZVFHMIN32-NEXT: sb s3, 130(sp) |
| ; ZVFHMIN32-NEXT: sb s2, 131(sp) |
| ; ZVFHMIN32-NEXT: sb a4, 132(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a3, 133(sp) |
| ; ZVFHMIN32-NEXT: sb a0, 134(sp) |
| ; ZVFHMIN32-NEXT: sb s5, 135(sp) |
| ; ZVFHMIN32-NEXT: sb a1, 161(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 610(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 354(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s s6, v5 |
| ; ZVFHMIN32-NEXT: vmv.x.s s5, v23 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 241(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 608(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 352(sp) |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 21 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: lh s4, 848(a2) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 20 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: lh s3, 848(a2) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 240(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 606(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 350(sp) |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 22 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: lh s2, 848(a2) # 8-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa4, fa3 |
| ; ZVFHMIN32-NEXT: sb a0, 239(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 604(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 348(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 7 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN32-NEXT: sb a0, 238(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 602(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 346(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a2, v8 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 6 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN32-NEXT: sb a0, 237(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 600(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 344(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a3, v8 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 5 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN32-NEXT: sb a0, 236(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 598(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 342(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a4, v8 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN32-NEXT: sb a0, 235(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 596(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 340(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s s8, v8 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 3 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN32-NEXT: sb a0, 234(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 594(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 338(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s s9, v8 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 2 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN32-NEXT: sb a0, 233(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 592(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s a1, v8 |
| ; ZVFHMIN32-NEXT: lh t5, 336(sp) |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 1 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN32-NEXT: vmv.x.s s7, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, t5 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa3, a2 |
| ; ZVFHMIN32-NEXT: sb a0, 232(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 590(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa2, a3 |
| ; ZVFHMIN32-NEXT: lh a2, 334(sp) |
| ; ZVFHMIN32-NEXT: feq.h t5, fa5, fa3 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: feq.h t6, fa4, fa2 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s6 |
| ; ZVFHMIN32-NEXT: sb a0, 231(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 588(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 332(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a4 |
| ; ZVFHMIN32-NEXT: feq.h a3, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s5 |
| ; ZVFHMIN32-NEXT: sb a0, 230(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 586(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 330(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s8 |
| ; ZVFHMIN32-NEXT: feq.h a4, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s4 |
| ; ZVFHMIN32-NEXT: sb a0, 229(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 584(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 328(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s9 |
| ; ZVFHMIN32-NEXT: feq.h s4, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s3 |
| ; ZVFHMIN32-NEXT: sb a0, 228(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 582(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 326(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s2 |
| ; ZVFHMIN32-NEXT: sb a0, 227(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 580(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 324(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s7 |
| ; ZVFHMIN32-NEXT: feq.h s2, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 226(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 578(sp) |
| ; ZVFHMIN32-NEXT: lh a2, 322(sp) |
| ; ZVFHMIN32-NEXT: sb s2, 193(sp) |
| ; ZVFHMIN32-NEXT: sb a1, 194(sp) |
| ; ZVFHMIN32-NEXT: sb s4, 195(sp) |
| ; ZVFHMIN32-NEXT: sb a4, 196(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a3, 197(sp) |
| ; ZVFHMIN32-NEXT: sb t6, 198(sp) |
| ; ZVFHMIN32-NEXT: sb t5, 199(sp) |
| ; ZVFHMIN32-NEXT: sb a0, 225(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 766(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 510(sp) |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 18 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s s2, v8 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: li a3, 14 |
| ; ZVFHMIN32-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s t6, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 191(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 764(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 508(sp) |
| ; ZVFHMIN32-NEXT: vmv.x.s t5, v6 |
| ; ZVFHMIN32-NEXT: csrr a2, vlenb |
| ; ZVFHMIN32-NEXT: slli a2, a2, 2 |
| ; ZVFHMIN32-NEXT: add a2, sp, a2 |
| ; ZVFHMIN32-NEXT: addi a2, a2, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s a2, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 190(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 762(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 506(sp) |
| ; ZVFHMIN32-NEXT: csrr a3, vlenb |
| ; ZVFHMIN32-NEXT: slli a3, a3, 3 |
| ; ZVFHMIN32-NEXT: add a3, sp, a3 |
| ; ZVFHMIN32-NEXT: addi a3, a3, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (a3) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s a3, v8 |
| ; ZVFHMIN32-NEXT: csrr a4, vlenb |
| ; ZVFHMIN32-NEXT: li s3, 6 |
| ; ZVFHMIN32-NEXT: mul a4, a4, s3 |
| ; ZVFHMIN32-NEXT: add a4, sp, a4 |
| ; ZVFHMIN32-NEXT: addi a4, a4, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (a4) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s a4, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 189(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 760(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 504(sp) |
| ; ZVFHMIN32-NEXT: csrr s3, vlenb |
| ; ZVFHMIN32-NEXT: li s4, 12 |
| ; ZVFHMIN32-NEXT: mul s3, s3, s4 |
| ; ZVFHMIN32-NEXT: add s3, sp, s3 |
| ; ZVFHMIN32-NEXT: addi s3, s3, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (s3) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s s6, v8 |
| ; ZVFHMIN32-NEXT: csrr s3, vlenb |
| ; ZVFHMIN32-NEXT: li s4, 10 |
| ; ZVFHMIN32-NEXT: mul s3, s3, s4 |
| ; ZVFHMIN32-NEXT: add s3, sp, s3 |
| ; ZVFHMIN32-NEXT: addi s3, s3, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (s3) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s s4, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 188(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 758(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 502(sp) |
| ; ZVFHMIN32-NEXT: csrr s3, vlenb |
| ; ZVFHMIN32-NEXT: slli s3, s3, 4 |
| ; ZVFHMIN32-NEXT: add s3, sp, s3 |
| ; ZVFHMIN32-NEXT: addi s3, s3, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (s3) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s s5, v8 |
| ; ZVFHMIN32-NEXT: vmv.x.s s3, v16 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t4 |
| ; ZVFHMIN32-NEXT: sb a0, 187(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 756(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 500(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN32-NEXT: feq.h t4, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t3 |
| ; ZVFHMIN32-NEXT: sb a0, 186(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 754(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 498(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a3 |
| ; ZVFHMIN32-NEXT: feq.h t3, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t1 |
| ; ZVFHMIN32-NEXT: sb a0, 185(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 752(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 496(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a4 |
| ; ZVFHMIN32-NEXT: feq.h t1, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t2 |
| ; ZVFHMIN32-NEXT: sb a0, 184(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 750(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 494(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s6 |
| ; ZVFHMIN32-NEXT: feq.h a2, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t0 |
| ; ZVFHMIN32-NEXT: sb a0, 183(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 748(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 492(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s4 |
| ; ZVFHMIN32-NEXT: feq.h a3, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a7 |
| ; ZVFHMIN32-NEXT: sb a0, 182(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 746(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 490(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s5 |
| ; ZVFHMIN32-NEXT: feq.h a4, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a6 |
| ; ZVFHMIN32-NEXT: sb a0, 181(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 744(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 488(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, s3 |
| ; ZVFHMIN32-NEXT: feq.h a6, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a5 |
| ; ZVFHMIN32-NEXT: addi a1, sp, 848 |
| ; ZVFHMIN32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload |
| ; ZVFHMIN32-NEXT: vmv.x.s a1, v8 |
| ; ZVFHMIN32-NEXT: vsetivli zero, 1, e16, m2, ta, ma |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 15 |
| ; ZVFHMIN32-NEXT: vmv.x.s a5, v8 |
| ; ZVFHMIN32-NEXT: sb a0, 180(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 742(sp) |
| ; ZVFHMIN32-NEXT: lh a7, 486(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a7 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 179(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 740(sp) |
| ; ZVFHMIN32-NEXT: lh a7, 484(sp) |
| ; ZVFHMIN32-NEXT: sb a2, 140(sp) |
| ; ZVFHMIN32-NEXT: sb t1, 141(sp) |
| ; ZVFHMIN32-NEXT: sb t3, 142(sp) |
| ; ZVFHMIN32-NEXT: sb t4, 143(sp) |
| ; ZVFHMIN32-NEXT: sb a1, 136(sp) |
| ; ZVFHMIN32-NEXT: sb a6, 137(sp) |
| ; ZVFHMIN32-NEXT: sb a4, 138(sp) |
| ; ZVFHMIN32-NEXT: sb a3, 139(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a7 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 178(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 638(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 382(sp) |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 14 |
| ; ZVFHMIN32-NEXT: vmv.x.s t3, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 255(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 636(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 380(sp) |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 13 |
| ; ZVFHMIN32-NEXT: vmv.x.s t2, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 254(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 634(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 378(sp) |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 12 |
| ; ZVFHMIN32-NEXT: vmv.x.s t1, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 253(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 632(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 376(sp) |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 11 |
| ; ZVFHMIN32-NEXT: vmv.x.s t0, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 252(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 630(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 374(sp) |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 10 |
| ; ZVFHMIN32-NEXT: vmv.x.s a7, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 251(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 628(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 372(sp) |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 9 |
| ; ZVFHMIN32-NEXT: vmv.x.s a6, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: lw a1, 112(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: sb a0, 250(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 626(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 370(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a5 |
| ; ZVFHMIN32-NEXT: feq.h a3, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: lw a1, 116(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: sb a0, 249(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 624(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 368(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t3 |
| ; ZVFHMIN32-NEXT: feq.h a2, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: lw a1, 124(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: sb a0, 248(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 622(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 366(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t2 |
| ; ZVFHMIN32-NEXT: feq.h a4, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: lw a1, 108(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: sb a0, 247(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 620(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 364(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t1 |
| ; ZVFHMIN32-NEXT: feq.h a5, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: lw a1, 120(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN32-NEXT: sb a0, 246(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 618(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 362(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, t0 |
| ; ZVFHMIN32-NEXT: feq.h t0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, s2 |
| ; ZVFHMIN32-NEXT: sb a0, 245(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 616(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 360(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a7 |
| ; ZVFHMIN32-NEXT: feq.h a7, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t6 |
| ; ZVFHMIN32-NEXT: sb a0, 244(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 614(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 358(sp) |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a6 |
| ; ZVFHMIN32-NEXT: feq.h a6, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN32-NEXT: vslidedown.vi v8, v24, 8 |
| ; ZVFHMIN32-NEXT: vmv.x.s a1, v8 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: sb a0, 243(sp) |
| ; ZVFHMIN32-NEXT: lh a0, 612(sp) |
| ; ZVFHMIN32-NEXT: lh a1, 356(sp) |
| ; ZVFHMIN32-NEXT: sb a5, 204(sp) |
| ; ZVFHMIN32-NEXT: sb a4, 205(sp) |
| ; ZVFHMIN32-NEXT: sb a2, 206(sp) |
| ; ZVFHMIN32-NEXT: sb a3, 207(sp) |
| ; ZVFHMIN32-NEXT: feq.h a2, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a2, 200(sp) |
| ; ZVFHMIN32-NEXT: sb a6, 201(sp) |
| ; ZVFHMIN32-NEXT: sb a7, 202(sp) |
| ; ZVFHMIN32-NEXT: sb t0, 203(sp) |
| ; ZVFHMIN32-NEXT: li a2, 128 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN32-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN32-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN32-NEXT: sb a0, 242(sp) |
| ; ZVFHMIN32-NEXT: addi a0, sp, 128 |
| ; ZVFHMIN32-NEXT: vsetvli zero, a2, e8, m8, ta, ma |
| ; ZVFHMIN32-NEXT: vle8.v v8, (a0) |
| ; ZVFHMIN32-NEXT: vand.vi v8, v8, 1 |
| ; ZVFHMIN32-NEXT: vmsne.vi v0, v8, 0 |
| ; ZVFHMIN32-NEXT: addi sp, s0, -896 |
| ; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 896 |
| ; ZVFHMIN32-NEXT: lw ra, 892(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s0, 888(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s2, 884(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s3, 880(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s4, 876(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s5, 872(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s6, 868(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s7, 864(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s8, 860(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s9, 856(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s10, 852(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: lw s11, 848(sp) # 4-byte Folded Reload |
| ; ZVFHMIN32-NEXT: .cfi_restore ra |
| ; ZVFHMIN32-NEXT: .cfi_restore s0 |
| ; ZVFHMIN32-NEXT: .cfi_restore s2 |
| ; ZVFHMIN32-NEXT: .cfi_restore s3 |
| ; ZVFHMIN32-NEXT: .cfi_restore s4 |
| ; ZVFHMIN32-NEXT: .cfi_restore s5 |
| ; ZVFHMIN32-NEXT: .cfi_restore s6 |
| ; ZVFHMIN32-NEXT: .cfi_restore s7 |
| ; ZVFHMIN32-NEXT: .cfi_restore s8 |
| ; ZVFHMIN32-NEXT: .cfi_restore s9 |
| ; ZVFHMIN32-NEXT: .cfi_restore s10 |
| ; ZVFHMIN32-NEXT: .cfi_restore s11 |
| ; ZVFHMIN32-NEXT: addi sp, sp, 896 |
| ; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0 |
| ; ZVFHMIN32-NEXT: ret |
| ; |
| ; ZVFHMIN64-LABEL: fcmp_oeq_vv_v128f16: |
| ; ZVFHMIN64: # %bb.0: |
| ; ZVFHMIN64-NEXT: addi sp, sp, -896 |
| ; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 896 |
| ; ZVFHMIN64-NEXT: sd ra, 888(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s0, 880(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s2, 872(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s3, 864(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s4, 856(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s5, 848(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s6, 840(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s7, 832(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s8, 824(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s9, 816(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s10, 808(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: sd s11, 800(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: .cfi_offset ra, -8 |
| ; ZVFHMIN64-NEXT: .cfi_offset s0, -16 |
| ; ZVFHMIN64-NEXT: .cfi_offset s2, -24 |
| ; ZVFHMIN64-NEXT: .cfi_offset s3, -32 |
| ; ZVFHMIN64-NEXT: .cfi_offset s4, -40 |
| ; ZVFHMIN64-NEXT: .cfi_offset s5, -48 |
| ; ZVFHMIN64-NEXT: .cfi_offset s6, -56 |
| ; ZVFHMIN64-NEXT: .cfi_offset s7, -64 |
| ; ZVFHMIN64-NEXT: .cfi_offset s8, -72 |
| ; ZVFHMIN64-NEXT: .cfi_offset s9, -80 |
| ; ZVFHMIN64-NEXT: .cfi_offset s10, -88 |
| ; ZVFHMIN64-NEXT: .cfi_offset s11, -96 |
| ; ZVFHMIN64-NEXT: addi s0, sp, 896 |
| ; ZVFHMIN64-NEXT: .cfi_def_cfa s0, 0 |
| ; ZVFHMIN64-NEXT: csrr a1, vlenb |
| ; ZVFHMIN64-NEXT: li a2, 30 |
| ; ZVFHMIN64-NEXT: mul a1, a1, a2 |
| ; ZVFHMIN64-NEXT: sub sp, sp, a1 |
| ; ZVFHMIN64-NEXT: andi sp, sp, -128 |
| ; ZVFHMIN64-NEXT: addi a1, a0, 128 |
| ; ZVFHMIN64-NEXT: li a2, 64 |
| ; ZVFHMIN64-NEXT: addi a3, sp, 640 |
| ; ZVFHMIN64-NEXT: addi a4, sp, 384 |
| ; ZVFHMIN64-NEXT: addi a5, sp, 512 |
| ; ZVFHMIN64-NEXT: vsetvli zero, a2, e16, m8, ta, ma |
| ; ZVFHMIN64-NEXT: vle16.v v0, (a0) |
| ; ZVFHMIN64-NEXT: addi a0, sp, 256 |
| ; ZVFHMIN64-NEXT: vle16.v v24, (a1) |
| ; ZVFHMIN64-NEXT: vse16.v v8, (a3) |
| ; ZVFHMIN64-NEXT: vse16.v v0, (a4) |
| ; ZVFHMIN64-NEXT: vse16.v v16, (a5) |
| ; ZVFHMIN64-NEXT: vse16.v v24, (a0) |
| ; ZVFHMIN64-NEXT: lh a0, 704(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 448(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 160(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 702(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 446(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 159(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 700(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 444(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 158(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 698(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 442(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 157(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 696(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 440(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 156(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 694(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 438(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 155(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 692(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 436(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 154(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 690(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 434(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 153(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 688(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 432(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 152(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 686(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 430(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 151(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 684(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 428(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 150(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 682(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 426(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 149(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 680(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 424(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 148(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 678(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 422(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 147(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 676(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 420(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a2, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 146(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 674(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 418(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a2 |
| ; ZVFHMIN64-NEXT: vmv.x.s a2, v0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa4, fa3 |
| ; ZVFHMIN64-NEXT: sb a0, 145(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 672(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 416(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a2, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a2, 128(sp) |
| ; ZVFHMIN64-NEXT: sb a0, 144(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 576(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 320(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 224(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 574(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 318(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 223(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 572(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 316(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 222(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 570(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 314(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 221(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 568(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 312(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 220(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 566(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 310(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 219(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 564(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 308(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 218(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 562(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 306(sp) |
| ; ZVFHMIN64-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v8, 7 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 29 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v8, 6 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 28 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v8, 5 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 27 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v8, 4 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 26 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v8, 3 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 25 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v8, 2 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 24 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v8, 1 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 23 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vsetivli zero, 1, e16, m2, ta, ma |
| ; ZVFHMIN64-NEXT: vslidedown.vi v26, v8, 15 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v20, v8, 14 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v28, v8, 13 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v8, 12 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: slli a2, a2, 1 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v10, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v4, v8, 11 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v2, v8, 10 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v30, v8, 9 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v22, v8, 8 |
| ; ZVFHMIN64-NEXT: vmv.x.s a4, v16 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 217(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 560(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 304(sp) |
| ; ZVFHMIN64-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; ZVFHMIN64-NEXT: vslidedown.vi v3, v16, 7 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v31, v16, 6 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v5, v16, 5 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v23, v16, 4 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v16, 3 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 21 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v16, 2 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 20 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v16, 1 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 22 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vsetivli zero, 1, e16, m2, ta, ma |
| ; ZVFHMIN64-NEXT: vslidedown.vi v18, v16, 15 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v14, v16, 14 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v16, 13 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v12, v16, 12 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v10, v16, 11 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v6, v16, 10 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 18 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v6, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v6, v16, 9 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 14 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v6, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v6, v16, 8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 216(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 558(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 302(sp) |
| ; ZVFHMIN64-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; ZVFHMIN64-NEXT: vslidedown.vi v13, v0, 7 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v29, v0, 6 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v11, v0, 5 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v7, v0, 4 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v9, v0, 3 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v21, v0, 2 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v27, v0, 1 |
| ; ZVFHMIN64-NEXT: vsetivli zero, 1, e16, m2, ta, ma |
| ; ZVFHMIN64-NEXT: vslidedown.vi v16, v0, 15 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: slli a2, a2, 2 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v16, v0, 14 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: slli a2, a2, 3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v16, v0, 13 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 6 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v16, v0, 12 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 12 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v16, v0, 11 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 10 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v16, v0, 10 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: slli a2, a2, 4 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v16, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vslidedown.vi v16, v0, 9 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v0, v0, 8 |
| ; ZVFHMIN64-NEXT: addi a2, sp, 800 |
| ; ZVFHMIN64-NEXT: vs2r.v v0, (a2) # Unknown-size Folded Spill |
| ; ZVFHMIN64-NEXT: vmv.x.s t4, v26 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 215(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 556(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 300(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s t3, v20 |
| ; ZVFHMIN64-NEXT: vmv.x.s t1, v28 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 214(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 554(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 298(sp) |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: slli a2, a2, 1 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v0, (a2) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s t2, v0 |
| ; ZVFHMIN64-NEXT: vmv.x.s t0, v4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 213(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 552(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 296(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a7, v2 |
| ; ZVFHMIN64-NEXT: vmv.x.s a6, v30 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 212(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 550(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 294(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a5, v22 |
| ; ZVFHMIN64-NEXT: vmv.x.s a2, v18 |
| ; ZVFHMIN64-NEXT: sd a2, 96(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 211(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 548(sp) |
| ; ZVFHMIN64-NEXT: lh t5, 292(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a0, v14 |
| ; ZVFHMIN64-NEXT: sd a0, 104(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: vmv.x.s a0, v8 |
| ; ZVFHMIN64-NEXT: sd a0, 120(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t5 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a1, 210(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 546(sp) |
| ; ZVFHMIN64-NEXT: lh t5, 290(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a4 |
| ; ZVFHMIN64-NEXT: vmv.x.s a4, v24 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, t5 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa4, fa3 |
| ; ZVFHMIN64-NEXT: sb a1, 209(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 544(sp) |
| ; ZVFHMIN64-NEXT: lh t5, 288(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a4 |
| ; ZVFHMIN64-NEXT: feq.h a4, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t5 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a4, 192(sp) |
| ; ZVFHMIN64-NEXT: sb a1, 208(sp) |
| ; ZVFHMIN64-NEXT: lh t5, 738(sp) |
| ; ZVFHMIN64-NEXT: lh t6, 482(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a0, v12 |
| ; ZVFHMIN64-NEXT: sd a0, 88(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: vmv.x.s a0, v10 |
| ; ZVFHMIN64-NEXT: sd a0, 112(sp) # 8-byte Folded Spill |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN64-NEXT: feq.h t5, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb t5, 177(sp) |
| ; ZVFHMIN64-NEXT: lh t5, 736(sp) |
| ; ZVFHMIN64-NEXT: lh t6, 480(sp) |
| ; ZVFHMIN64-NEXT: csrr a0, vlenb |
| ; ZVFHMIN64-NEXT: li a1, 29 |
| ; ZVFHMIN64-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN64-NEXT: add a0, sp, a0 |
| ; ZVFHMIN64-NEXT: lh s5, 800(a0) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: csrr a0, vlenb |
| ; ZVFHMIN64-NEXT: li a1, 28 |
| ; ZVFHMIN64-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN64-NEXT: add a0, sp, a0 |
| ; ZVFHMIN64-NEXT: lh s6, 800(a0) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN64-NEXT: feq.h t5, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb t5, 176(sp) |
| ; ZVFHMIN64-NEXT: lh t5, 734(sp) |
| ; ZVFHMIN64-NEXT: lh t6, 478(sp) |
| ; ZVFHMIN64-NEXT: csrr a0, vlenb |
| ; ZVFHMIN64-NEXT: li a1, 27 |
| ; ZVFHMIN64-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN64-NEXT: add a0, sp, a0 |
| ; ZVFHMIN64-NEXT: lh s7, 800(a0) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: csrr a0, vlenb |
| ; ZVFHMIN64-NEXT: li a1, 26 |
| ; ZVFHMIN64-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN64-NEXT: add a0, sp, a0 |
| ; ZVFHMIN64-NEXT: lh s8, 800(a0) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN64-NEXT: feq.h t5, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb t5, 175(sp) |
| ; ZVFHMIN64-NEXT: lh t5, 732(sp) |
| ; ZVFHMIN64-NEXT: lh t6, 476(sp) |
| ; ZVFHMIN64-NEXT: csrr a0, vlenb |
| ; ZVFHMIN64-NEXT: li a1, 25 |
| ; ZVFHMIN64-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN64-NEXT: add a0, sp, a0 |
| ; ZVFHMIN64-NEXT: lh s4, 800(a0) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: csrr a0, vlenb |
| ; ZVFHMIN64-NEXT: li a1, 24 |
| ; ZVFHMIN64-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN64-NEXT: add a0, sp, a0 |
| ; ZVFHMIN64-NEXT: lh s3, 800(a0) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN64-NEXT: feq.h t5, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb t5, 174(sp) |
| ; ZVFHMIN64-NEXT: lh t6, 730(sp) |
| ; ZVFHMIN64-NEXT: lh s9, 474(sp) |
| ; ZVFHMIN64-NEXT: csrr a0, vlenb |
| ; ZVFHMIN64-NEXT: li a1, 23 |
| ; ZVFHMIN64-NEXT: mul a0, a0, a1 |
| ; ZVFHMIN64-NEXT: add a0, sp, a0 |
| ; ZVFHMIN64-NEXT: lh s2, 800(a0) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s t5, v3 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t6 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s9 |
| ; ZVFHMIN64-NEXT: feq.h t6, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb t6, 173(sp) |
| ; ZVFHMIN64-NEXT: lh s9, 728(sp) |
| ; ZVFHMIN64-NEXT: lh s10, 472(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s t6, v31 |
| ; ZVFHMIN64-NEXT: vmv.x.s ra, v13 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s9 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s10 |
| ; ZVFHMIN64-NEXT: feq.h s9, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb s9, 172(sp) |
| ; ZVFHMIN64-NEXT: lh s9, 726(sp) |
| ; ZVFHMIN64-NEXT: lh s10, 470(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a2, v29 |
| ; ZVFHMIN64-NEXT: vmv.x.s a3, v11 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s9 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s10 |
| ; ZVFHMIN64-NEXT: feq.h s9, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb s9, 171(sp) |
| ; ZVFHMIN64-NEXT: lh s10, 724(sp) |
| ; ZVFHMIN64-NEXT: lh s11, 468(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a4, v7 |
| ; ZVFHMIN64-NEXT: vmv.x.s s9, v9 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s10 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s11 |
| ; ZVFHMIN64-NEXT: feq.h s10, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb s10, 170(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 722(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 466(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s s10, v21 |
| ; ZVFHMIN64-NEXT: vmv.x.s s11, v27 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 169(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 720(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 464(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s5 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s6 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN64-NEXT: sb a0, 168(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 718(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 462(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, s7 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, s8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa1, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa0, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa1, fa0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa1, ra |
| ; ZVFHMIN64-NEXT: sb a0, 167(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 716(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa0, a2 |
| ; ZVFHMIN64-NEXT: lh a1, 460(sp) |
| ; ZVFHMIN64-NEXT: feq.h s5, fa5, fa1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa4, fa0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s4 |
| ; ZVFHMIN64-NEXT: sb a1, 166(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 714(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 458(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a3 |
| ; ZVFHMIN64-NEXT: feq.h a3, fa3, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a2 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa4, fa3 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s3 |
| ; ZVFHMIN64-NEXT: sb a1, 165(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 712(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 456(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a4 |
| ; ZVFHMIN64-NEXT: feq.h a4, fa2, fa3 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a2 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa3, fa2 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, s2 |
| ; ZVFHMIN64-NEXT: sb a1, 164(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 710(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 454(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, s9 |
| ; ZVFHMIN64-NEXT: feq.h s2, fa5, fa2 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a2 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa5, fa2 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s10 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, s11 |
| ; ZVFHMIN64-NEXT: sb a1, 163(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 708(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 452(sp) |
| ; ZVFHMIN64-NEXT: feq.h s3, fa4, fa5 |
| ; ZVFHMIN64-NEXT: feq.h s4, fa3, fa2 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a1, 162(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 706(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 450(sp) |
| ; ZVFHMIN64-NEXT: sb s4, 129(sp) |
| ; ZVFHMIN64-NEXT: sb s3, 130(sp) |
| ; ZVFHMIN64-NEXT: sb s2, 131(sp) |
| ; ZVFHMIN64-NEXT: sb a4, 132(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a3, 133(sp) |
| ; ZVFHMIN64-NEXT: sb a0, 134(sp) |
| ; ZVFHMIN64-NEXT: sb s5, 135(sp) |
| ; ZVFHMIN64-NEXT: sb a1, 161(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 610(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 354(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s s6, v5 |
| ; ZVFHMIN64-NEXT: vmv.x.s s5, v23 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 241(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 608(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 352(sp) |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 21 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: lh s4, 800(a2) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 20 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: lh s3, 800(a2) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 240(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 606(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 350(sp) |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 22 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: lh s2, 800(a2) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa4, fa3 |
| ; ZVFHMIN64-NEXT: sb a0, 239(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 604(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 348(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t6 |
| ; ZVFHMIN64-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 7 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN64-NEXT: sb a0, 238(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 602(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 346(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a2, v8 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 6 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN64-NEXT: sb a0, 237(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 600(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 344(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a3, v8 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 5 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN64-NEXT: sb a0, 236(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 598(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 342(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a4, v8 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN64-NEXT: sb a0, 235(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 596(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 340(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s s8, v8 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 3 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN64-NEXT: sb a0, 234(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 594(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 338(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s s9, v8 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 2 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN64-NEXT: sb a0, 233(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 592(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s a1, v8 |
| ; ZVFHMIN64-NEXT: lh t5, 336(sp) |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 1 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a0 |
| ; ZVFHMIN64-NEXT: vmv.x.s s7, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, t5 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa3, fa2 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa3, a2 |
| ; ZVFHMIN64-NEXT: sb a0, 232(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 590(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa2, a3 |
| ; ZVFHMIN64-NEXT: lh a2, 334(sp) |
| ; ZVFHMIN64-NEXT: feq.h t5, fa5, fa3 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: feq.h t6, fa4, fa2 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s6 |
| ; ZVFHMIN64-NEXT: sb a0, 231(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 588(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 332(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a4 |
| ; ZVFHMIN64-NEXT: feq.h a3, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s5 |
| ; ZVFHMIN64-NEXT: sb a0, 230(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 586(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 330(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s8 |
| ; ZVFHMIN64-NEXT: feq.h a4, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s4 |
| ; ZVFHMIN64-NEXT: sb a0, 229(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 584(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 328(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s9 |
| ; ZVFHMIN64-NEXT: feq.h s4, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s3 |
| ; ZVFHMIN64-NEXT: sb a0, 228(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 582(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 326(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s2 |
| ; ZVFHMIN64-NEXT: sb a0, 227(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 580(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 324(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s7 |
| ; ZVFHMIN64-NEXT: feq.h s2, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 226(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 578(sp) |
| ; ZVFHMIN64-NEXT: lh a2, 322(sp) |
| ; ZVFHMIN64-NEXT: sb s2, 193(sp) |
| ; ZVFHMIN64-NEXT: sb a1, 194(sp) |
| ; ZVFHMIN64-NEXT: sb s4, 195(sp) |
| ; ZVFHMIN64-NEXT: sb a4, 196(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a3, 197(sp) |
| ; ZVFHMIN64-NEXT: sb t6, 198(sp) |
| ; ZVFHMIN64-NEXT: sb t5, 199(sp) |
| ; ZVFHMIN64-NEXT: sb a0, 225(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 766(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 510(sp) |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 18 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s s2, v8 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: li a3, 14 |
| ; ZVFHMIN64-NEXT: mul a2, a2, a3 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s t6, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 191(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 764(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 508(sp) |
| ; ZVFHMIN64-NEXT: vmv.x.s t5, v6 |
| ; ZVFHMIN64-NEXT: csrr a2, vlenb |
| ; ZVFHMIN64-NEXT: slli a2, a2, 2 |
| ; ZVFHMIN64-NEXT: add a2, sp, a2 |
| ; ZVFHMIN64-NEXT: addi a2, a2, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s a2, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 190(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 762(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 506(sp) |
| ; ZVFHMIN64-NEXT: csrr a3, vlenb |
| ; ZVFHMIN64-NEXT: slli a3, a3, 3 |
| ; ZVFHMIN64-NEXT: add a3, sp, a3 |
| ; ZVFHMIN64-NEXT: addi a3, a3, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (a3) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s a3, v8 |
| ; ZVFHMIN64-NEXT: csrr a4, vlenb |
| ; ZVFHMIN64-NEXT: li s3, 6 |
| ; ZVFHMIN64-NEXT: mul a4, a4, s3 |
| ; ZVFHMIN64-NEXT: add a4, sp, a4 |
| ; ZVFHMIN64-NEXT: addi a4, a4, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (a4) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s a4, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 189(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 760(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 504(sp) |
| ; ZVFHMIN64-NEXT: csrr s3, vlenb |
| ; ZVFHMIN64-NEXT: li s4, 12 |
| ; ZVFHMIN64-NEXT: mul s3, s3, s4 |
| ; ZVFHMIN64-NEXT: add s3, sp, s3 |
| ; ZVFHMIN64-NEXT: addi s3, s3, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (s3) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s s6, v8 |
| ; ZVFHMIN64-NEXT: csrr s3, vlenb |
| ; ZVFHMIN64-NEXT: li s4, 10 |
| ; ZVFHMIN64-NEXT: mul s3, s3, s4 |
| ; ZVFHMIN64-NEXT: add s3, sp, s3 |
| ; ZVFHMIN64-NEXT: addi s3, s3, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (s3) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s s4, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 188(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 758(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 502(sp) |
| ; ZVFHMIN64-NEXT: csrr s3, vlenb |
| ; ZVFHMIN64-NEXT: slli s3, s3, 4 |
| ; ZVFHMIN64-NEXT: add s3, sp, s3 |
| ; ZVFHMIN64-NEXT: addi s3, s3, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (s3) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s s5, v8 |
| ; ZVFHMIN64-NEXT: vmv.x.s s3, v16 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t4 |
| ; ZVFHMIN64-NEXT: sb a0, 187(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 756(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 500(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a2 |
| ; ZVFHMIN64-NEXT: feq.h t4, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t3 |
| ; ZVFHMIN64-NEXT: sb a0, 186(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 754(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 498(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a3 |
| ; ZVFHMIN64-NEXT: feq.h t3, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t1 |
| ; ZVFHMIN64-NEXT: sb a0, 185(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 752(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 496(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a4 |
| ; ZVFHMIN64-NEXT: feq.h t1, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t2 |
| ; ZVFHMIN64-NEXT: sb a0, 184(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 750(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 494(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s6 |
| ; ZVFHMIN64-NEXT: feq.h a2, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t0 |
| ; ZVFHMIN64-NEXT: sb a0, 183(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 748(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 492(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s4 |
| ; ZVFHMIN64-NEXT: feq.h a3, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a7 |
| ; ZVFHMIN64-NEXT: sb a0, 182(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 746(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 490(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s5 |
| ; ZVFHMIN64-NEXT: feq.h a4, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a6 |
| ; ZVFHMIN64-NEXT: sb a0, 181(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 744(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 488(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, s3 |
| ; ZVFHMIN64-NEXT: feq.h a6, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a5 |
| ; ZVFHMIN64-NEXT: addi a1, sp, 800 |
| ; ZVFHMIN64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload |
| ; ZVFHMIN64-NEXT: vmv.x.s a1, v8 |
| ; ZVFHMIN64-NEXT: vsetivli zero, 1, e16, m2, ta, ma |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 15 |
| ; ZVFHMIN64-NEXT: vmv.x.s a5, v8 |
| ; ZVFHMIN64-NEXT: sb a0, 180(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 742(sp) |
| ; ZVFHMIN64-NEXT: lh a7, 486(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a1, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a7 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 179(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 740(sp) |
| ; ZVFHMIN64-NEXT: lh a7, 484(sp) |
| ; ZVFHMIN64-NEXT: sb a2, 140(sp) |
| ; ZVFHMIN64-NEXT: sb t1, 141(sp) |
| ; ZVFHMIN64-NEXT: sb t3, 142(sp) |
| ; ZVFHMIN64-NEXT: sb t4, 143(sp) |
| ; ZVFHMIN64-NEXT: sb a1, 136(sp) |
| ; ZVFHMIN64-NEXT: sb a6, 137(sp) |
| ; ZVFHMIN64-NEXT: sb a4, 138(sp) |
| ; ZVFHMIN64-NEXT: sb a3, 139(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a7 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 178(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 638(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 382(sp) |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 14 |
| ; ZVFHMIN64-NEXT: vmv.x.s t3, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 255(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 636(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 380(sp) |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 13 |
| ; ZVFHMIN64-NEXT: vmv.x.s t2, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 254(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 634(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 378(sp) |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 12 |
| ; ZVFHMIN64-NEXT: vmv.x.s t1, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 253(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 632(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 376(sp) |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 11 |
| ; ZVFHMIN64-NEXT: vmv.x.s t0, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 252(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 630(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 374(sp) |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 10 |
| ; ZVFHMIN64-NEXT: vmv.x.s a7, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 251(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 628(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 372(sp) |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 9 |
| ; ZVFHMIN64-NEXT: vmv.x.s a6, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: ld a1, 96(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: sb a0, 250(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 626(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 370(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a5 |
| ; ZVFHMIN64-NEXT: feq.h a3, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: ld a1, 104(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: sb a0, 249(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 624(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 368(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t3 |
| ; ZVFHMIN64-NEXT: feq.h a2, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: ld a1, 120(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: sb a0, 248(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 622(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 366(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t2 |
| ; ZVFHMIN64-NEXT: feq.h a4, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: ld a1, 88(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: sb a0, 247(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 620(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 364(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t1 |
| ; ZVFHMIN64-NEXT: feq.h a5, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: ld a1, 112(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a1 |
| ; ZVFHMIN64-NEXT: sb a0, 246(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 618(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 362(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, t0 |
| ; ZVFHMIN64-NEXT: feq.h t0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, s2 |
| ; ZVFHMIN64-NEXT: sb a0, 245(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 616(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 360(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a7 |
| ; ZVFHMIN64-NEXT: feq.h a7, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t6 |
| ; ZVFHMIN64-NEXT: sb a0, 244(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 614(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 358(sp) |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a6 |
| ; ZVFHMIN64-NEXT: feq.h a6, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, t5 |
| ; ZVFHMIN64-NEXT: vslidedown.vi v8, v24, 8 |
| ; ZVFHMIN64-NEXT: vmv.x.s a1, v8 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: sb a0, 243(sp) |
| ; ZVFHMIN64-NEXT: lh a0, 612(sp) |
| ; ZVFHMIN64-NEXT: lh a1, 356(sp) |
| ; ZVFHMIN64-NEXT: sb a5, 204(sp) |
| ; ZVFHMIN64-NEXT: sb a4, 205(sp) |
| ; ZVFHMIN64-NEXT: sb a2, 206(sp) |
| ; ZVFHMIN64-NEXT: sb a3, 207(sp) |
| ; ZVFHMIN64-NEXT: feq.h a2, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a2, 200(sp) |
| ; ZVFHMIN64-NEXT: sb a6, 201(sp) |
| ; ZVFHMIN64-NEXT: sb a7, 202(sp) |
| ; ZVFHMIN64-NEXT: sb t0, 203(sp) |
| ; ZVFHMIN64-NEXT: li a2, 128 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa5, a0 |
| ; ZVFHMIN64-NEXT: fmv.h.x fa4, a1 |
| ; ZVFHMIN64-NEXT: feq.h a0, fa5, fa4 |
| ; ZVFHMIN64-NEXT: sb a0, 242(sp) |
| ; ZVFHMIN64-NEXT: addi a0, sp, 128 |
| ; ZVFHMIN64-NEXT: vsetvli zero, a2, e8, m8, ta, ma |
| ; ZVFHMIN64-NEXT: vle8.v v8, (a0) |
| ; ZVFHMIN64-NEXT: vand.vi v8, v8, 1 |
| ; ZVFHMIN64-NEXT: vmsne.vi v0, v8, 0 |
| ; ZVFHMIN64-NEXT: addi sp, s0, -896 |
| ; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 896 |
| ; ZVFHMIN64-NEXT: ld ra, 888(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s0, 880(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s2, 872(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s3, 864(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s4, 856(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s5, 848(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s6, 840(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s7, 832(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s8, 824(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s9, 816(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s10, 808(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: ld s11, 800(sp) # 8-byte Folded Reload |
| ; ZVFHMIN64-NEXT: .cfi_restore ra |
| ; ZVFHMIN64-NEXT: .cfi_restore s0 |
| ; ZVFHMIN64-NEXT: .cfi_restore s2 |
| ; ZVFHMIN64-NEXT: .cfi_restore s3 |
| ; ZVFHMIN64-NEXT: .cfi_restore s4 |
| ; ZVFHMIN64-NEXT: .cfi_restore s5 |
| ; ZVFHMIN64-NEXT: .cfi_restore s6 |
| ; ZVFHMIN64-NEXT: .cfi_restore s7 |
| ; ZVFHMIN64-NEXT: .cfi_restore s8 |
| ; ZVFHMIN64-NEXT: .cfi_restore s9 |
| ; ZVFHMIN64-NEXT: .cfi_restore s10 |
| ; ZVFHMIN64-NEXT: .cfi_restore s11 |
| ; ZVFHMIN64-NEXT: addi sp, sp, 896 |
| ; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0 |
| ; ZVFHMIN64-NEXT: ret |
| %v = call <128 x i1> @llvm.vp.fcmp.v128f16(<128 x half> %va, <128 x half> %vb, metadata !"oeq", <128 x i1> %m, i32 %evl) |
| ret <128 x i1> %v |
| } |
| |
| declare <7 x i1> @llvm.vp.fcmp.v7f64(<7 x double>, <7 x double>, metadata, <7 x i1>, i32) |
| |
; vp.fcmp "oeq" on <7 x double> vectors: the non-power-of-two element count is
; lowered the same as v8f64 -- one masked vmfeq.vv at e64/m4, result moved to v0.
define <7 x i1> @fcmp_oeq_vv_v7f64(<7 x double> %va, <7 x double> %vb, <7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_oeq_vv_v7f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
  %v = call <7 x i1> @llvm.vp.fcmp.v7f64(<7 x double> %va, <7 x double> %vb, metadata !"oeq", <7 x i1> %m, i32 %evl)
  ret <7 x i1> %v
}
| |
| declare <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double>, <8 x double>, metadata, <8 x i1>, i32) |
| |
; vp.fcmp "oeq" vector-vector: a single masked vmfeq.vv; the mask result is
; produced in v16 and relocated into v0 for the return value.
define <8 x i1> @fcmp_oeq_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_oeq_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"oeq", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "oeq" against a splatted scalar: the insertelement+shufflevector splat
; folds into the scalar-operand form vmfeq.vf (no explicit splat is emitted).
define <8 x i1> @fcmp_oeq_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_oeq_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"oeq", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped-operand variant (splat on the LHS): "oeq" is commutative, so the same
; vmfeq.vf is expected as in the non-swapped test.
define <8 x i1> @fcmp_oeq_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_oeq_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"oeq", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ogt" vector-vector: lowered as vmflt.vv with the operands reversed
; (va > vb <=> vb < va), since RVV has no vmfgt.vv encoding.
define <8 x i1> @fcmp_ogt_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ogt_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v12, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ogt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ogt" against a splatted scalar: folds to the scalar form vmfgt.vf.
define <8 x i1> @fcmp_ogt_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ogt_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ogt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "ogt" (splat > va): mirrored into vmflt.vf on the vector operand.
define <8 x i1> @fcmp_ogt_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ogt_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"ogt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "oge" vector-vector: lowered as vmfle.vv with operands reversed
; (va >= vb <=> vb <= va).
define <8 x i1> @fcmp_oge_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_oge_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vv v16, v12, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"oge", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "oge" against a splatted scalar: folds to vmfge.vf.
define <8 x i1> @fcmp_oge_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_oge_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"oge", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "oge" (splat >= va): mirrored into vmfle.vf on the vector operand.
define <8 x i1> @fcmp_oge_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_oge_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"oge", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "olt" vector-vector: maps directly onto masked vmflt.vv.
define <8 x i1> @fcmp_olt_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_olt_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"olt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "olt" against a splatted scalar: folds to vmflt.vf.
define <8 x i1> @fcmp_olt_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_olt_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"olt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "olt" (splat < va): mirrored into vmfgt.vf on the vector operand.
define <8 x i1> @fcmp_olt_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_olt_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"olt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ole" vector-vector: maps directly onto masked vmfle.vv.
define <8 x i1> @fcmp_ole_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ole_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ole", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ole" against a splatted scalar: folds to vmfle.vf.
define <8 x i1> @fcmp_ole_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ole_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ole", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "ole" (splat <= va): mirrored into vmfge.vf on the vector operand.
define <8 x i1> @fcmp_ole_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ole_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"ole", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "one" (ordered not-equal): no single compare exists, so it is expanded
; to two masked less-than compares in both directions OR'ed together.
define <8 x i1> @fcmp_one_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_one_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmflt.vv v17, v12, v8, v0.t
; CHECK-NEXT: vmor.mm v0, v17, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"one", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; "one" against a splatted scalar: expanded to vmflt.vf + vmfgt.vf then vmor.mm.
define <8 x i1> @fcmp_one_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_one_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v13, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"one", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "one": same two-compare expansion with the vmfgt/vmflt roles exchanged.
define <8 x i1> @fcmp_one_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_one_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v13, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"one", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ord" (both operands not NaN): each operand is self-compared with
; vmfeq.vv (NaN != NaN) and the two masks are AND'ed.
define <8 x i1> @fcmp_ord_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ord_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v16, v12, v12, v0.t
; CHECK-NEXT: vmfeq.vv v12, v8, v8, v0.t
; CHECK-NEXT: vmand.mm v0, v12, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ord", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; "ord" against a scalar: the splat is materialized with vfmv.v.f (at VL=8) so
; the scalar's NaN-ness can be self-checked with vmfeq.vf, then AND'ed with the
; vector's self-equality mask.
define <8 x i1> @fcmp_ord_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ord_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vf v16, v12, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v12, v8, v8, v0.t
; CHECK-NEXT: vmand.mm v0, v12, v16
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ord", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "ord": identical expansion, with the vmand operand order mirroring the
; swapped IR operands.
define <8 x i1> @fcmp_ord_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ord_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vf v16, v12, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v12, v8, v8, v0.t
; CHECK-NEXT: vmand.mm v0, v16, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"ord", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ueq" (unordered or equal): computed as NOT(one), i.e. both
; directional vmflt.vv compares NOR'ed together.
define <8 x i1> @fcmp_ueq_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ueq_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmflt.vv v17, v12, v8, v0.t
; CHECK-NEXT: vmnor.mm v0, v17, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ueq", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; "ueq" against a splatted scalar: vmflt.vf + vmfgt.vf NOR'ed (NOT(one)).
define <8 x i1> @fcmp_ueq_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ueq_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmnor.mm v0, v13, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ueq", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "ueq": same NOR expansion with the vmfgt/vmflt roles exchanged.
define <8 x i1> @fcmp_ueq_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ueq_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmnor.mm v0, v13, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"ueq", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ugt" (unordered or greater): computed as NOT(ole) via vmfle.vv + vmnot.m.
define <8 x i1> @fcmp_ugt_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ugt_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; "ugt" against a splatted scalar: NOT(ole) via vmfle.vf + vmnot.m.
define <8 x i1> @fcmp_ugt_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ugt_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "ugt" (splat > va): NOT(splat <= va) becomes vmfge.vf + vmnot.m.
define <8 x i1> @fcmp_ugt_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ugt_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"ugt", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "uge" (unordered or greater-equal): NOT(olt) via vmflt.vv + vmnot.m.
define <8 x i1> @fcmp_uge_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_uge_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; "uge" against a splatted scalar: NOT(olt) via vmflt.vf + vmnot.m.
define <8 x i1> @fcmp_uge_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_uge_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "uge" (splat >= va): NOT(splat < va) becomes vmfgt.vf + vmnot.m.
define <8 x i1> @fcmp_uge_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_uge_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"uge", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ult" (unordered or less): NOT(oge) via reversed vmfle.vv + vmnot.m.
define <8 x i1> @fcmp_ult_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ult_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vv v16, v12, v8, v0.t
; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; "ult" against a splatted scalar: NOT(oge) via vmfge.vf + vmnot.m.
define <8 x i1> @fcmp_ult_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ult_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "ult" (splat < va): NOT(splat >= va) becomes vmfle.vf + vmnot.m.
define <8 x i1> @fcmp_ult_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ult_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"ult", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "ule" (unordered or less-equal): NOT(ogt) via reversed vmflt.vv + vmnot.m.
define <8 x i1> @fcmp_ule_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ule_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v12, v8, v0.t
; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ule", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; "ule" against a splatted scalar: NOT(ogt) via vmfgt.vf + vmnot.m.
define <8 x i1> @fcmp_ule_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ule_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ule", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; Swapped "ule" (splat <= va): NOT(splat > va) becomes vmflt.vf + vmnot.m.
define <8 x i1> @fcmp_ule_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_ule_vf_swap_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
  %elt.head = insertelement <8 x double> poison, double %b, i32 0
  %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"ule", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
; vp.fcmp "une" vector-vector: maps directly onto masked vmfne.vv.
define <8 x i1> @fcmp_une_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fcmp_une_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfne.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
  %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"une", <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}
| |
| ; "une" compare of %va against a splat of %b: the splat folds into the |
| ; scalar-operand form, so a single masked vmfne.vf is expected. |
| define <8 x i1> @fcmp_une_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: fcmp_une_vf_v8f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma |
| ; CHECK-NEXT: vmfne.vf v12, v8, fa0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v12 |
| ; CHECK-NEXT: ret |
| %elt.head = insertelement <8 x double> poison, double %b, i32 0 |
| %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"une", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Swapped-operand variant (splat on the LHS). "une" is commutative, so the |
| ; expected code is identical to the non-swapped case: one masked vmfne.vf. |
| define <8 x i1> @fcmp_une_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: fcmp_une_vf_swap_v8f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma |
| ; CHECK-NEXT: vmfne.vf v12, v8, fa0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v12 |
| ; CHECK-NEXT: ret |
| %elt.head = insertelement <8 x double> poison, double %b, i32 0 |
| %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"une", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; "uno" (unordered: true iff either element is NaN) has no direct compare |
| ; instruction; expected lowering NaN-tests each operand via a self-compare |
| ; (x != x is true only for NaN) and ORs the two masks with vmor.mm. |
| define <8 x i1> @fcmp_uno_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: fcmp_uno_vv_v8f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma |
| ; CHECK-NEXT: vmfne.vv v16, v12, v12, v0.t |
| ; CHECK-NEXT: vmfne.vv v12, v8, v8, v0.t |
| ; CHECK-NEXT: vmor.mm v0, v12, v16 |
| ; CHECK-NEXT: ret |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"uno", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; "uno" against a splat scalar: the scalar is materialized as a vector with |
| ; vfmv.v.f (at a fixed VL of 8, the full vector length), then NaN-tested |
| ; against itself with vmfne.vf; %va is NaN-tested with a vmfne.vv |
| ; self-compare, and the two masks are ORed with vmor.mm. |
| define <8 x i1> @fcmp_uno_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: fcmp_uno_vf_v8f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma |
| ; CHECK-NEXT: vfmv.v.f v12, fa0 |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma |
| ; CHECK-NEXT: vmfne.vf v16, v12, fa0, v0.t |
| ; CHECK-NEXT: vmfne.vv v12, v8, v8, v0.t |
| ; CHECK-NEXT: vmor.mm v0, v12, v16 |
| ; CHECK-NEXT: ret |
| %elt.head = insertelement <8 x double> poison, double %b, i32 0 |
| %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"uno", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| ; Swapped-operand "uno" variant. "uno" is commutative, so the same |
| ; splat + dual self-compare sequence is expected; only the vmor.mm operand |
| ; order differs from the non-swapped test, reflecting the original operand |
| ; order of the intrinsic call. |
| define <8 x i1> @fcmp_uno_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: fcmp_uno_vf_swap_v8f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma |
| ; CHECK-NEXT: vfmv.v.f v12, fa0 |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma |
| ; CHECK-NEXT: vmfne.vf v16, v12, fa0, v0.t |
| ; CHECK-NEXT: vmfne.vv v12, v8, v8, v0.t |
| ; CHECK-NEXT: vmor.mm v0, v16, v12 |
| ; CHECK-NEXT: ret |
| %elt.head = insertelement <8 x double> poison, double %b, i32 0 |
| %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer |
| %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %vb, <8 x double> %va, metadata !"uno", <8 x i1> %m, i32 %evl) |
| ret <8 x i1> %v |
| } |
| |
| declare <32 x i1> @llvm.vp.fcmp.v32f64(<32 x double>, <32 x double>, metadata, <32 x i1>, i32) |
| |
| ; v32f64 exceeds the largest register group used here, so the compare is |
| ; split into two 16-element halves. The second source operand (%vb) arrives |
| ; partly in v16 and partly in memory (loaded via a0/a0+128), so both halves |
| ; are spilled to a vlenb-scaled stack slot and reloaded around the first |
| ; compare. The mask for the high half is extracted with vslidedown.vi; each |
| ; half's EVL is the clamp of %evl to 16 (branch over .LBB87_2) and the |
| ; saturated remainder (addi/sltu/and sequence). The two i1 half-results are |
| ; concatenated with vslideup.vi before being moved into v0. |
| ; NOTE(review): register numbers and frame layout below are autogenerated — |
| ; regenerate with update_llc_test_checks.py rather than editing by hand. |
| define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: fcmp_oeq_vv_v32f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: slli a1, a1, 4 |
| ; CHECK-NEXT: sub sp, sp, a1 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: slli a1, a1, 3 |
| ; CHECK-NEXT: add a1, sp, a1 |
| ; CHECK-NEXT: addi a1, a1, 16 |
| ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill |
| ; CHECK-NEXT: addi a1, a0, 128 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma |
| ; CHECK-NEXT: vle64.v v16, (a1) |
| ; CHECK-NEXT: addi a1, sp, 16 |
| ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill |
| ; CHECK-NEXT: vle64.v v16, (a0) |
| ; CHECK-NEXT: li a1, 16 |
| ; CHECK-NEXT: mv a0, a2 |
| ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v24, v0, 2 |
| ; CHECK-NEXT: bltu a2, a1, .LBB87_2 |
| ; CHECK-NEXT: # %bb.1: |
| ; CHECK-NEXT: li a0, 16 |
| ; CHECK-NEXT: .LBB87_2: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma |
| ; CHECK-NEXT: vmfeq.vv v7, v8, v16, v0.t |
| ; CHECK-NEXT: addi a0, a2, -16 |
| ; CHECK-NEXT: sltu a1, a2, a0 |
| ; CHECK-NEXT: addi a1, a1, -1 |
| ; CHECK-NEXT: and a0, a1, a0 |
| ; CHECK-NEXT: vmv1r.v v0, v24 |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: slli a1, a1, 3 |
| ; CHECK-NEXT: add a1, sp, a1 |
| ; CHECK-NEXT: addi a1, a1, 16 |
| ; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload |
| ; CHECK-NEXT: addi a1, sp, 16 |
| ; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma |
| ; CHECK-NEXT: vmfeq.vv v8, v16, v24, v0.t |
| ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma |
| ; CHECK-NEXT: vslideup.vi v7, v8, 2 |
| ; CHECK-NEXT: vmv1r.v v0, v7 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 4 |
| ; CHECK-NEXT: add sp, sp, a0 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| %v = call <32 x i1> @llvm.vp.fcmp.v32f64(<32 x double> %va, <32 x double> %vb, metadata !"oeq", <32 x i1> %m, i32 %evl) |
| ret <32 x i1> %v |
| } |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; ZVFH32: {{.*}} |
| ; ZVFH64: {{.*}} |