| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s |
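;; Coverage for the @llvm.loongarch.lasx.xvfcmp.* intrinsics: every quiet (c*)
;; and signaling (s*) comparison condition is exercised for both <8 x float>
;; and <4 x double> operands, and each call is expected to lower to a single
;; xvfcmp.<cond>.{s,d} instruction, as reflected in the CHECK lines below.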
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.caf.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_caf_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_caf_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.caf.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.caf.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.caf.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_caf_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_caf_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.caf.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.caf.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cun.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_cun_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cun_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cun.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cun.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cun.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_cun_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cun_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cun.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cun.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.ceq.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_ceq_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_ceq_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.ceq.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.ceq.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.ceq.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_ceq_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_ceq_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.ceq.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.ceq.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cueq.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_cueq_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cueq_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cueq.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cueq.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cueq.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_cueq_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cueq_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cueq.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cueq.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.clt.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_clt_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_clt_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.clt.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.clt.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.clt.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_clt_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_clt_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.clt.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.clt.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cult.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_cult_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cult_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cult.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cult.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cult.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_cult_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cult_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cult.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cult.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cle.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_cle_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cle_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cle.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cle.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cle.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_cle_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cle_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cle.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cle.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cule.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_cule_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cule_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cule.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cule.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cule.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_cule_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cule_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cule.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cule.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cne.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_cne_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cne_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cne.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cne.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cne.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_cne_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cne_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cne.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cne.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cor.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_cor_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cor_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cor.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cor.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cor.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_cor_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cor_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cor.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cor.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cune.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_cune_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cune_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cune.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cune.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cune.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_cune_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_cune_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.cune.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cune.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.saf.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_saf_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_saf_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.saf.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.saf.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.saf.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_saf_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_saf_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.saf.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.saf.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sun.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_sun_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sun_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sun.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sun.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sun.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_sun_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sun_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sun.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sun.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.seq.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_seq_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_seq_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.seq.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.seq.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.seq.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_seq_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_seq_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.seq.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.seq.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sueq.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_sueq_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sueq_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sueq.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sueq.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sueq.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_sueq_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sueq_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sueq.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sueq.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.slt.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_slt_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_slt_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.slt.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.slt.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.slt.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_slt_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_slt_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.slt.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.slt.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sult.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_sult_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sult_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sult.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sult.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sult.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_sult_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sult_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sult.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sult.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sle.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_sle_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sle_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sle.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sle.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sle.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_sle_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sle_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sle.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sle.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sule.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_sule_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sule_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sule.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sule.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sule.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_sule_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sule_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sule.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sule.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sne.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_sne_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sne_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sne.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sne.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sne.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_sne_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sne_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sne.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sne.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sor.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_sor_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sor_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sor.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sor.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sor.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_sor_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sor_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sor.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sor.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sune.s(<8 x float>, <8 x float>) |
| |
| define <8 x i32> @lasx_xvfcmp_sune_s(<8 x float> %va, <8 x float> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sune_s: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sune.s $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sune.s(<8 x float> %va, <8 x float> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sune.d(<4 x double>, <4 x double>) |
| |
| define <4 x i64> @lasx_xvfcmp_sune_d(<4 x double> %va, <4 x double> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvfcmp_sune_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvfcmp.sune.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sune.d(<4 x double> %va, <4 x double> %vb) |
| ret <4 x i64> %res |
| } |