| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s |
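
;; Test that the LASX widening even/odd subtraction intrinsics
;; (llvm.loongarch.lasx.xvsubwev.* and llvm.loongarch.lasx.xvsubwod.*,
;; including the unsigned *.bu/*.hu/*.wu/*.du variants) are lowered to the
;; corresponding xvsubwev.*/xvsubwod.* instructions.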
| |
| declare <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.b(<32 x i8>, <32 x i8>) |
| |
| define <16 x i16> @lasx_xvsubwev_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwev_h_b: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwev.h.b $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.b(<32 x i8> %va, <32 x i8> %vb) |
| ret <16 x i16> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.h(<16 x i16>, <16 x i16>) |
| |
| define <8 x i32> @lasx_xvsubwev_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwev_w_h: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwev.w.h $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.h(<16 x i16> %va, <16 x i16> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.w(<8 x i32>, <8 x i32>) |
| |
| define <4 x i64> @lasx_xvsubwev_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwev_d_w: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwev.d.w $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.w(<8 x i32> %va, <8 x i32> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.d(<4 x i64>, <4 x i64>) |
| |
| define <4 x i64> @lasx_xvsubwev_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwev_q_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwev.q.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.d(<4 x i64> %va, <4 x i64> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.bu(<32 x i8>, <32 x i8>) |
| |
| define <16 x i16> @lasx_xvsubwev_h_bu(<32 x i8> %va, <32 x i8> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwev_h_bu: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwev.h.bu $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.bu(<32 x i8> %va, <32 x i8> %vb) |
| ret <16 x i16> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.hu(<16 x i16>, <16 x i16>) |
| |
| define <8 x i32> @lasx_xvsubwev_w_hu(<16 x i16> %va, <16 x i16> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwev_w_hu: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwev.w.hu $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.hu(<16 x i16> %va, <16 x i16> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.wu(<8 x i32>, <8 x i32>) |
| |
| define <4 x i64> @lasx_xvsubwev_d_wu(<8 x i32> %va, <8 x i32> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwev_d_wu: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwev.d.wu $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.wu(<8 x i32> %va, <8 x i32> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.du(<4 x i64>, <4 x i64>) |
| |
| define <4 x i64> @lasx_xvsubwev_q_du(<4 x i64> %va, <4 x i64> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwev_q_du: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwev.q.du $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.du(<4 x i64> %va, <4 x i64> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.b(<32 x i8>, <32 x i8>) |
| |
| define <16 x i16> @lasx_xvsubwod_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwod_h_b: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwod.h.b $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.b(<32 x i8> %va, <32 x i8> %vb) |
| ret <16 x i16> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.h(<16 x i16>, <16 x i16>) |
| |
| define <8 x i32> @lasx_xvsubwod_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwod_w_h: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwod.w.h $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.h(<16 x i16> %va, <16 x i16> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.w(<8 x i32>, <8 x i32>) |
| |
| define <4 x i64> @lasx_xvsubwod_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwod_d_w: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwod.d.w $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.w(<8 x i32> %va, <8 x i32> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.d(<4 x i64>, <4 x i64>) |
| |
| define <4 x i64> @lasx_xvsubwod_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwod_q_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwod.q.d $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.d(<4 x i64> %va, <4 x i64> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.bu(<32 x i8>, <32 x i8>) |
| |
| define <16 x i16> @lasx_xvsubwod_h_bu(<32 x i8> %va, <32 x i8> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwod_h_bu: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwod.h.bu $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.bu(<32 x i8> %va, <32 x i8> %vb) |
| ret <16 x i16> %res |
| } |
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.hu(<16 x i16>, <16 x i16>) |
| |
| define <8 x i32> @lasx_xvsubwod_w_hu(<16 x i16> %va, <16 x i16> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwod_w_hu: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwod.w.hu $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.hu(<16 x i16> %va, <16 x i16> %vb) |
| ret <8 x i32> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.wu(<8 x i32>, <8 x i32>) |
| |
| define <4 x i64> @lasx_xvsubwod_d_wu(<8 x i32> %va, <8 x i32> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwod_d_wu: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwod.d.wu $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.wu(<8 x i32> %va, <8 x i32> %vb) |
| ret <4 x i64> %res |
| } |
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.du(<4 x i64>, <4 x i64>) |
| |
| define <4 x i64> @lasx_xvsubwod_q_du(<4 x i64> %va, <4 x i64> %vb) nounwind { |
| ; CHECK-LABEL: lasx_xvsubwod_q_du: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xvsubwod.q.du $xr0, $xr0, $xr1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.du(<4 x i64> %va, <4 x i64> %vb) |
| ret <4 x i64> %res |
| } |