| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s |
| |
| declare <32 x i8> @llvm.loongarch.lasx.xvmod.b(<32 x i8>, <32 x i8>) |
| |
; Verify that @llvm.loongarch.lasx.xvmod.b is selected to a single
; xvmod.b instruction operating on the 256-bit LASX registers.
define <32 x i8> @lasx_xvmod_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
; CHECK-LABEL: lasx_xvmod_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvmod.b $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <32 x i8> @llvm.loongarch.lasx.xvmod.b(<32 x i8> %va, <32 x i8> %vb)
  ret <32 x i8> %res
}
| |
| declare <16 x i16> @llvm.loongarch.lasx.xvmod.h(<16 x i16>, <16 x i16>) |
| |
; Verify that @llvm.loongarch.lasx.xvmod.h is selected to a single
; xvmod.h instruction (<16 x i16> element width).
define <16 x i16> @lasx_xvmod_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
; CHECK-LABEL: lasx_xvmod_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvmod.h $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i16> @llvm.loongarch.lasx.xvmod.h(<16 x i16> %va, <16 x i16> %vb)
  ret <16 x i16> %res
}
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvmod.w(<8 x i32>, <8 x i32>) |
| |
; Verify that @llvm.loongarch.lasx.xvmod.w is selected to a single
; xvmod.w instruction (<8 x i32> element width).
define <8 x i32> @lasx_xvmod_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
; CHECK-LABEL: lasx_xvmod_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvmod.w $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i32> @llvm.loongarch.lasx.xvmod.w(<8 x i32> %va, <8 x i32> %vb)
  ret <8 x i32> %res
}
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvmod.d(<4 x i64>, <4 x i64>) |
| |
; Verify that @llvm.loongarch.lasx.xvmod.d is selected to a single
; xvmod.d instruction (<4 x i64> element width).
define <4 x i64> @lasx_xvmod_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
; CHECK-LABEL: lasx_xvmod_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvmod.d $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i64> @llvm.loongarch.lasx.xvmod.d(<4 x i64> %va, <4 x i64> %vb)
  ret <4 x i64> %res
}
| |
| declare <32 x i8> @llvm.loongarch.lasx.xvmod.bu(<32 x i8>, <32 x i8>) |
| |
; Verify that @llvm.loongarch.lasx.xvmod.bu is selected to a single
; xvmod.bu instruction (the "u"-suffixed variant of xvmod.b).
define <32 x i8> @lasx_xvmod_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
; CHECK-LABEL: lasx_xvmod_bu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvmod.bu $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <32 x i8> @llvm.loongarch.lasx.xvmod.bu(<32 x i8> %va, <32 x i8> %vb)
  ret <32 x i8> %res
}
| |
| declare <16 x i16> @llvm.loongarch.lasx.xvmod.hu(<16 x i16>, <16 x i16>) |
| |
; Verify that @llvm.loongarch.lasx.xvmod.hu is selected to a single
; xvmod.hu instruction (the "u"-suffixed variant of xvmod.h).
define <16 x i16> @lasx_xvmod_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
; CHECK-LABEL: lasx_xvmod_hu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvmod.hu $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i16> @llvm.loongarch.lasx.xvmod.hu(<16 x i16> %va, <16 x i16> %vb)
  ret <16 x i16> %res
}
| |
| declare <8 x i32> @llvm.loongarch.lasx.xvmod.wu(<8 x i32>, <8 x i32>) |
| |
; Verify that @llvm.loongarch.lasx.xvmod.wu is selected to a single
; xvmod.wu instruction (the "u"-suffixed variant of xvmod.w).
define <8 x i32> @lasx_xvmod_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
; CHECK-LABEL: lasx_xvmod_wu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvmod.wu $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i32> @llvm.loongarch.lasx.xvmod.wu(<8 x i32> %va, <8 x i32> %vb)
  ret <8 x i32> %res
}
| |
| declare <4 x i64> @llvm.loongarch.lasx.xvmod.du(<4 x i64>, <4 x i64>) |
| |
; Verify that @llvm.loongarch.lasx.xvmod.du is selected to a single
; xvmod.du instruction (the "u"-suffixed variant of xvmod.d).
define <4 x i64> @lasx_xvmod_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
; CHECK-LABEL: lasx_xvmod_du:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvmod.du $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i64> @llvm.loongarch.lasx.xvmod.du(<4 x i64> %va, <4 x i64> %vb)
  ret <4 x i64> %res
}