; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
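; Check that each LASX bit-clear intrinsic is selected as a single instruction:
; first the vector-index forms xvbitclr.{b/h/w/d}, then the immediate-index
; forms xvbitclri.{b/h/w/d}.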

declare <32 x i8> @llvm.loongarch.lasx.xvbitclr.b(<32 x i8>, <32 x i8>)

define <32 x i8> @lasx_xvbitclr_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
; CHECK-LABEL: lasx_xvbitclr_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvbitclr.b $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitclr.b(<32 x i8> %va, <32 x i8> %vb)
  ret <32 x i8> %res
}

declare <16 x i16> @llvm.loongarch.lasx.xvbitclr.h(<16 x i16>, <16 x i16>)

define <16 x i16> @lasx_xvbitclr_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
; CHECK-LABEL: lasx_xvbitclr_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvbitclr.h $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i16> @llvm.loongarch.lasx.xvbitclr.h(<16 x i16> %va, <16 x i16> %vb)
  ret <16 x i16> %res
}

declare <8 x i32> @llvm.loongarch.lasx.xvbitclr.w(<8 x i32>, <8 x i32>)

define <8 x i32> @lasx_xvbitclr_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
; CHECK-LABEL: lasx_xvbitclr_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvbitclr.w $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i32> @llvm.loongarch.lasx.xvbitclr.w(<8 x i32> %va, <8 x i32> %vb)
  ret <8 x i32> %res
}

declare <4 x i64> @llvm.loongarch.lasx.xvbitclr.d(<4 x i64>, <4 x i64>)

define <4 x i64> @lasx_xvbitclr_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
; CHECK-LABEL: lasx_xvbitclr_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvbitclr.d $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i64> @llvm.loongarch.lasx.xvbitclr.d(<4 x i64> %va, <4 x i64> %vb)
  ret <4 x i64> %res
}

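; Immediate forms: the bit index is an immediate operand instead of a vector.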
declare <32 x i8> @llvm.loongarch.lasx.xvbitclri.b(<32 x i8>, i32)

define <32 x i8> @lasx_xvbitclri_b(<32 x i8> %va) nounwind {
; CHECK-LABEL: lasx_xvbitclri_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvbitclri.b $xr0, $xr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitclri.b(<32 x i8> %va, i32 1)
  ret <32 x i8> %res
}

declare <16 x i16> @llvm.loongarch.lasx.xvbitclri.h(<16 x i16>, i32)

define <16 x i16> @lasx_xvbitclri_h(<16 x i16> %va) nounwind {
; CHECK-LABEL: lasx_xvbitclri_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvbitclri.h $xr0, $xr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i16> @llvm.loongarch.lasx.xvbitclri.h(<16 x i16> %va, i32 1)
  ret <16 x i16> %res
}

declare <8 x i32> @llvm.loongarch.lasx.xvbitclri.w(<8 x i32>, i32)

define <8 x i32> @lasx_xvbitclri_w(<8 x i32> %va) nounwind {
; CHECK-LABEL: lasx_xvbitclri_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvbitclri.w $xr0, $xr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i32> @llvm.loongarch.lasx.xvbitclri.w(<8 x i32> %va, i32 1)
  ret <8 x i32> %res
}

declare <4 x i64> @llvm.loongarch.lasx.xvbitclri.d(<4 x i64>, i32)

define <4 x i64> @lasx_xvbitclri_d(<4 x i64> %va) nounwind {
; CHECK-LABEL: lasx_xvbitclri_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvbitclri.d $xr0, $xr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i64> @llvm.loongarch.lasx.xvbitclri.d(<4 x i64> %va, i32 1)
  ret <4 x i64> %res
}