; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

;
; SABD
;
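; The sext+sub+abs+trunc sequences below should be combined into a signed ABD
; (absolute difference) node and lowered to vmin.vv/vmax.vv/vsub.vv at the
; original element width, avoiding the wider intermediate type the IR implies.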

define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sabd_b:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmin.vv v12, v8, v10
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
  %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
  %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
  %sub = sub <vscale x 16 x i16> %a.sext, %b.sext
  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %trunc
}

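; For i1 operands the difference is non-zero exactly when the inputs differ,
; so the absolute difference reduces to a mask exclusive-or (vmxor.mm) merged
; into a 0/1 vector.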
define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: sabd_b_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: ret
  %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
  %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
  %sub = sub <vscale x 16 x i8> %a.sext, %b.sext
  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
  ret <vscale x 16 x i8> %abs
}

define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sabd_h:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vmin.vv v12, v8, v10
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
  %a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
  %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
  %sub = sub <vscale x 8 x i32> %a.sext, %b.sext
  %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
  %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %trunc
}

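; In the *_promoted_ops tests the absolute difference fits in the narrow
; element type, so the operation is performed at the narrow width and the
; non-negative result is zero-extended (vzext.vf2) to the wider result type.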
define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: sabd_h_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vmin.vv v10, v8, v9
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v10, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
  %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
  %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
  %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
  ret <vscale x 8 x i16> %abs
}

define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sabd_s:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vmin.vv v12, v8, v10
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
  %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
  %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
  %sub = sub <vscale x 4 x i64> %a.sext, %b.sext
  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %trunc
}

define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
; CHECK-LABEL: sabd_s_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmin.vv v10, v8, v9
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v10, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
  %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
  %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
  %sub = sub <vscale x 4 x i32> %a.sext, %b.sext
  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
  ret <vscale x 4 x i32> %abs
}

; FIXME: Crashes during type legalization if enabled; nxv2i128 is not a legal type.
;; define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
;; %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
;; %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
;; %sub = sub <vscale x 2 x i128> %a.sext, %b.sext
;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
;; ret <vscale x 2 x i64> %trunc
;; }

define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: sabd_d_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vmin.vv v10, v8, v9
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v10, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
  %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
  %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
  %sub = sub <vscale x 2 x i64> %a.sext, %b.sext
  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
  ret <vscale x 2 x i64> %abs
}

;
; UABD
;
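; As with the signed cases above, the zext+sub+abs+trunc sequences should be
; combined into unsigned ABD nodes, selecting the unsigned
; vminu.vv/vmaxu.vv forms.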

define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uabd_b:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vminu.vv v12, v8, v10
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
  %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
  %sub = sub <vscale x 16 x i16> %a.zext, %b.zext
  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %trunc
}

define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: uabd_b_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
  %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
  %sub = sub <vscale x 16 x i8> %a.zext, %b.zext
  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
  ret <vscale x 16 x i8> %abs
}

define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uabd_h:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vminu.vv v12, v8, v10
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
  %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
  %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
  %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
  %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %trunc
}

define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: uabd_h_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vminu.vv v10, v8, v9
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v10, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
  %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
  ret <vscale x 8 x i16> %abs
}

define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uabd_s:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vminu.vv v12, v8, v10
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
  %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
  %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %trunc
}

define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
; CHECK-LABEL: uabd_s_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vminu.vv v10, v8, v9
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v10, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
  %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
  %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
  ret <vscale x 4 x i32> %abs
}

; FIXME: Crashes during type legalization if enabled; nxv2i128 is not a legal type.
;; define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
;; %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
;; %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
;; %sub = sub <vscale x 2 x i128> %a.zext, %b.zext
;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
;; ret <vscale x 2 x i64> %trunc
;; }

define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: uabd_d_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vminu.vv v10, v8, v9
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v10, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
  %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
  %sub = sub <vscale x 2 x i64> %a.zext, %b.zext
  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
  ret <vscale x 2 x i64> %abs
}

; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a and
; %b have differing types.
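; The narrower operand is zero-extended to match (vzext.vf4) and the unsigned
; ABD is still formed at the i32 element width.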
define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <vscale x 4 x i8> %b) {
; CHECK-LABEL: uabd_non_matching_extension:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vzext.vf4 v12, v10
; CHECK-NEXT: vminu.vv v10, v8, v12
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
  %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>
  %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %trunc
}

; Test the situation where isLegal(ISD::ABD, typeof(%a.zext)) returns true but
; %a and %b have differing types.
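; Here the i8 operand is zero-extended to the promoted i16 width, the unsigned
; ABD is formed there, and the result is zero-extended again to i32.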
define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a, <vscale x 4 x i16> %b) {
; CHECK-LABEL: uabd_non_matching_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vminu.vv v8, v10, v9
; CHECK-NEXT: vmaxu.vv v9, v10, v9
; CHECK-NEXT: vsub.vv v10, v9, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
  %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
  %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
  ret <vscale x 4 x i32> %abs
}

; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a
; and %b are extended differently (zext vs. sext).
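; Because the extension kinds do not match, no ABD node is formed; the abs is
; instead lowered generically as max(x, -x) (vrsub.vi to negate, then vmax.vv).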
define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
; CHECK-LABEL: uabd_non_matching_promotion:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vzext.vf4 v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vsext.vf2 v8, v9
; CHECK-NEXT: vwsub.wv v10, v10, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v10, 0
; CHECK-NEXT: vmax.vv v8, v10, v8
; CHECK-NEXT: ret
  %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
  %b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
  %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
  ret <vscale x 4 x i32> %abs
}

declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)

declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)

declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)

declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)

declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}