| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD |
| ; RUN: llc -mtriple aarch64-none-linux-gnu -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI |
| |
| ; CHECK-GI: warning: Instruction selection used fallback path for uaddlv_v8i8_urshr |
| |
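; This file checks that a widening pairwise add ({u,s}addlp) whose result is
; consumed by an add reduction (llvm.vector.reduce.add) is selected as a
; single add-long-across-vector instruction ({u,s}addlv), and that the
; uaddlv result is consumed without unnecessary moves between register files.
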
| declare <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8>) nounwind readnone |
| declare <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8>) nounwind readnone |
| declare <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16>) nounwind readnone |
| declare <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32>) nounwind readnone |
| declare <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16>) nounwind readnone |
| |
| declare <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8>) nounwind readnone |
| declare <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8>) nounwind readnone |
| declare <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16>) nounwind readnone |
| declare <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32>) nounwind readnone |
| declare <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16>) nounwind readnone |
| |
| declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) nounwind readnone |
| declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) nounwind readnone |
| declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) nounwind readnone |
| declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) nounwind readnone |
| declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) nounwind readnone |
| |
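; uaddlp + vector.reduce.add over the widened halves folds to one uaddlv
; across the original elements, e.g. v8i8 -> uaddlv h0, v0.8b.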
| define i16 @uaddlv4h_from_v8i8(ptr %A) nounwind { |
| ; CHECK-LABEL: uaddlv4h_from_v8i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr d0, [x0] |
| ; CHECK-NEXT: uaddlv h0, v0.8b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <8 x i8>, ptr %A |
| %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1) |
| %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3) |
| ret i16 %tmp5 |
| } |
| |
| define i16 @uaddlv16b_from_v16i8(ptr %A) nounwind { |
| ; CHECK-LABEL: uaddlv16b_from_v16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr q0, [x0] |
| ; CHECK-NEXT: uaddlv h0, v0.16b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <16 x i8>, ptr %A |
| %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1) |
| %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3) |
| ret i16 %tmp5 |
| } |
| |
| define i32 @uaddlv8h_from_v8i16(ptr %A) nounwind { |
| ; CHECK-LABEL: uaddlv8h_from_v8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr q0, [x0] |
| ; CHECK-NEXT: uaddlv s0, v0.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <8 x i16>, ptr %A |
| %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1) |
| %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3) |
| ret i32 %tmp5 |
| } |
| |
| define i64 @uaddlv4s_from_v4i32(ptr %A) nounwind { |
| ; CHECK-LABEL: uaddlv4s_from_v4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr q0, [x0] |
| ; CHECK-NEXT: uaddlv d0, v0.4s |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <4 x i32>, ptr %A |
| %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1) |
| %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3) |
| ret i64 %tmp5 |
| } |
| |
| define i32 @uaddlv4h_from_v4i16(ptr %A) nounwind { |
| ; CHECK-LABEL: uaddlv4h_from_v4i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr d0, [x0] |
| ; CHECK-NEXT: uaddlv s0, v0.4h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <4 x i16>, ptr %A |
| %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1) |
| %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3) |
| ret i32 %tmp5 |
| } |
| |
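; The signed variants fold the same way: saddlp + vector.reduce.add becomes
; a single saddlv.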
| define i16 @saddlv4h_from_v8i8(ptr %A) nounwind { |
| ; CHECK-LABEL: saddlv4h_from_v8i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr d0, [x0] |
| ; CHECK-NEXT: saddlv h0, v0.8b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <8 x i8>, ptr %A |
| %tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1) |
| %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3) |
| ret i16 %tmp5 |
| } |
| |
| define i16 @saddlv16b_from_v16i8(ptr %A) nounwind { |
| ; CHECK-LABEL: saddlv16b_from_v16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr q0, [x0] |
| ; CHECK-NEXT: saddlv h0, v0.16b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <16 x i8>, ptr %A |
| %tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1) |
| %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3) |
| ret i16 %tmp5 |
| } |
| |
| define i32 @saddlv8h_from_v8i16(ptr %A) nounwind { |
| ; CHECK-LABEL: saddlv8h_from_v8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr q0, [x0] |
| ; CHECK-NEXT: saddlv s0, v0.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <8 x i16>, ptr %A |
| %tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1) |
| %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3) |
| ret i32 %tmp5 |
| } |
| |
| define i64 @saddlv4s_from_v4i32(ptr %A) nounwind { |
| ; CHECK-LABEL: saddlv4s_from_v4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr q0, [x0] |
| ; CHECK-NEXT: saddlv d0, v0.4s |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <4 x i32>, ptr %A |
| %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1) |
| %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3) |
| ret i64 %tmp5 |
| } |
| |
| define i32 @saddlv4h_from_v4i16(ptr %A) nounwind { |
| ; CHECK-LABEL: saddlv4h_from_v4i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ldr d0, [x0] |
| ; CHECK-NEXT: saddlv s0, v0.4h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %tmp1 = load <4 x i16>, ptr %A |
| %tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1) |
| %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3) |
| ret i32 %tmp5 |
| } |
| |
| declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>) nounwind readnone |
| |
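; In the next two tests the uaddlv result fits in 16 bits (at most 16 byte
; lanes are summed), so SelectionDAG proves the mask with 0xffff redundant
; and drops it; GlobalISel does not track these known bits and keeps the and.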
| define i32 @uaddlv_known_bits_v8i8(<8 x i8> %a) { |
| ; CHECK-SD-LABEL: uaddlv_known_bits_v8i8: |
| ; CHECK-SD: // %bb.0: |
| ; CHECK-SD-NEXT: uaddlv h0, v0.8b |
| ; CHECK-SD-NEXT: fmov w0, s0 |
| ; CHECK-SD-NEXT: ret |
| ; |
| ; CHECK-GI-LABEL: uaddlv_known_bits_v8i8: |
| ; CHECK-GI: // %bb.0: |
| ; CHECK-GI-NEXT: uaddlv h0, v0.8b |
| ; CHECK-GI-NEXT: fmov w8, s0 |
| ; CHECK-GI-NEXT: and w0, w8, #0xffff |
| ; CHECK-GI-NEXT: ret |
| %tmp1 = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a) |
| %tmp2 = and i32 %tmp1, 65535 |
| ret i32 %tmp2 |
| } |
| |
| declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>) nounwind readnone |
| |
| define i32 @uaddlv_known_bits_v16i8(<16 x i8> %a) { |
| ; CHECK-SD-LABEL: uaddlv_known_bits_v16i8: |
| ; CHECK-SD: // %bb.0: // %entry |
| ; CHECK-SD-NEXT: uaddlv h0, v0.16b |
| ; CHECK-SD-NEXT: fmov w0, s0 |
| ; CHECK-SD-NEXT: ret |
| ; |
| ; CHECK-GI-LABEL: uaddlv_known_bits_v16i8: |
| ; CHECK-GI: // %bb.0: // %entry |
| ; CHECK-GI-NEXT: uaddlv h0, v0.16b |
| ; CHECK-GI-NEXT: fmov w8, s0 |
| ; CHECK-GI-NEXT: and w0, w8, #0xffff |
| ; CHECK-GI-NEXT: ret |
| entry: |
| %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a) |
| %0 = and i32 %vaddlv.i, 65535 |
| ret i32 %0 |
| } |
| |
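; The uaddlv result is broadcast to every lane: SelectionDAG dups straight
; from the vector register (v0.h[0]), while GlobalISel round-trips the value
; through a GPR before the dup.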
| define dso_local <8 x i8> @uaddlv_v8i8_dup(<8 x i8> %a) { |
| ; CHECK-SD-LABEL: uaddlv_v8i8_dup: |
| ; CHECK-SD: // %bb.0: // %entry |
| ; CHECK-SD-NEXT: uaddlv h0, v0.8b |
| ; CHECK-SD-NEXT: dup v0.8h, v0.h[0] |
| ; CHECK-SD-NEXT: rshrn v0.8b, v0.8h, #3 |
| ; CHECK-SD-NEXT: ret |
| ; |
| ; CHECK-GI-LABEL: uaddlv_v8i8_dup: |
| ; CHECK-GI: // %bb.0: // %entry |
| ; CHECK-GI-NEXT: uaddlv h0, v0.8b |
| ; CHECK-GI-NEXT: fmov w8, s0 |
| ; CHECK-GI-NEXT: dup v0.8h, w8 |
| ; CHECK-GI-NEXT: rshrn v0.8b, v0.8h, #3 |
| ; CHECK-GI-NEXT: ret |
| entry: |
| %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a) |
| %0 = trunc i32 %vaddlv.i to i16 |
| %vecinit.i = insertelement <8 x i16> undef, i16 %0, i64 0 |
| %vecinit7.i = shufflevector <8 x i16> %vecinit.i, <8 x i16> poison, <8 x i32> zeroinitializer |
| %vrshrn_n2 = tail call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %vecinit7.i, i32 3) |
| ret <8 x i8> %vrshrn_n2 |
| } |
| |
| declare <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32) |
| declare i64 @llvm.aarch64.neon.urshl.i64(i64, i64) |
| |
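; urshl by a negative amount is a rounding shift right, selected here as
; urshr #3 on the scalar FPR so the uaddlv result never leaves the vector
; registers. GlobalISel falls back for this function (see the warning check
; at the top of the file).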
| define <8 x i8> @uaddlv_v8i8_urshr(<8 x i8> %a) { |
| ; CHECK-LABEL: uaddlv_v8i8_urshr: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: uaddlv h0, v0.8b |
| ; CHECK-NEXT: urshr d0, d0, #3 |
| ; CHECK-NEXT: dup v0.8b, v0.b[0] |
| ; CHECK-NEXT: ret |
| entry: |
| %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a) |
| %0 = and i32 %vaddlv.i, 65535 |
| %conv = zext i32 %0 to i64 |
| %vrshr_n = tail call i64 @llvm.aarch64.neon.urshl.i64(i64 %conv, i64 -3) |
| %conv1 = trunc i64 %vrshr_n to i8 |
| %vecinit.i = insertelement <8 x i8> undef, i8 %conv1, i64 0 |
| %vecinit7.i = shufflevector <8 x i8> %vecinit.i, <8 x i8> poison, <8 x i32> zeroinitializer |
| ret <8 x i8> %vecinit7.i |
| } |
| |
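; In the two tests below, the dup and the logical shift right both stay in
; the vector domain, so no transfer to a GPR is needed after the uaddlv.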
| define <4 x i32> @uaddlv_dup_v4i16(<4 x i16> %a) { |
| ; CHECK-LABEL: uaddlv_dup_v4i16: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: uaddlv s0, v0.4h |
| ; CHECK-NEXT: dup v0.4s, v0.s[0] |
| ; CHECK-NEXT: ushr v0.4s, v0.4s, #3 |
| ; CHECK-NEXT: ret |
| entry: |
| %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a) |
| %vecinit.i = insertelement <4 x i32> undef, i32 %vaddlv.i, i64 0 |
| %vecinit7.i = shufflevector <4 x i32> %vecinit.i, <4 x i32> poison, <4 x i32> zeroinitializer |
| %vshr_n = lshr <4 x i32> %vecinit7.i, <i32 3, i32 3, i32 3, i32 3> |
| ret <4 x i32> %vshr_n |
| } |
| |
| define <4 x i32> @uaddlv_dup_v8i16(<8 x i16> %a) { |
| ; CHECK-LABEL: uaddlv_dup_v8i16: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: uaddlv s0, v0.8h |
| ; CHECK-NEXT: dup v0.4s, v0.s[0] |
| ; CHECK-NEXT: ushr v0.4s, v0.4s, #3 |
| ; CHECK-NEXT: ret |
| entry: |
| %vaddlv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a) |
| %vecinit.i = insertelement <4 x i32> undef, i32 %vaddlv.i, i64 0 |
| %vecinit7.i = shufflevector <4 x i32> %vecinit.i, <4 x i32> poison, <4 x i32> zeroinitializer |
| %vshr_n = lshr <4 x i32> %vecinit7.i, <i32 3, i32 3, i32 3, i32 3> |
| ret <4 x i32> %vshr_n |
| } |
| |
| declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16>) |
| declare i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16>) |