| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple aarch64-apple-darwin | FileCheck --check-prefixes=CHECK,NOFP16 %s |
| ; RUN: llc < %s -mtriple aarch64-apple-darwin -mattr=+v8.2a,+fullfp16 | FileCheck --check-prefixes=CHECK,FP16 %s |
| |
| target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" |
| |
| ;============ v1f32 |
| |
| ; WidenVecRes same |
; copysign with matching element types: build per-lane sign mask 0x7FFFFFFF
; via MVNI (NOT of 0x80000000) and bit-select (BIF) the sign bit of %b into %a.
define <1 x float> @test_copysign_v1f32_v1f32(<1 x float> %a, <1 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v1f32_v1f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    mvni.2s v2, #128, lsl #24
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %r = call <1 x float> @llvm.copysign.v1f32(<1 x float> %a, <1 x float> %b)
  ret <1 x float> %r
}
| |
| ; WidenVecRes mismatched |
; Sign source is <1 x double>: narrowed with FCVTN first, then the same
; mask-and-bit-select sequence as the matching-type case.
define <1 x float> @test_copysign_v1f32_v1f64(<1 x float> %a, <1 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v1f32_v1f64:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    ; kill: def $d1 killed $d1 def $q1
; CHECK-NEXT:    mvni.2s v2, #128, lsl #24
; CHECK-NEXT:    fcvtn v1.2s, v1.2d
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <1 x double> %b to <1 x float>
  %r = call <1 x float> @llvm.copysign.v1f32(<1 x float> %a, <1 x float> %tmp0)
  ret <1 x float> %r
}
| |
| declare <1 x float> @llvm.copysign.v1f32(<1 x float> %a, <1 x float> %b) #0 |
| |
| ;============ v1f64 |
| |
| ; WidenVecOp #1 |
; f64 lanes have no MVNI sign-mask encoding, so the mask is built as
; FNEG of all-ones (0x8000...0 per lane inverted by BIF's select sense);
; the f32 sign source is widened with FCVTL first.
define <1 x double> @test_copysign_v1f64_v1f32(<1 x double> %a, <1 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v1f64_v1f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    movi.2d v2, #0xffffffffffffffff
; CHECK-NEXT:    fcvtl v1.2d, v1.2s
; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fneg.2d v2, v2
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
  %tmp0 = fpext <1 x float> %b to <1 x double>
  %r = call <1 x double> @llvm.copysign.v1f64(<1 x double> %a, <1 x double> %tmp0)
  ret <1 x double> %r
}
| |
; WidenVecOp same: v1f64 is widened to the q-sized v2f64 pattern
; (movi/fneg mask + bif), with kill markers for the d->q sub-register views.
define <1 x double> @test_copysign_v1f64_v1f64(<1 x double> %a, <1 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v1f64_v1f64:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    movi.2d v2, #0xffffffffffffffff
; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    ; kill: def $d1 killed $d1 def $q1
; CHECK-NEXT:    fneg.2d v2, v2
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
  %r = call <1 x double> @llvm.copysign.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %r
}
| |
| declare <1 x double> @llvm.copysign.v1f64(<1 x double> %a, <1 x double> %b) #0 |
| |
| ;============ v2f32 |
| |
; Natively legal v2f32 case: two instructions, mask + bit-select.
define <2 x float> @test_copysign_v2f32_v2f32(<2 x float> %a, <2 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v2f32_v2f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    mvni.2s v2, #128, lsl #24
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b)
  ret <2 x float> %r
}
| |
; Mismatched sign source: <2 x double> is narrowed with FCVTN before the
; v2f32 mask/select sequence.
define <2 x float> @test_copysign_v2f32_v2f64(<2 x float> %a, <2 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v2f32_v2f64:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    fcvtn v1.2s, v1.2d
; CHECK-NEXT:    mvni.2s v2, #128, lsl #24
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <2 x double> %b to <2 x float>
  %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %tmp0)
  ret <2 x float> %r
}
| |
| declare <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) #0 |
| |
| ;============ v4f32 |
| |
; Natively legal q-register v4f32 case: 4s mask + 16b bit-select.
define <4 x float> @test_copysign_v4f32_v4f32(<4 x float> %a, <4 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v4f32_v4f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    mvni.4s v2, #128, lsl #24
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ret
  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)
  ret <4 x float> %r
}
| |
| ; SplitVecOp #1 |
; <4 x double> sign source arrives split across two q-regs; it is narrowed
; with the FCVTN/FCVTN2 pair before the v4f32 mask/select.
define <4 x float> @test_copysign_v4f32_v4f64(<4 x float> %a, <4 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v4f32_v4f64:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    fcvtn v1.2s, v1.2d
; CHECK-NEXT:    fcvtn2 v1.4s, v2.2d
; CHECK-NEXT:    mvni.4s v2, #128, lsl #24
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <4 x double> %b to <4 x float>
  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %tmp0)
  ret <4 x float> %r
}
| |
| declare <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) #0 |
| |
| ;============ v2f64 |
| |
; Mismatched sign source: <2 x float> is widened with FCVTL, then the f64
; movi/fneg sign mask selects the sign bits via BIF.
; NOTE(review): renamed from test_copysign_v2f64_v232 — "v232" was a typo
; for "v2f32"; every other test here follows test_copysign_<res>_<src>.
define <2 x double> @test_copysign_v2f64_v2f32(<2 x double> %a, <2 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v2f64_v2f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    movi.2d v2, #0xffffffffffffffff
; CHECK-NEXT:    fcvtl v1.2d, v1.2s
; CHECK-NEXT:    fneg.2d v2, v2
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fpext <2 x float> %b to <2 x double>
  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %tmp0)
  ret <2 x double> %r
}
| |
; Natively legal v2f64 case: f64 sign mask built as FNEG of all-ones,
; then a single bit-select.
define <2 x double> @test_copysign_v2f64_v2f64(<2 x double> %a, <2 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v2f64_v2f64:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    movi.2d v2, #0xffffffffffffffff
; CHECK-NEXT:    fneg.2d v2, v2
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ret
  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b)
  ret <2 x double> %r
}
| |
| declare <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) #0 |
| |
| ;============ v4f64 |
| |
| ; SplitVecRes mismatched |
; Result is split across two q-regs; the f32 sign source is widened with
; FCVTL/FCVTL2 and one shared mask feeds both bit-selects.
define <4 x double> @test_copysign_v4f64_v4f32(<4 x double> %a, <4 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v4f64_v4f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    movi.2d v3, #0xffffffffffffffff
; CHECK-NEXT:    fcvtl v4.2d, v2.2s
; CHECK-NEXT:    fcvtl2 v2.2d, v2.4s
; CHECK-NEXT:    fneg.2d v3, v3
; CHECK-NEXT:    bif.16b v1, v2, v3
; CHECK-NEXT:    bif.16b v0, v4, v3
; CHECK-NEXT:    ret
  %tmp0 = fpext <4 x float> %b to <4 x double>
  %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %tmp0)
  ret <4 x double> %r
}
| |
| ; SplitVecRes same |
; Both operands split across register pairs; one mask, two bit-selects.
define <4 x double> @test_copysign_v4f64_v4f64(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v4f64_v4f64:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    movi.2d v4, #0xffffffffffffffff
; CHECK-NEXT:    fneg.2d v4, v4
; CHECK-NEXT:    bif.16b v0, v2, v4
; CHECK-NEXT:    bif.16b v1, v3, v4
; CHECK-NEXT:    ret
  %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b)
  ret <4 x double> %r
}
| |
| declare <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) #0 |
| |
| ;============ v4f16 |
| |
; Half lanes: sign mask 0x7FFF per lane via MVNI (NOT of 0x8000), then BIF.
; Output is identical with and without +fullfp16 (pure bit manipulation).
define <4 x half> @test_copysign_v4f16_v4f16(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-LABEL: test_copysign_v4f16_v4f16:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    mvni.4h v2, #128, lsl #8
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b)
  ret <4 x half> %r
}
| |
; f32 sign source narrowed with FCVTN to 4h, then the half-lane mask/select.
define <4 x half> @test_copysign_v4f16_v4f32(<4 x half> %a, <4 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v4f16_v4f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    fcvtn v1.4h, v1.4s
; CHECK-NEXT:    mvni.4h v2, #128, lsl #8
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <4 x float> %b to <4 x half>
  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
  ret <4 x half> %r
}
| |
; f64 -> f16 goes through f32: FCVTXN/FCVTXN2 (round-to-odd, to avoid double
; rounding) then FCVTN to 4h, before the half-lane mask/select.
define <4 x half> @test_copysign_v4f16_v4f64(<4 x half> %a, <4 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v4f16_v4f64:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    fcvtxn v1.2s, v1.2d
; CHECK-NEXT:    fcvtxn2 v1.4s, v2.2d
; CHECK-NEXT:    mvni.4h v2, #128, lsl #8
; CHECK-NEXT:    fcvtn v1.4h, v1.4s
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <4 x double> %b to <4 x half>
  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
  ret <4 x half> %r
}
| |
| declare <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) #0 |
| |
| ;============ v8f16 |
| |
; q-register half case: 8h sign mask + 16b bit-select.
define <8 x half> @test_copysign_v8f16_v8f16(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-LABEL: test_copysign_v8f16_v8f16:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    mvni.8h v2, #128, lsl #8
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ret
  %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b)
  ret <8 x half> %r
}
| |
; <8 x float> sign source split across two q-regs, narrowed with the
; FCVTN/FCVTN2 pair before the 8h mask/select.
define <8 x half> @test_copysign_v8f16_v8f32(<8 x half> %a, <8 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v8f16_v8f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    fcvtn v1.4h, v1.4s
; CHECK-NEXT:    fcvtn2 v1.8h, v2.4s
; CHECK-NEXT:    mvni.8h v2, #128, lsl #8
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <8 x float> %b to <8 x half>
  %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %tmp0)
  ret <8 x half> %r
}
| |
| declare <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) #0 |
| |
| ;============ v4bf16 |
| |
; bfloat has its sign bit in the same position as half, so the same
; two-instruction mask/select sequence applies.
define <4 x bfloat> @test_copysign_v4bf16_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) #0 {
; CHECK-LABEL: test_copysign_v4bf16_v4bf16:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    mvni.4h v2, #128, lsl #8
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %r = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b)
  ret <4 x bfloat> %r
}
| |
; f32 -> bf16 has no single narrowing instruction, so fptrunc expands to a
; round-to-nearest-even sequence (add rounding bias + odd bit, NaN handled
; via fcmeq/bit with a quiet-NaN pattern) before the 16-bit shift-narrow and
; the usual mask/select.
define <4 x bfloat> @test_copysign_v4bf16_v4f32(<4 x bfloat> %a, <4 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v4bf16_v4f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    movi.4s v2, #1
; CHECK-NEXT:    movi.4s v3, #127, msl #8
; CHECK-NEXT:    ushr.4s v4, v1, #16
; CHECK-NEXT:    and.16b v2, v4, v2
; CHECK-NEXT:    add.4s v3, v1, v3
; CHECK-NEXT:    fcmeq.4s v4, v1, v1
; CHECK-NEXT:    orr.4s v1, #64, lsl #16
; CHECK-NEXT:    add.4s v2, v2, v3
; CHECK-NEXT:    bit.16b v1, v2, v4
; CHECK-NEXT:    mvni.4h v2, #128, lsl #8
; CHECK-NEXT:    shrn.4h v1, v1, #16
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <4 x float> %b to <4 x bfloat>
  %r = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> %a, <4 x bfloat> %tmp0)
  ret <4 x bfloat> %r
}
| |
; f64 -> bf16: first narrow to f32 with round-to-odd (FCVTXN/FCVTXN2) to
; avoid double rounding, then the same expanded f32 -> bf16 rounding sequence
; as above, then mask/select.
define <4 x bfloat> @test_copysign_v4bf16_v4f64(<4 x bfloat> %a, <4 x double> %b) #0 {
; CHECK-LABEL: test_copysign_v4bf16_v4f64:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    fcvtxn v1.2s, v1.2d
; CHECK-NEXT:    movi.4s v3, #127, msl #8
; CHECK-NEXT:    fcvtxn2 v1.4s, v2.2d
; CHECK-NEXT:    movi.4s v2, #1
; CHECK-NEXT:    ushr.4s v4, v1, #16
; CHECK-NEXT:    add.4s v3, v1, v3
; CHECK-NEXT:    and.16b v2, v4, v2
; CHECK-NEXT:    fcmeq.4s v4, v1, v1
; CHECK-NEXT:    orr.4s v1, #64, lsl #16
; CHECK-NEXT:    add.4s v2, v2, v3
; CHECK-NEXT:    bit.16b v1, v2, v4
; CHECK-NEXT:    mvni.4h v2, #128, lsl #8
; CHECK-NEXT:    shrn.4h v1, v1, #16
; CHECK-NEXT:    bif.8b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <4 x double> %b to <4 x bfloat>
  %r = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> %a, <4 x bfloat> %tmp0)
  ret <4 x bfloat> %r
}
| |
| declare <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) #0 |
| |
| ;============ v8bf16 |
| |
; q-register bfloat case: 8h sign mask + 16b bit-select.
define <8 x bfloat> @test_copysign_v8bf16_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_copysign_v8bf16_v8bf16:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    mvni.8h v2, #128, lsl #8
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ret
  %r = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b)
  ret <8 x bfloat> %r
}
| |
; Two f32 q-regs each run the expanded f32 -> bf16 rounding sequence; the two
; 4s results are packed with UZP2 (taking the high halves) into one 8h vector
; before the final mask/select.
define <8 x bfloat> @test_copysign_v8bf16_v8f32(<8 x bfloat> %a, <8 x float> %b) #0 {
; CHECK-LABEL: test_copysign_v8bf16_v8f32:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    movi.4s v3, #1
; CHECK-NEXT:    movi.4s v4, #127, msl #8
; CHECK-NEXT:    ushr.4s v5, v2, #16
; CHECK-NEXT:    ushr.4s v6, v1, #16
; CHECK-NEXT:    and.16b v5, v5, v3
; CHECK-NEXT:    add.4s v7, v2, v4
; CHECK-NEXT:    and.16b v3, v6, v3
; CHECK-NEXT:    add.4s v4, v1, v4
; CHECK-NEXT:    fcmeq.4s v6, v2, v2
; CHECK-NEXT:    orr.4s v2, #64, lsl #16
; CHECK-NEXT:    add.4s v5, v5, v7
; CHECK-NEXT:    fcmeq.4s v7, v1, v1
; CHECK-NEXT:    orr.4s v1, #64, lsl #16
; CHECK-NEXT:    add.4s v3, v3, v4
; CHECK-NEXT:    bit.16b v2, v5, v6
; CHECK-NEXT:    bit.16b v1, v3, v7
; CHECK-NEXT:    uzp2.8h v1, v1, v2
; CHECK-NEXT:    mvni.8h v2, #128, lsl #8
; CHECK-NEXT:    bif.16b v0, v1, v2
; CHECK-NEXT:    ret
  %tmp0 = fptrunc <8 x float> %b to <8 x bfloat>
  %r = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %tmp0)
  ret <8 x bfloat> %r
}
| |
| declare <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) #0 |
| |
| attributes #0 = { nounwind } |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; FP16: {{.*}} |
| ; NOFP16: {{.*}} |