| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=neoverse-n2 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-STD |
| ; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=neoverse-n2 -enable-unsafe-fp-math < %s | FileCheck %s --check-prefixes=CHECK,CHECK-UNSAFE |
| |
| ; Incremental updates of the instruction depths should be enough for this test |
| ; case. |
| ; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=neoverse-n2 -enable-unsafe-fp-math \ |
| ; RUN: -machine-combiner-inc-threshold=0 -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefixes=CHECK,CHECK-UNSAFE |
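| ; The extra flags are understood as follows (an assumption based on the flag |
| ; names, not a documented contract): -machine-combiner-inc-threshold=0 makes |
| ; the combiner rely on incremental instruction-depth updates, and |
| ; -machine-combiner-verify-pattern-order=true checks that candidate patterns |
| ; are evaluated in order of decreasing expected benefit. |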
| |
| ; Verify that the first two fadds are independent of each other, regardless of |
| ; how the inputs are commuted; their results are then used as the source |
| ; operands of the third fadd. |
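| ; |
| ; For example, with reassociation enabled the serial chain |
| ;   %t0 = fadd float %x0, %x1 |
| ;   %t1 = fadd float %t0, %x2 |
| ;   %t2 = fadd float %t1, %x3 |
| ; is rebalanced to (%x0 + %x1) + (%x2 + %x3), so the two partial sums can |
| ; issue in parallel, as the CHECK-UNSAFE lines below show. |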
| |
| define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds1: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd s0, s0, s1 |
| ; CHECK-STD-NEXT: fadd s0, s0, s2 |
| ; CHECK-STD-NEXT: fadd s0, s0, s3 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds1: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s2, s3 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd float %x0, %x1 |
| %t1 = fadd float %t0, %x2 |
| %t2 = fadd float %t1, %x3 |
| ret float %t2 |
| } |
| |
| define float @reassociate_adds1_fast(float %x0, float %x1, float %x2, float %x3) { |
| ; CHECK-LABEL: reassociate_adds1_fast: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fadd s0, s0, s1 |
| ; CHECK-NEXT: fadd s1, s2, s3 |
| ; CHECK-NEXT: fadd s0, s0, s1 |
| ; CHECK-NEXT: ret |
| %t0 = fadd fast float %x0, %x1 |
| %t1 = fadd fast float %t0, %x2 |
| %t2 = fadd fast float %t1, %x3 |
| ret float %t2 |
| } |
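| |
| ; The 'fast' flag implies 'reassoc' and the other fast-math flags, so this |
| ; variant is reassociated under both RUN lines; hence the shared CHECK prefix. |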
| |
| define float @reassociate_adds1_reassoc(float %x0, float %x1, float %x2, float %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds1_reassoc: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd s0, s0, s1 |
| ; CHECK-STD-NEXT: fadd s0, s0, s2 |
| ; CHECK-STD-NEXT: fadd s0, s0, s3 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds1_reassoc: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s2, s3 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd reassoc float %x0, %x1 |
| %t1 = fadd reassoc float %t0, %x2 |
| %t2 = fadd reassoc float %t1, %x3 |
| ret float %t2 |
| } |
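| |
| ; With only the 'reassoc' flag, the standard RUN line leaves the chain in |
| ; source order; the machine combiner is assumed to require additional flags |
| ; (such as 'nsz') before reassociating, while -enable-unsafe-fp-math enables |
| ; the transform globally. |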
| |
| define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds2: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd s0, s0, s1 |
| ; CHECK-STD-NEXT: fadd s0, s2, s0 |
| ; CHECK-STD-NEXT: fadd s0, s0, s3 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds2: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s2, s3 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s1, s0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd float %x0, %x1 |
| %t1 = fadd float %x2, %t0 |
| %t2 = fadd float %t1, %x3 |
| ret float %t2 |
| } |
| |
| define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds3: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd s0, s0, s1 |
| ; CHECK-STD-NEXT: fadd s0, s0, s2 |
| ; CHECK-STD-NEXT: fadd s0, s3, s0 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds3: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s3, s2 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s1, s0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd float %x0, %x1 |
| %t1 = fadd float %t0, %x2 |
| %t2 = fadd float %x3, %t1 |
| ret float %t2 |
| } |
| |
| define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds4: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd s0, s0, s1 |
| ; CHECK-STD-NEXT: fadd s0, s2, s0 |
| ; CHECK-STD-NEXT: fadd s0, s3, s0 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds4: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s3, s2 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s1, s0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd float %x0, %x1 |
| %t1 = fadd float %x2, %t0 |
| %t2 = fadd float %x3, %t1 |
| ret float %t2 |
| } |
| |
| ; Verify that we reassociate some of these ops. The optimally balanced tree |
| ; of adds is not produced, because building it would cost more compile time. |
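| ; |
| ; For eight operands, a fully balanced tree would be |
| ;   ((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7)); |
| ; the pairwise transforms below settle for a partially rebalanced tree. |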
| |
| define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) { |
| ; CHECK-STD-LABEL: reassociate_adds5: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd s0, s0, s1 |
| ; CHECK-STD-NEXT: fadd s0, s0, s2 |
| ; CHECK-STD-NEXT: fadd s0, s0, s3 |
| ; CHECK-STD-NEXT: fadd s0, s0, s4 |
| ; CHECK-STD-NEXT: fadd s0, s0, s5 |
| ; CHECK-STD-NEXT: fadd s0, s0, s6 |
| ; CHECK-STD-NEXT: fadd s0, s0, s7 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds5: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s2, s3 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s4, s5 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s1, s6 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s0, s7 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd float %x0, %x1 |
| %t1 = fadd float %t0, %x2 |
| %t2 = fadd float %t1, %x3 |
| %t3 = fadd float %t2, %x4 |
| %t4 = fadd float %t3, %x5 |
| %t5 = fadd float %t4, %x6 |
| %t6 = fadd float %t5, %x7 |
| ret float %t6 |
| } |
| |
| ; Verify that we need only two associative operations to reassociate the |
| ; operands. Also, we should reassociate such that the result of the |
| ; high-latency division is used by the final 'fadd' rather than reassociating |
| ; the %x3 operand with the division; the latter reassociation would not |
| ; improve anything, because the division would remain on the critical path. |
| |
| define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds6: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fdiv s0, s0, s1 |
| ; CHECK-STD-NEXT: fadd s0, s2, s0 |
| ; CHECK-STD-NEXT: fadd s0, s3, s0 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds6: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fdiv s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fadd s1, s3, s2 |
| ; CHECK-UNSAFE-NEXT: fadd s0, s1, s0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fdiv float %x0, %x1 |
| %t1 = fadd float %x2, %t0 |
| %t2 = fadd float %x3, %t1 |
| ret float %t2 |
| } |
| |
| ; Verify that scalar single-precision multiplies are reassociated. |
| |
| define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) { |
| ; CHECK-STD-LABEL: reassociate_muls1: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fdiv s0, s0, s1 |
| ; CHECK-STD-NEXT: fmul s0, s2, s0 |
| ; CHECK-STD-NEXT: fmul s0, s3, s0 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_muls1: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fdiv s0, s0, s1 |
| ; CHECK-UNSAFE-NEXT: fmul s1, s3, s2 |
| ; CHECK-UNSAFE-NEXT: fmul s0, s1, s0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fdiv float %x0, %x1 |
| %t1 = fmul float %x2, %t0 |
| %t2 = fmul float %x3, %t1 |
| ret float %t2 |
| } |
| |
| ; Verify that scalar double-precision adds are reassociated. |
| |
| define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds_double: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fdiv d0, d0, d1 |
| ; CHECK-STD-NEXT: fadd d0, d2, d0 |
| ; CHECK-STD-NEXT: fadd d0, d3, d0 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds_double: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fdiv d0, d0, d1 |
| ; CHECK-UNSAFE-NEXT: fadd d1, d3, d2 |
| ; CHECK-UNSAFE-NEXT: fadd d0, d1, d0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fdiv double %x0, %x1 |
| %t1 = fadd double %x2, %t0 |
| %t2 = fadd double %x3, %t1 |
| ret double %t2 |
| } |
| |
| ; Verify that scalar double-precision multiplies are reassociated. |
| |
| define double @reassociate_muls_double(double %x0, double %x1, double %x2, double %x3) { |
| ; CHECK-STD-LABEL: reassociate_muls_double: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fdiv d0, d0, d1 |
| ; CHECK-STD-NEXT: fmul d0, d2, d0 |
| ; CHECK-STD-NEXT: fmul d0, d3, d0 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_muls_double: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fdiv d0, d0, d1 |
| ; CHECK-UNSAFE-NEXT: fmul d1, d3, d2 |
| ; CHECK-UNSAFE-NEXT: fmul d0, d1, d0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fdiv double %x0, %x1 |
| %t1 = fmul double %x2, %t0 |
| %t2 = fmul double %x3, %t1 |
| ret double %t2 |
| } |
| |
| ; Verify that scalar half-precision adds are reassociated. |
| |
| define half @reassociate_adds_half(half %x0, half %x1, half %x2, half %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds_half: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fdiv h0, h0, h1 |
| ; CHECK-STD-NEXT: fadd h0, h2, h0 |
| ; CHECK-STD-NEXT: fadd h0, h3, h0 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds_half: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fdiv h0, h0, h1 |
| ; CHECK-UNSAFE-NEXT: fadd h1, h3, h2 |
| ; CHECK-UNSAFE-NEXT: fadd h0, h1, h0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fdiv half %x0, %x1 |
| %t1 = fadd half %x2, %t0 |
| %t2 = fadd half %x3, %t1 |
| ret half %t2 |
| } |
| |
| ; Verify that scalar half-precision multiplies are reassociated. |
| |
| define half @reassociate_muls_half(half %x0, half %x1, half %x2, half %x3) { |
| ; CHECK-STD-LABEL: reassociate_muls_half: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fdiv h0, h0, h1 |
| ; CHECK-STD-NEXT: fmul h0, h2, h0 |
| ; CHECK-STD-NEXT: fmul h0, h3, h0 |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_muls_half: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fdiv h0, h0, h1 |
| ; CHECK-UNSAFE-NEXT: fmul h1, h3, h2 |
| ; CHECK-UNSAFE-NEXT: fmul h0, h1, h0 |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fdiv half %x0, %x1 |
| %t1 = fmul half %x2, %t0 |
| %t2 = fmul half %x3, %t1 |
| ret half %t2 |
| } |
| |
| ; Verify that scalar integer adds are reassociated. Integer addition is |
| ; associative regardless of fast-math flags, so both RUN lines produce the |
| ; same reassociated sequence (hence the shared CHECK prefix). |
| |
| define i32 @reassociate_adds_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) { |
| ; CHECK-LABEL: reassociate_adds_i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: udiv w8, w0, w1 |
| ; CHECK-NEXT: add w9, w3, w2 |
| ; CHECK-NEXT: add w0, w9, w8 |
| ; CHECK-NEXT: ret |
| %t0 = udiv i32 %x0, %x1 |
| %t1 = add i32 %x2, %t0 |
| %t2 = add i32 %x3, %t1 |
| ret i32 %t2 |
| } |
| |
| define i64 @reassociate_adds_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) { |
| ; CHECK-LABEL: reassociate_adds_i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: udiv x8, x0, x1 |
| ; CHECK-NEXT: add x9, x3, x2 |
| ; CHECK-NEXT: add x0, x9, x8 |
| ; CHECK-NEXT: ret |
| %t0 = udiv i64 %x0, %x1 |
| %t1 = add i64 %x2, %t0 |
| %t2 = add i64 %x3, %t1 |
| ret i64 %t2 |
| } |
| |
| ; Verify that scalar bitwise operations are reassociated. |
| |
| define i32 @reassociate_ands_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) { |
| ; CHECK-LABEL: reassociate_ands_i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: and w8, w0, w1 |
| ; CHECK-NEXT: and w9, w2, w3 |
| ; CHECK-NEXT: and w0, w8, w9 |
| ; CHECK-NEXT: ret |
| %t0 = and i32 %x0, %x1 |
| %t1 = and i32 %t0, %x2 |
| %t2 = and i32 %t1, %x3 |
| ret i32 %t2 |
| } |
| |
| define i64 @reassociate_ors_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) { |
| ; CHECK-LABEL: reassociate_ors_i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: orr x8, x0, x1 |
| ; CHECK-NEXT: orr x9, x2, x3 |
| ; CHECK-NEXT: orr x0, x8, x9 |
| ; CHECK-NEXT: ret |
| %t0 = or i64 %x0, %x1 |
| %t1 = or i64 %t0, %x2 |
| %t2 = or i64 %t1, %x3 |
| ret i64 %t2 |
| } |
| |
| define i32 @reassociate_xors_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) { |
| ; CHECK-LABEL: reassociate_xors_i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: eor w8, w0, w1 |
| ; CHECK-NEXT: eor w9, w2, w3 |
| ; CHECK-NEXT: eor w0, w8, w9 |
| ; CHECK-NEXT: ret |
| %t0 = xor i32 %x0, %x1 |
| %t1 = xor i32 %t0, %x2 |
| %t2 = xor i32 %t1, %x3 |
| ret i32 %t2 |
| } |
| |
| ; Verify that we reassociate vector instructions too. |
| |
| define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { |
| ; CHECK-STD-LABEL: vector_reassociate_adds1: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v2.4s |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v3.4s |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: vector_reassociate_adds1: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-UNSAFE-NEXT: fadd v1.4s, v2.4s, v3.4s |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd <4 x float> %x0, %x1 |
| %t1 = fadd <4 x float> %t0, %x2 |
| %t2 = fadd <4 x float> %t1, %x3 |
| ret <4 x float> %t2 |
| } |
| |
| define <4 x float> @vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { |
| ; CHECK-STD-LABEL: vector_reassociate_adds2: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-STD-NEXT: fadd v0.4s, v2.4s, v0.4s |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v3.4s |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: vector_reassociate_adds2: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-UNSAFE-NEXT: fadd v1.4s, v2.4s, v3.4s |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v1.4s, v0.4s |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd <4 x float> %x0, %x1 |
| %t1 = fadd <4 x float> %x2, %t0 |
| %t2 = fadd <4 x float> %t1, %x3 |
| ret <4 x float> %t2 |
| } |
| |
| define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { |
| ; CHECK-STD-LABEL: vector_reassociate_adds3: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v2.4s |
| ; CHECK-STD-NEXT: fadd v0.4s, v3.4s, v0.4s |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: vector_reassociate_adds3: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-UNSAFE-NEXT: fadd v1.4s, v3.4s, v2.4s |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v1.4s, v0.4s |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd <4 x float> %x0, %x1 |
| %t1 = fadd <4 x float> %t0, %x2 |
| %t2 = fadd <4 x float> %x3, %t1 |
| ret <4 x float> %t2 |
| } |
| |
| define <4 x float> @vector_reassociate_adds4(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { |
| ; CHECK-STD-LABEL: vector_reassociate_adds4: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-STD-NEXT: fadd v0.4s, v2.4s, v0.4s |
| ; CHECK-STD-NEXT: fadd v0.4s, v3.4s, v0.4s |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: vector_reassociate_adds4: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-UNSAFE-NEXT: fadd v1.4s, v3.4s, v2.4s |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v1.4s, v0.4s |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd <4 x float> %x0, %x1 |
| %t1 = fadd <4 x float> %x2, %t0 |
| %t2 = fadd <4 x float> %x3, %t1 |
| ret <4 x float> %t2 |
| } |
| |
| ; Verify that 64-bit vector half-precision adds are reassociated. |
| |
| define <4 x half> @reassociate_adds_v4f16(<4 x half> %x0, <4 x half> %x1, <4 x half> %x2, <4 x half> %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds_v4f16: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd v0.4h, v0.4h, v1.4h |
| ; CHECK-STD-NEXT: fadd v0.4h, v2.4h, v0.4h |
| ; CHECK-STD-NEXT: fadd v0.4h, v3.4h, v0.4h |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds_v4f16: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd v0.4h, v0.4h, v1.4h |
| ; CHECK-UNSAFE-NEXT: fadd v1.4h, v3.4h, v2.4h |
| ; CHECK-UNSAFE-NEXT: fadd v0.4h, v1.4h, v0.4h |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd <4 x half> %x0, %x1 |
| %t1 = fadd <4 x half> %x2, %t0 |
| %t2 = fadd <4 x half> %x3, %t1 |
| ret <4 x half> %t2 |
| } |
| |
| ; Verify that 128-bit vector half-precision multiplies are reassociated. |
| |
| define <8 x half> @reassociate_muls_v8f16(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, <8 x half> %x3) { |
| ; CHECK-STD-LABEL: reassociate_muls_v8f16: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd v0.8h, v0.8h, v1.8h |
| ; CHECK-STD-NEXT: fmul v0.8h, v2.8h, v0.8h |
| ; CHECK-STD-NEXT: fmul v0.8h, v3.8h, v0.8h |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_muls_v8f16: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd v0.8h, v0.8h, v1.8h |
| ; CHECK-UNSAFE-NEXT: fmul v1.8h, v3.8h, v2.8h |
| ; CHECK-UNSAFE-NEXT: fmul v0.8h, v1.8h, v0.8h |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd <8 x half> %x0, %x1 |
| %t1 = fmul <8 x half> %x2, %t0 |
| %t2 = fmul <8 x half> %x3, %t1 |
| ret <8 x half> %t2 |
| } |
| |
| ; Verify that 128-bit vector single-precision multiplies are reassociated. |
| |
| define <4 x float> @reassociate_muls_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { |
| ; CHECK-STD-LABEL: reassociate_muls_v4f32: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-STD-NEXT: fmul v0.4s, v2.4s, v0.4s |
| ; CHECK-STD-NEXT: fmul v0.4s, v3.4s, v0.4s |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_muls_v4f32: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd v0.4s, v0.4s, v1.4s |
| ; CHECK-UNSAFE-NEXT: fmul v1.4s, v3.4s, v2.4s |
| ; CHECK-UNSAFE-NEXT: fmul v0.4s, v1.4s, v0.4s |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd <4 x float> %x0, %x1 |
| %t1 = fmul <4 x float> %x2, %t0 |
| %t2 = fmul <4 x float> %x3, %t1 |
| ret <4 x float> %t2 |
| } |
| |
| ; Verify that 128-bit vector double-precision multiplies are reassociated. |
| |
| define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) { |
| ; CHECK-STD-LABEL: reassociate_muls_v2f64: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd v0.2d, v0.2d, v1.2d |
| ; CHECK-STD-NEXT: fmul v0.2d, v2.2d, v0.2d |
| ; CHECK-STD-NEXT: fmul v0.2d, v3.2d, v0.2d |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_muls_v2f64: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd v0.2d, v0.2d, v1.2d |
| ; CHECK-UNSAFE-NEXT: fmul v1.2d, v3.2d, v2.2d |
| ; CHECK-UNSAFE-NEXT: fmul v0.2d, v1.2d, v0.2d |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd <2 x double> %x0, %x1 |
| %t1 = fmul <2 x double> %x2, %t0 |
| %t2 = fmul <2 x double> %x3, %t1 |
| ret <2 x double> %t2 |
| } |
| |
| ; Verify that vector integer arithmetic operations are reassociated. |
| |
| define <2 x i32> @reassociate_muls_v2i32(<2 x i32> %x0, <2 x i32> %x1, <2 x i32> %x2, <2 x i32> %x3) { |
| ; CHECK-LABEL: reassociate_muls_v2i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mul v0.2s, v0.2s, v1.2s |
| ; CHECK-NEXT: mul v1.2s, v3.2s, v2.2s |
| ; CHECK-NEXT: mul v0.2s, v1.2s, v0.2s |
| ; CHECK-NEXT: ret |
| %t0 = mul <2 x i32> %x0, %x1 |
| %t1 = mul <2 x i32> %x2, %t0 |
| %t2 = mul <2 x i32> %x3, %t1 |
| ret <2 x i32> %t2 |
| } |
| |
| define <2 x i64> @reassociate_adds_v2i64(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, <2 x i64> %x3) { |
| ; CHECK-LABEL: reassociate_adds_v2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: add v0.2d, v0.2d, v1.2d |
| ; CHECK-NEXT: add v1.2d, v3.2d, v2.2d |
| ; CHECK-NEXT: add v0.2d, v1.2d, v0.2d |
| ; CHECK-NEXT: ret |
| %t0 = add <2 x i64> %x0, %x1 |
| %t1 = add <2 x i64> %x2, %t0 |
| %t2 = add <2 x i64> %x3, %t1 |
| ret <2 x i64> %t2 |
| } |
| |
| ; Verify that vector bitwise operations are reassociated. |
| |
| define <16 x i8> @reassociate_ands_v16i8(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, <16 x i8> %x3) { |
| ; CHECK-LABEL: reassociate_ands_v16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b |
| ; CHECK-NEXT: and v1.16b, v2.16b, v3.16b |
| ; CHECK-NEXT: and v0.16b, v0.16b, v1.16b |
| ; CHECK-NEXT: ret |
| %t0 = or <16 x i8> %x0, %x1 |
| %t1 = and <16 x i8> %t0, %x2 |
| %t2 = and <16 x i8> %t1, %x3 |
| ret <16 x i8> %t2 |
| } |
| |
| define <4 x i16> @reassociate_ors_v4i16(<4 x i16> %x0, <4 x i16> %x1, <4 x i16> %x2, <4 x i16> %x3) { |
| ; CHECK-LABEL: reassociate_ors_v4i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b |
| ; CHECK-NEXT: orr v1.8b, v2.8b, v3.8b |
| ; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b |
| ; CHECK-NEXT: ret |
| %t0 = xor <4 x i16> %x0, %x1 |
| %t1 = or <4 x i16> %t0, %x2 |
| %t2 = or <4 x i16> %t1, %x3 |
| ret <4 x i16> %t2 |
| } |
| |
| define <4 x i32> @reassociate_xors_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) { |
| ; CHECK-LABEL: reassociate_xors_v4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: and v0.16b, v0.16b, v1.16b |
| ; CHECK-NEXT: eor v1.16b, v2.16b, v3.16b |
| ; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b |
| ; CHECK-NEXT: ret |
| %t0 = and <4 x i32> %x0, %x1 |
| %t1 = xor <4 x i32> %t0, %x2 |
| %t2 = xor <4 x i32> %t1, %x3 |
| ret <4 x i32> %t2 |
| } |
| |
| ; Verify that scalable vector FP arithmetic operations are reassociated. |
| |
| define <vscale x 8 x half> @reassociate_adds_nxv8f16(<vscale x 8 x half> %x0, <vscale x 8 x half> %x1, <vscale x 8 x half> %x2, <vscale x 8 x half> %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds_nxv8f16: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd z0.h, z0.h, z1.h |
| ; CHECK-STD-NEXT: fadd z0.h, z2.h, z0.h |
| ; CHECK-STD-NEXT: fadd z0.h, z3.h, z0.h |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds_nxv8f16: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd z0.h, z0.h, z1.h |
| ; CHECK-UNSAFE-NEXT: fadd z1.h, z3.h, z2.h |
| ; CHECK-UNSAFE-NEXT: fadd z0.h, z1.h, z0.h |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd reassoc <vscale x 8 x half> %x0, %x1 |
| %t1 = fadd reassoc <vscale x 8 x half> %x2, %t0 |
| %t2 = fadd reassoc <vscale x 8 x half> %x3, %t1 |
| ret <vscale x 8 x half> %t2 |
| } |
| |
| define <vscale x 4 x float> @reassociate_adds_nxv4f32(<vscale x 4 x float> %x0, <vscale x 4 x float> %x1, <vscale x 4 x float> %x2, <vscale x 4 x float> %x3) { |
| ; CHECK-STD-LABEL: reassociate_adds_nxv4f32: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fadd z0.s, z0.s, z1.s |
| ; CHECK-STD-NEXT: fadd z0.s, z2.s, z0.s |
| ; CHECK-STD-NEXT: fadd z0.s, z3.s, z0.s |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds_nxv4f32: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fadd z0.s, z0.s, z1.s |
| ; CHECK-UNSAFE-NEXT: fadd z1.s, z3.s, z2.s |
| ; CHECK-UNSAFE-NEXT: fadd z0.s, z1.s, z0.s |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fadd reassoc <vscale x 4 x float> %x0, %x1 |
| %t1 = fadd reassoc <vscale x 4 x float> %x2, %t0 |
| %t2 = fadd reassoc <vscale x 4 x float> %x3, %t1 |
| ret <vscale x 4 x float> %t2 |
| } |
| |
| define <vscale x 2 x double> @reassociate_muls_nxv2f64(<vscale x 2 x double> %x0, <vscale x 2 x double> %x1, <vscale x 2 x double> %x2, <vscale x 2 x double> %x3) { |
| ; CHECK-STD-LABEL: reassociate_muls_nxv2f64: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: fmul z0.d, z0.d, z1.d |
| ; CHECK-STD-NEXT: fmul z0.d, z2.d, z0.d |
| ; CHECK-STD-NEXT: fmul z0.d, z3.d, z0.d |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_muls_nxv2f64: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: fmul z0.d, z0.d, z1.d |
| ; CHECK-UNSAFE-NEXT: fmul z1.d, z3.d, z2.d |
| ; CHECK-UNSAFE-NEXT: fmul z0.d, z1.d, z0.d |
| ; CHECK-UNSAFE-NEXT: ret |
| %t0 = fmul reassoc <vscale x 2 x double> %x0, %x1 |
| %t1 = fmul reassoc <vscale x 2 x double> %x2, %t0 |
| %t2 = fmul reassoc <vscale x 2 x double> %x3, %t1 |
| ret <vscale x 2 x double> %t2 |
| } |
| |
| ; Verify that scalable vector integer arithmetic operations are reassociated. |
| |
| define <vscale x 16 x i8> @reassociate_muls_nxv16i8(<vscale x 16 x i8> %x0, <vscale x 16 x i8> %x1, <vscale x 16 x i8> %x2, <vscale x 16 x i8> %x3) { |
| ; CHECK-LABEL: reassociate_muls_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mul z0.b, z0.b, z1.b |
| ; CHECK-NEXT: mul z1.b, z3.b, z2.b |
| ; CHECK-NEXT: mul z0.b, z1.b, z0.b |
| ; CHECK-NEXT: ret |
| %t0 = mul <vscale x 16 x i8> %x0, %x1 |
| %t1 = mul <vscale x 16 x i8> %x2, %t0 |
| %t2 = mul <vscale x 16 x i8> %x3, %t1 |
| ret <vscale x 16 x i8> %t2 |
| } |
| |
| define <vscale x 8 x i16> @reassociate_adds_nxv8i16(<vscale x 8 x i16> %x0, <vscale x 8 x i16> %x1, <vscale x 8 x i16> %x2, <vscale x 8 x i16> %x3) { |
| ; CHECK-LABEL: reassociate_adds_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: add z0.h, z0.h, z1.h |
| ; CHECK-NEXT: add z1.h, z3.h, z2.h |
| ; CHECK-NEXT: add z0.h, z1.h, z0.h |
| ; CHECK-NEXT: ret |
| %t0 = add <vscale x 8 x i16> %x0, %x1 |
| %t1 = add <vscale x 8 x i16> %x2, %t0 |
| %t2 = add <vscale x 8 x i16> %x3, %t1 |
| ret <vscale x 8 x i16> %t2 |
| } |
| |
| define <vscale x 4 x i32> @reassociate_muls_nxv4i32(<vscale x 4 x i32> %x0, <vscale x 4 x i32> %x1, <vscale x 4 x i32> %x2, <vscale x 4 x i32> %x3) { |
| ; CHECK-LABEL: reassociate_muls_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mul z0.s, z0.s, z1.s |
| ; CHECK-NEXT: mul z1.s, z3.s, z2.s |
| ; CHECK-NEXT: mul z0.s, z1.s, z0.s |
| ; CHECK-NEXT: ret |
| %t0 = mul <vscale x 4 x i32> %x0, %x1 |
| %t1 = mul <vscale x 4 x i32> %x2, %t0 |
| %t2 = mul <vscale x 4 x i32> %x3, %t1 |
| ret <vscale x 4 x i32> %t2 |
| } |
| |
| define <vscale x 2 x i64> @reassociate_adds_nxv2i64(<vscale x 2 x i64> %x0, <vscale x 2 x i64> %x1, <vscale x 2 x i64> %x2, <vscale x 2 x i64> %x3) { |
| ; CHECK-LABEL: reassociate_adds_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: add z0.d, z0.d, z1.d |
| ; CHECK-NEXT: add z1.d, z3.d, z2.d |
| ; CHECK-NEXT: add z0.d, z1.d, z0.d |
| ; CHECK-NEXT: ret |
| %t0 = add <vscale x 2 x i64> %x0, %x1 |
| %t1 = add <vscale x 2 x i64> %x2, %t0 |
| %t2 = add <vscale x 2 x i64> %x3, %t1 |
| ret <vscale x 2 x i64> %t2 |
| } |
| |
| ; Verify that scalable vector bitwise operations are reassociated. |
| |
| define <vscale x 16 x i8> @reassociate_ands_nxv16i8(<vscale x 16 x i8> %x0, <vscale x 16 x i8> %x1, <vscale x 16 x i8> %x2, <vscale x 16 x i8> %x3) { |
| ; CHECK-LABEL: reassociate_ands_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: orr z0.d, z0.d, z1.d |
| ; CHECK-NEXT: and z1.d, z2.d, z3.d |
| ; CHECK-NEXT: and z0.d, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %t0 = or <vscale x 16 x i8> %x0, %x1 |
| %t1 = and <vscale x 16 x i8> %t0, %x2 |
| %t2 = and <vscale x 16 x i8> %t1, %x3 |
| ret <vscale x 16 x i8> %t2 |
| } |
| |
| define <vscale x 8 x i16> @reassociate_ors_nxv8i16(<vscale x 8 x i16> %x0, <vscale x 8 x i16> %x1, <vscale x 8 x i16> %x2, <vscale x 8 x i16> %x3) { |
| ; CHECK-LABEL: reassociate_ors_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: eor z0.d, z0.d, z1.d |
| ; CHECK-NEXT: orr z1.d, z2.d, z3.d |
| ; CHECK-NEXT: orr z0.d, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %t0 = xor <vscale x 8 x i16> %x0, %x1 |
| %t1 = or <vscale x 8 x i16> %t0, %x2 |
| %t2 = or <vscale x 8 x i16> %t1, %x3 |
| ret <vscale x 8 x i16> %t2 |
| } |
| |
| ; PR25016: https://llvm.org/bugs/show_bug.cgi?id=25016 |
| ; Verify that reassociation does not happen needlessly or incorrectly. |
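| ; |
| ; In reassociate_adds_from_calls, the standard RUN line keeps the serial |
| ; chain of adds while the unsafe RUN line rebalances it; already_reassociated |
| ; is already in the balanced form, so neither RUN line changes it. |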
| |
| declare double @bar() |
| |
| define double @reassociate_adds_from_calls() { |
| ; CHECK-STD-LABEL: reassociate_adds_from_calls: |
| ; CHECK-STD: // %bb.0: |
| ; CHECK-STD-NEXT: str d10, [sp, #-32]! // 8-byte Folded Spill |
| ; CHECK-STD-NEXT: stp d9, d8, [sp, #8] // 16-byte Folded Spill |
| ; CHECK-STD-NEXT: str x30, [sp, #24] // 8-byte Folded Spill |
| ; CHECK-STD-NEXT: .cfi_def_cfa_offset 32 |
| ; CHECK-STD-NEXT: .cfi_offset w30, -8 |
| ; CHECK-STD-NEXT: .cfi_offset b8, -16 |
| ; CHECK-STD-NEXT: .cfi_offset b9, -24 |
| ; CHECK-STD-NEXT: .cfi_offset b10, -32 |
| ; CHECK-STD-NEXT: bl bar |
| ; CHECK-STD-NEXT: fmov d8, d0 |
| ; CHECK-STD-NEXT: bl bar |
| ; CHECK-STD-NEXT: fmov d9, d0 |
| ; CHECK-STD-NEXT: bl bar |
| ; CHECK-STD-NEXT: fmov d10, d0 |
| ; CHECK-STD-NEXT: bl bar |
| ; CHECK-STD-NEXT: fadd d1, d8, d9 |
| ; CHECK-STD-NEXT: ldp d9, d8, [sp, #8] // 16-byte Folded Reload |
| ; CHECK-STD-NEXT: ldr x30, [sp, #24] // 8-byte Folded Reload |
| ; CHECK-STD-NEXT: fadd d1, d1, d10 |
| ; CHECK-STD-NEXT: fadd d0, d1, d0 |
| ; CHECK-STD-NEXT: ldr d10, [sp], #32 // 8-byte Folded Reload |
| ; CHECK-STD-NEXT: ret |
| ; |
| ; CHECK-UNSAFE-LABEL: reassociate_adds_from_calls: |
| ; CHECK-UNSAFE: // %bb.0: |
| ; CHECK-UNSAFE-NEXT: str d10, [sp, #-32]! // 8-byte Folded Spill |
| ; CHECK-UNSAFE-NEXT: stp d9, d8, [sp, #8] // 16-byte Folded Spill |
| ; CHECK-UNSAFE-NEXT: str x30, [sp, #24] // 8-byte Folded Spill |
| ; CHECK-UNSAFE-NEXT: .cfi_def_cfa_offset 32 |
| ; CHECK-UNSAFE-NEXT: .cfi_offset w30, -8 |
| ; CHECK-UNSAFE-NEXT: .cfi_offset b8, -16 |
| ; CHECK-UNSAFE-NEXT: .cfi_offset b9, -24 |
| ; CHECK-UNSAFE-NEXT: .cfi_offset b10, -32 |
| ; CHECK-UNSAFE-NEXT: bl bar |
| ; CHECK-UNSAFE-NEXT: fmov d8, d0 |
| ; CHECK-UNSAFE-NEXT: bl bar |
| ; CHECK-UNSAFE-NEXT: fmov d9, d0 |
| ; CHECK-UNSAFE-NEXT: bl bar |
| ; CHECK-UNSAFE-NEXT: fmov d10, d0 |
| ; CHECK-UNSAFE-NEXT: bl bar |
| ; CHECK-UNSAFE-NEXT: fadd d1, d8, d9 |
| ; CHECK-UNSAFE-NEXT: ldp d9, d8, [sp, #8] // 16-byte Folded Reload |
| ; CHECK-UNSAFE-NEXT: ldr x30, [sp, #24] // 8-byte Folded Reload |
| ; CHECK-UNSAFE-NEXT: fadd d0, d10, d0 |
| ; CHECK-UNSAFE-NEXT: fadd d0, d1, d0 |
| ; CHECK-UNSAFE-NEXT: ldr d10, [sp], #32 // 8-byte Folded Reload |
| ; CHECK-UNSAFE-NEXT: ret |
| %x0 = call double @bar() |
| %x1 = call double @bar() |
| %x2 = call double @bar() |
| %x3 = call double @bar() |
| %t0 = fadd double %x0, %x1 |
| %t1 = fadd double %t0, %x2 |
| %t2 = fadd double %t1, %x3 |
| ret double %t2 |
| } |
| |
| define double @already_reassociated() { |
| ; CHECK-LABEL: already_reassociated: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: str d10, [sp, #-32]! // 8-byte Folded Spill |
| ; CHECK-NEXT: stp d9, d8, [sp, #8] // 16-byte Folded Spill |
| ; CHECK-NEXT: str x30, [sp, #24] // 8-byte Folded Spill |
| ; CHECK-NEXT: .cfi_def_cfa_offset 32 |
| ; CHECK-NEXT: .cfi_offset w30, -8 |
| ; CHECK-NEXT: .cfi_offset b8, -16 |
| ; CHECK-NEXT: .cfi_offset b9, -24 |
| ; CHECK-NEXT: .cfi_offset b10, -32 |
| ; CHECK-NEXT: bl bar |
| ; CHECK-NEXT: fmov d8, d0 |
| ; CHECK-NEXT: bl bar |
| ; CHECK-NEXT: fmov d9, d0 |
| ; CHECK-NEXT: bl bar |
| ; CHECK-NEXT: fmov d10, d0 |
| ; CHECK-NEXT: bl bar |
| ; CHECK-NEXT: fadd d1, d8, d9 |
| ; CHECK-NEXT: ldp d9, d8, [sp, #8] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr x30, [sp, #24] // 8-byte Folded Reload |
| ; CHECK-NEXT: fadd d0, d10, d0 |
| ; CHECK-NEXT: fadd d0, d1, d0 |
| ; CHECK-NEXT: ldr d10, [sp], #32 // 8-byte Folded Reload |
| ; CHECK-NEXT: ret |
| %x0 = call double @bar() |
| %x1 = call double @bar() |
| %x2 = call double @bar() |
| %x3 = call double @bar() |
| %t0 = fadd double %x0, %x1 |
| %t1 = fadd double %x2, %x3 |
| %t2 = fadd double %t0, %t1 |
| ret double %t2 |
| } |