; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
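;
; This test covers lowering of the IR 'half' type on LoongArch: as the checks
; below show, half loads and stores use 16-bit integer memory operations, while
; half arithmetic and conversions are promoted to float via the
; __gnu_h2f_ieee/__gnu_f2h_ieee libcalls (plus __truncdfhf2 and, on LA32,
; __fixsfdi where needed).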

define void @test_load_store(ptr %p, ptr %q) nounwind {
; LA32-LABEL: test_load_store:
; LA32: # %bb.0:
; LA32-NEXT: ld.h $a0, $a0, 0
; LA32-NEXT: st.h $a0, $a1, 0
; LA32-NEXT: ret
;
; LA64-LABEL: test_load_store:
; LA64: # %bb.0:
; LA64-NEXT: ld.h $a0, $a0, 0
; LA64-NEXT: st.h $a0, $a1, 0
; LA64-NEXT: ret
  %a = load half, ptr %p
  store half %a, ptr %q
  ret void
}

define float @test_fpextend_float(ptr %p) nounwind {
; LA32-LABEL: test_fpextend_float:
; LA32: # %bb.0:
; LA32-NEXT: ld.hu $a0, $a0, 0
; LA32-NEXT: b %plt(__gnu_h2f_ieee)
;
; LA64-LABEL: test_fpextend_float:
; LA64: # %bb.0:
; LA64-NEXT: ld.hu $a0, $a0, 0
; LA64-NEXT: b %plt(__gnu_h2f_ieee)
  %a = load half, ptr %p
  %r = fpext half %a to float
  ret float %r
}

define double @test_fpextend_double(ptr %p) nounwind {
; LA32-LABEL: test_fpextend_double:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ld.hu $a0, $a0, 0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fcvt.d.s $fa0, $fa0
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_fpextend_double:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: ld.hu $a0, $a0, 0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fcvt.d.s $fa0, $fa0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
  %a = load half, ptr %p
  %r = fpext half %a to double
  ret double %r
}

define void @test_fptrunc_float(float %f, ptr %p) nounwind {
; LA32-LABEL: test_fptrunc_float:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
; LA32-NEXT: st.h $a0, $fp, 0
; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_fptrunc_float:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
; LA64-NEXT: st.h $a0, $fp, 0
; LA64-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
  %a = fptrunc float %f to half
  store half %a, ptr %p
  ret void
}

define void @test_fptrunc_double(double %d, ptr %p) nounwind {
; LA32-LABEL: test_fptrunc_double:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: bl %plt(__truncdfhf2)
; LA32-NEXT: st.h $a0, $fp, 0
; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_fptrunc_double:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: bl %plt(__truncdfhf2)
; LA64-NEXT: st.h $a0, $fp, 0
; LA64-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
  %a = fptrunc double %d to half
  store half %a, ptr %p
  ret void
}

define half @test_fadd_reg(half %a, half %b) nounwind {
; LA32-LABEL: test_fadd_reg:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: move $a0, $a1
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fmov.s $fs0, $fa0
; LA32-NEXT: move $a0, $fp
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fadd.s $fa0, $fa0, $fs0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
; LA32-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_fadd_reg:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: move $a0, $a1
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmov.s $fs0, $fa0
; LA64-NEXT: move $a0, $fp
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fadd.s $fa0, $fa0, $fs0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
; LA64-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
  %r = fadd half %a, %b
  ret half %r
}

define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
; LA32-LABEL: test_fadd_mem:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -32
; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill
; LA32-NEXT: st.w $s0, $sp, 20 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: ld.hu $s0, $a0, 0
; LA32-NEXT: ld.hu $a0, $a1, 0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fmov.s $fs0, $fa0
; LA32-NEXT: move $a0, $s0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fadd.s $fa0, $fa0, $fs0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
; LA32-NEXT: st.h $a0, $fp, 0
; LA32-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: ld.w $s0, $sp, 20 # 4-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 32
; LA32-NEXT: ret
;
; LA64-LABEL: test_fadd_mem:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: ld.hu $s0, $a0, 0
; LA64-NEXT: ld.hu $a0, $a1, 0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmov.s $fs0, $fa0
; LA64-NEXT: move $a0, $s0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fadd.s $fa0, $fa0, $fs0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
; LA64-NEXT: st.h $a0, $fp, 0
; LA64-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
  %a = load half, ptr %p
  %b = load half, ptr %q
  %r = fadd half %a, %b
  store half %r, ptr %p
  ret void
}

define half @test_fmul_reg(half %a, half %b) nounwind {
; LA32-LABEL: test_fmul_reg:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: move $a0, $a1
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fmov.s $fs0, $fa0
; LA32-NEXT: move $a0, $fp
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fmul.s $fa0, $fa0, $fs0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
; LA32-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_fmul_reg:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: move $a0, $a1
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmov.s $fs0, $fa0
; LA64-NEXT: move $a0, $fp
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmul.s $fa0, $fa0, $fs0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
; LA64-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
  %r = fmul half %a, %b
  ret half %r
}

define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
; LA32-LABEL: test_fmul_mem:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -32
; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill
; LA32-NEXT: st.w $s0, $sp, 20 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: ld.hu $s0, $a0, 0
; LA32-NEXT: ld.hu $a0, $a1, 0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fmov.s $fs0, $fa0
; LA32-NEXT: move $a0, $s0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fmul.s $fa0, $fa0, $fs0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
; LA32-NEXT: st.h $a0, $fp, 0
; LA32-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: ld.w $s0, $sp, 20 # 4-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 32
; LA32-NEXT: ret
;
; LA64-LABEL: test_fmul_mem:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: ld.hu $s0, $a0, 0
; LA64-NEXT: ld.hu $a0, $a1, 0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmov.s $fs0, $fa0
; LA64-NEXT: move $a0, $s0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmul.s $fa0, $fa0, $fs0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
; LA64-NEXT: st.h $a0, $fp, 0
; LA64-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
  %a = load half, ptr %p
  %b = load half, ptr %q
  %r = fmul half %a, %b
  store half %r, ptr %p
  ret void
}

define half @freeze_half_undef() nounwind {
; LA32-LABEL: freeze_half_undef:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: movgr2fr.w $fa0, $zero
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fadd.s $fa0, $fa0, $fa0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: freeze_half_undef:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: movgr2fr.w $fa0, $zero
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fadd.s $fa0, $fa0, $fa0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
  %y1 = freeze half undef
  %t1 = fadd half %y1, %y1
  ret half %t1
}

define half @freeze_half_poison(half %maybe.poison) nounwind {
; LA32-LABEL: freeze_half_poison:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fadd.s $fa0, $fa0, $fa0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: freeze_half_poison:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fadd.s $fa0, $fa0, $fa0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
  %y1 = freeze half %maybe.poison
  %t1 = fadd half %y1, %y1
  ret half %t1
}

define signext i32 @test_half_to_s32(half %a) nounwind {
; LA32-LABEL: test_half_to_s32:
; LA32: # %bb.0: # %entry
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: ftintrz.w.s $fa0, $fa0
; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_half_to_s32:
; LA64: # %bb.0: # %entry
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: ftintrz.w.s $fa0, $fa0
; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
entry:
  %conv = fptosi half %a to i32
  ret i32 %conv
}

define zeroext i32 @test_half_to_s32_u32(half %a) nounwind {
; LA32-LABEL: test_half_to_s32_u32:
; LA32: # %bb.0: # %entry
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: ftintrz.w.s $fa0, $fa0
; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_half_to_s32_u32:
; LA64: # %bb.0: # %entry
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: ftintrz.w.s $fa0, $fa0
; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
entry:
  %conv = fptosi half %a to i32
  ret i32 %conv
}

define i64 @test_half_to_i64(half %a) nounwind {
; LA32-LABEL: test_half_to_i64:
; LA32: # %bb.0: # %entry
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: bl %plt(__fixsfdi)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_half_to_i64:
; LA64: # %bb.0: # %entry
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: ftintrz.l.s $fa0, $fa0
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
entry:
  %conv = fptosi half %a to i64
  ret i64 %conv
}