| ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+f,+d -stop-after=finalize-isel < %s \ |
| ; RUN: | FileCheck -check-prefixes=RV32IF %s |
| ; RUN: llc -mtriple=riscv64 -mattr=+f,+d -stop-after=finalize-isel < %s \ |
| ; RUN: | FileCheck -check-prefixes=RV64IF %s |
| |
| ; Make sure an implicit FRM dependency is added to instructions with dynamic |
| ; rounding. |
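;
; Per the RISC-V F extension, FP instructions encode the rounding mode as an
; immediate operand: 0 = RNE, 1 = RTZ, 2 = RDN, 3 = RUP, 4 = RMM, 7 = DYN.
; DYN means the rounding mode is read from the FRM CSR at execution time, so
; instructions carrying it must also carry an implicit use of $frm, as the
; FADD_S and FMADD_S checks below verify.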
| |
| define float @fadd_s(float %a, float %b) nounwind { |
| ; RV32IF-LABEL: name: fadd_s |
| ; RV32IF: bb.0 (%ir-block.0): |
| ; RV32IF-NEXT: liveins: $x10, $x11 |
| ; RV32IF-NEXT: {{ $}} |
| ; RV32IF-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11 |
| ; RV32IF-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 |
| ; RV32IF-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]] |
| ; RV32IF-NEXT: [[FMV_W_X1:%[0-9]+]]:fpr32 = FMV_W_X [[COPY1]] |
| ; RV32IF-NEXT: %4:fpr32 = nofpexcept FADD_S killed [[FMV_W_X1]], killed [[FMV_W_X]], 7, implicit $frm |
| ; RV32IF-NEXT: [[FMV_X_W:%[0-9]+]]:gpr = FMV_X_W killed %4 |
| ; RV32IF-NEXT: $x10 = COPY [[FMV_X_W]] |
| ; RV32IF-NEXT: PseudoRET implicit $x10 |
| ; RV64IF-LABEL: name: fadd_s |
| ; RV64IF: bb.0 (%ir-block.0): |
| ; RV64IF-NEXT: liveins: $x10, $x11 |
| ; RV64IF-NEXT: {{ $}} |
| ; RV64IF-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11 |
| ; RV64IF-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 |
| ; RV64IF-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]] |
| ; RV64IF-NEXT: [[FMV_W_X1:%[0-9]+]]:fpr32 = FMV_W_X [[COPY1]] |
| ; RV64IF-NEXT: %4:fpr32 = nofpexcept FADD_S killed [[FMV_W_X1]], killed [[FMV_W_X]], 7, implicit $frm |
| ; RV64IF-NEXT: [[FMV_X_W:%[0-9]+]]:gpr = FMV_X_W killed %4 |
| ; RV64IF-NEXT: $x10 = COPY [[FMV_X_W]] |
| ; RV64IF-NEXT: PseudoRET implicit $x10 |
| %1 = fadd float %a, %b |
| ret float %1 |
| } |
| |
| declare float @llvm.fma.f32(float, float, float) |
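; The fused multiply-add intrinsic lowers to FMADD_S, which takes the same
; rounding-mode operand; with DYN (7) it should receive the implicit $frm use
; as well.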
| |
| define float @fmadd_s(float %a, float %b, float %c) nounwind { |
| ; RV32IF-LABEL: name: fmadd_s |
| ; RV32IF: bb.0 (%ir-block.0): |
| ; RV32IF-NEXT: liveins: $x10, $x11, $x12 |
| ; RV32IF-NEXT: {{ $}} |
| ; RV32IF-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 |
| ; RV32IF-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11 |
| ; RV32IF-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 |
| ; RV32IF-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]] |
| ; RV32IF-NEXT: [[FMV_W_X1:%[0-9]+]]:fpr32 = FMV_W_X [[COPY1]] |
| ; RV32IF-NEXT: [[FMV_W_X2:%[0-9]+]]:fpr32 = FMV_W_X [[COPY2]] |
| ; RV32IF-NEXT: %6:fpr32 = nofpexcept FMADD_S killed [[FMV_W_X2]], killed [[FMV_W_X1]], killed [[FMV_W_X]], 7, implicit $frm |
| ; RV32IF-NEXT: [[FMV_X_W:%[0-9]+]]:gpr = FMV_X_W killed %6 |
| ; RV32IF-NEXT: $x10 = COPY [[FMV_X_W]] |
| ; RV32IF-NEXT: PseudoRET implicit $x10 |
| ; RV64IF-LABEL: name: fmadd_s |
| ; RV64IF: bb.0 (%ir-block.0): |
| ; RV64IF-NEXT: liveins: $x10, $x11, $x12 |
| ; RV64IF-NEXT: {{ $}} |
| ; RV64IF-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 |
| ; RV64IF-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11 |
| ; RV64IF-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 |
| ; RV64IF-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]] |
| ; RV64IF-NEXT: [[FMV_W_X1:%[0-9]+]]:fpr32 = FMV_W_X [[COPY1]] |
| ; RV64IF-NEXT: [[FMV_W_X2:%[0-9]+]]:fpr32 = FMV_W_X [[COPY2]] |
| ; RV64IF-NEXT: %6:fpr32 = nofpexcept FMADD_S killed [[FMV_W_X2]], killed [[FMV_W_X1]], killed [[FMV_W_X]], 7, implicit $frm |
| ; RV64IF-NEXT: [[FMV_X_W:%[0-9]+]]:gpr = FMV_X_W killed %6 |
| ; RV64IF-NEXT: $x10 = COPY [[FMV_X_W]] |
| ; RV64IF-NEXT: PseudoRET implicit $x10 |
| %1 = call float @llvm.fma.f32(float %a, float %b, float %c) |
| ret float %1 |
| } |
| |
; This uses rtz instead of the dyn rounding mode, so it shouldn't have an FRM
; dependency.
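; In the MIR below, this shows up as rounding-mode operand 1 (RTZ) on
; FCVT_W_S, with no implicit $frm operand.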
| define i32 @fcvt_w_s(float %a) nounwind { |
| ; RV32IF-LABEL: name: fcvt_w_s |
| ; RV32IF: bb.0 (%ir-block.0): |
| ; RV32IF-NEXT: liveins: $x10 |
| ; RV32IF-NEXT: {{ $}} |
| ; RV32IF-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10 |
| ; RV32IF-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]] |
| ; RV32IF-NEXT: %2:gpr = nofpexcept FCVT_W_S killed [[FMV_W_X]], 1 |
| ; RV32IF-NEXT: $x10 = COPY %2 |
| ; RV32IF-NEXT: PseudoRET implicit $x10 |
| ; RV64IF-LABEL: name: fcvt_w_s |
| ; RV64IF: bb.0 (%ir-block.0): |
| ; RV64IF-NEXT: liveins: $x10 |
| ; RV64IF-NEXT: {{ $}} |
| ; RV64IF-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10 |
| ; RV64IF-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]] |
| ; RV64IF-NEXT: %2:gpr = nofpexcept FCVT_W_S killed [[FMV_W_X]], 1 |
| ; RV64IF-NEXT: $x10 = COPY %2 |
| ; RV64IF-NEXT: PseudoRET implicit $x10 |
| %1 = fptosi float %a to i32 |
| ret i32 %1 |
| } |
| |
; This doesn't use a rounding mode, since any i32 value can be represented
; exactly as a double.
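; FCVT_D_W is therefore emitted without any rounding-mode operand, so there is
; no FRM dependency to model. Note the RV32 checks also return the f64 result
; in $x10/$x11 by spilling the FPR64 value and reloading it as two 32-bit
; halves, while RV64 moves it directly with FMV_X_D.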
| define double @fcvt_d_w(i32 %a) nounwind { |
| ; RV32IF-LABEL: name: fcvt_d_w |
| ; RV32IF: bb.0 (%ir-block.0): |
| ; RV32IF-NEXT: liveins: $x10 |
| ; RV32IF-NEXT: {{ $}} |
| ; RV32IF-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10 |
| ; RV32IF-NEXT: %1:fpr64 = nofpexcept FCVT_D_W [[COPY]] |
| ; RV32IF-NEXT: FSD killed %1, %stack.0, 0 :: (store (s64) into %stack.0) |
| ; RV32IF-NEXT: [[LW:%[0-9]+]]:gpr = LW %stack.0, 0 :: (load (s32) from %stack.0, align 8) |
| ; RV32IF-NEXT: [[LW1:%[0-9]+]]:gpr = LW %stack.0, 4 :: (load (s32) from %stack.0 + 4, basealign 8) |
| ; RV32IF-NEXT: $x10 = COPY [[LW]] |
| ; RV32IF-NEXT: $x11 = COPY [[LW1]] |
| ; RV32IF-NEXT: PseudoRET implicit $x10, implicit $x11 |
| ; RV64IF-LABEL: name: fcvt_d_w |
| ; RV64IF: bb.0 (%ir-block.0): |
| ; RV64IF-NEXT: liveins: $x10 |
| ; RV64IF-NEXT: {{ $}} |
| ; RV64IF-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10 |
| ; RV64IF-NEXT: %1:fpr64 = nofpexcept FCVT_D_W [[COPY]] |
| ; RV64IF-NEXT: [[FMV_X_D:%[0-9]+]]:gpr = FMV_X_D killed %1 |
| ; RV64IF-NEXT: $x10 = COPY [[FMV_X_D]] |
| ; RV64IF-NEXT: PseudoRET implicit $x10 |
| %1 = sitofp i32 %a to double |
| ret double %1 |
| } |