| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32 |
| ; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64 |
| |
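; Test the lowering of @llvm.smul.with.overflow.i64. Signed multiplication
; overflows iff the high half of the full product is not the sign-extension
; of the low half; on LA64 this is expected to lower to a mulh.d/mul.d pair
; followed by srai.d/xor/sltu, while LA32 expands the 64-bit multiply into
; 32-bit mulh.wu/mul.w pieces with carry propagation via sltu.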
| define zeroext i1 @smuloi64(i64 %v1, i64 %v2, ptr %res) { |
| ; LA32-LABEL: smuloi64: |
| ; LA32: # %bb.0: |
| ; LA32-NEXT: mulh.wu $a5, $a0, $a2 |
| ; LA32-NEXT: mul.w $a6, $a1, $a2 |
| ; LA32-NEXT: add.w $a5, $a6, $a5 |
| ; LA32-NEXT: sltu $a6, $a5, $a6 |
| ; LA32-NEXT: mulh.wu $a7, $a1, $a2 |
| ; LA32-NEXT: srai.w $t0, $a1, 31 |
| ; LA32-NEXT: mul.w $t0, $t0, $a2 |
| ; LA32-NEXT: add.w $a7, $a7, $t0 |
| ; LA32-NEXT: add.w $a6, $a7, $a6 |
| ; LA32-NEXT: mulh.wu $a7, $a0, $a3 |
| ; LA32-NEXT: srai.w $t0, $a3, 31 |
| ; LA32-NEXT: mul.w $t0, $a0, $t0 |
| ; LA32-NEXT: add.w $a7, $a7, $t0 |
| ; LA32-NEXT: mul.w $t0, $a0, $a3 |
| ; LA32-NEXT: add.w $a5, $t0, $a5 |
| ; LA32-NEXT: sltu $t0, $a5, $t0 |
| ; LA32-NEXT: add.w $a7, $a7, $t0 |
| ; LA32-NEXT: add.w $t0, $a6, $a7 |
| ; LA32-NEXT: sltu $t1, $t0, $a6 |
| ; LA32-NEXT: srai.w $a6, $a6, 31 |
| ; LA32-NEXT: srai.w $a7, $a7, 31 |
| ; LA32-NEXT: add.w $a6, $a6, $a7 |
| ; LA32-NEXT: add.w $a6, $a6, $t1 |
| ; LA32-NEXT: mulh.w $a7, $a1, $a3 |
| ; LA32-NEXT: add.w $a6, $a7, $a6 |
| ; LA32-NEXT: mul.w $a1, $a1, $a3 |
| ; LA32-NEXT: add.w $a3, $a1, $t0 |
| ; LA32-NEXT: sltu $a1, $a3, $a1 |
| ; LA32-NEXT: add.w $a1, $a6, $a1 |
| ; LA32-NEXT: srai.w $a6, $a5, 31 |
| ; LA32-NEXT: xor $a1, $a1, $a6 |
| ; LA32-NEXT: xor $a3, $a3, $a6 |
| ; LA32-NEXT: or $a1, $a3, $a1 |
| ; LA32-NEXT: sltu $a1, $zero, $a1 |
| ; LA32-NEXT: mul.w $a0, $a0, $a2 |
| ; LA32-NEXT: st.w $a0, $a4, 0 |
| ; LA32-NEXT: st.w $a5, $a4, 4 |
| ; LA32-NEXT: move $a0, $a1 |
| ; LA32-NEXT: ret |
| ; |
| ; LA64-LABEL: smuloi64: |
| ; LA64: # %bb.0: |
| ; LA64-NEXT: mulh.d $a3, $a0, $a1 |
| ; LA64-NEXT: mul.d $a1, $a0, $a1 |
| ; LA64-NEXT: srai.d $a0, $a1, 63 |
| ; LA64-NEXT: xor $a0, $a3, $a0 |
| ; LA64-NEXT: sltu $a0, $zero, $a0 |
| ; LA64-NEXT: st.d $a1, $a2, 0 |
| ; LA64-NEXT: ret |
| %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) |
| %val = extractvalue {i64, i1} %t, 0 |
| %obit = extractvalue {i64, i1} %t, 1 |
| store i64 %val, ptr %res |
| ret i1 %obit |
| } |
| |
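; Test the lowering of @llvm.smul.with.overflow.i128. LA64 is expected to use
; the same split-and-compare shape that LA32 uses for the i64 case above; the
; LA32 expansion is register-hungry enough to spill $ra, $fp, and $s0-$s8.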
| define zeroext i1 @smuloi128(i128 %v1, i128 %v2, ptr %res) { |
| ; LA32-LABEL: smuloi128: |
| ; LA32: # %bb.0: |
| ; LA32-NEXT: addi.w $sp, $sp, -48 |
| ; LA32-NEXT: .cfi_def_cfa_offset 48 |
| ; LA32-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $fp, $sp, 40 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s0, $sp, 36 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s1, $sp, 32 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s2, $sp, 28 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s3, $sp, 24 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s4, $sp, 20 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s5, $sp, 16 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s6, $sp, 12 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s7, $sp, 8 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s8, $sp, 4 # 4-byte Folded Spill |
| ; LA32-NEXT: .cfi_offset 1, -4 |
| ; LA32-NEXT: .cfi_offset 22, -8 |
| ; LA32-NEXT: .cfi_offset 23, -12 |
| ; LA32-NEXT: .cfi_offset 24, -16 |
| ; LA32-NEXT: .cfi_offset 25, -20 |
| ; LA32-NEXT: .cfi_offset 26, -24 |
| ; LA32-NEXT: .cfi_offset 27, -28 |
| ; LA32-NEXT: .cfi_offset 28, -32 |
| ; LA32-NEXT: .cfi_offset 29, -36 |
| ; LA32-NEXT: .cfi_offset 30, -40 |
| ; LA32-NEXT: .cfi_offset 31, -44 |
| ; LA32-NEXT: ld.w $a5, $a1, 12 |
| ; LA32-NEXT: ld.w $a6, $a1, 8 |
| ; LA32-NEXT: ld.w $t1, $a0, 4 |
| ; LA32-NEXT: ld.w $a3, $a1, 0 |
| ; LA32-NEXT: ld.w $a7, $a0, 8 |
| ; LA32-NEXT: ld.w $t0, $a0, 12 |
| ; LA32-NEXT: ld.w $a4, $a0, 0 |
| ; LA32-NEXT: ld.w $t2, $a1, 4 |
| ; LA32-NEXT: mulh.wu $a0, $a7, $a3 |
| ; LA32-NEXT: mul.w $a1, $t0, $a3 |
| ; LA32-NEXT: add.w $a0, $a1, $a0 |
| ; LA32-NEXT: sltu $a1, $a0, $a1 |
| ; LA32-NEXT: mulh.wu $t3, $t0, $a3 |
| ; LA32-NEXT: add.w $a1, $t3, $a1 |
| ; LA32-NEXT: mul.w $t3, $a7, $t2 |
| ; LA32-NEXT: add.w $t4, $t3, $a0 |
| ; LA32-NEXT: sltu $a0, $t4, $t3 |
| ; LA32-NEXT: mulh.wu $t3, $a7, $t2 |
| ; LA32-NEXT: add.w $a0, $t3, $a0 |
| ; LA32-NEXT: add.w $t3, $a1, $a0 |
| ; LA32-NEXT: mul.w $t5, $t0, $t2 |
| ; LA32-NEXT: add.w $t6, $t5, $t3 |
| ; LA32-NEXT: srai.w $a0, $t0, 31 |
| ; LA32-NEXT: mul.w $t7, $a3, $a0 |
| ; LA32-NEXT: add.w $t8, $t6, $t7 |
| ; LA32-NEXT: sltu $fp, $t8, $t6 |
| ; LA32-NEXT: sltu $t5, $t6, $t5 |
| ; LA32-NEXT: sltu $a1, $t3, $a1 |
| ; LA32-NEXT: mulh.wu $t3, $t0, $t2 |
| ; LA32-NEXT: add.w $a1, $t3, $a1 |
| ; LA32-NEXT: add.w $a1, $a1, $t5 |
| ; LA32-NEXT: mulh.wu $t3, $a3, $a0 |
| ; LA32-NEXT: add.w $t3, $t3, $t7 |
| ; LA32-NEXT: mul.w $t5, $t2, $a0 |
| ; LA32-NEXT: add.w $t3, $t3, $t5 |
| ; LA32-NEXT: add.w $a1, $a1, $t3 |
| ; LA32-NEXT: add.w $t3, $a1, $fp |
| ; LA32-NEXT: mulh.wu $a1, $a4, $a3 |
| ; LA32-NEXT: mul.w $t5, $t1, $a3 |
| ; LA32-NEXT: add.w $a1, $t5, $a1 |
| ; LA32-NEXT: sltu $t5, $a1, $t5 |
| ; LA32-NEXT: mulh.wu $t6, $t1, $a3 |
| ; LA32-NEXT: add.w $t5, $t6, $t5 |
| ; LA32-NEXT: mul.w $t6, $a4, $t2 |
| ; LA32-NEXT: add.w $a1, $t6, $a1 |
| ; LA32-NEXT: sltu $t6, $a1, $t6 |
| ; LA32-NEXT: mulh.wu $t7, $a4, $t2 |
| ; LA32-NEXT: add.w $t6, $t7, $t6 |
| ; LA32-NEXT: add.w $t6, $t5, $t6 |
| ; LA32-NEXT: mul.w $t7, $t1, $t2 |
| ; LA32-NEXT: add.w $fp, $t7, $t6 |
| ; LA32-NEXT: sltu $t7, $fp, $t7 |
| ; LA32-NEXT: sltu $t5, $t6, $t5 |
| ; LA32-NEXT: mulh.wu $t2, $t1, $t2 |
| ; LA32-NEXT: add.w $t2, $t2, $t5 |
| ; LA32-NEXT: add.w $t2, $t2, $t7 |
| ; LA32-NEXT: add.w $t2, $t4, $t2 |
| ; LA32-NEXT: mul.w $t5, $a7, $a3 |
| ; LA32-NEXT: add.w $t6, $t5, $fp |
| ; LA32-NEXT: sltu $t5, $t6, $t5 |
| ; LA32-NEXT: add.w $t2, $t2, $t5 |
| ; LA32-NEXT: sltu $t7, $t2, $t4 |
| ; LA32-NEXT: xor $t4, $t2, $t4 |
| ; LA32-NEXT: sltui $t4, $t4, 1 |
| ; LA32-NEXT: masknez $t7, $t7, $t4 |
| ; LA32-NEXT: maskeqz $t4, $t5, $t4 |
| ; LA32-NEXT: or $t4, $t4, $t7 |
| ; LA32-NEXT: add.w $t5, $t8, $t4 |
| ; LA32-NEXT: sltu $t4, $t5, $t8 |
| ; LA32-NEXT: add.w $t4, $t3, $t4 |
| ; LA32-NEXT: mulh.wu $t3, $a4, $a6 |
| ; LA32-NEXT: mul.w $t7, $t1, $a6 |
| ; LA32-NEXT: add.w $t3, $t7, $t3 |
| ; LA32-NEXT: sltu $t7, $t3, $t7 |
| ; LA32-NEXT: mulh.wu $t8, $t1, $a6 |
| ; LA32-NEXT: add.w $t7, $t8, $t7 |
| ; LA32-NEXT: mul.w $t8, $a4, $a5 |
| ; LA32-NEXT: add.w $fp, $t8, $t3 |
| ; LA32-NEXT: sltu $t3, $fp, $t8 |
| ; LA32-NEXT: mulh.wu $t8, $a4, $a5 |
| ; LA32-NEXT: add.w $t3, $t8, $t3 |
| ; LA32-NEXT: add.w $t8, $t7, $t3 |
| ; LA32-NEXT: mul.w $s0, $t1, $a5 |
| ; LA32-NEXT: add.w $s1, $s0, $t8 |
| ; LA32-NEXT: srai.w $t3, $a5, 31 |
| ; LA32-NEXT: mul.w $s2, $t3, $a4 |
| ; LA32-NEXT: add.w $s3, $s1, $s2 |
| ; LA32-NEXT: sltu $s4, $s3, $s1 |
| ; LA32-NEXT: sltu $s0, $s1, $s0 |
| ; LA32-NEXT: sltu $t7, $t8, $t7 |
| ; LA32-NEXT: mulh.wu $t8, $t1, $a5 |
| ; LA32-NEXT: add.w $t7, $t8, $t7 |
| ; LA32-NEXT: add.w $t7, $t7, $s0 |
| ; LA32-NEXT: mul.w $t1, $t3, $t1 |
| ; LA32-NEXT: mulh.wu $t8, $t3, $a4 |
| ; LA32-NEXT: add.w $t1, $t8, $t1 |
| ; LA32-NEXT: add.w $t1, $t1, $s2 |
| ; LA32-NEXT: add.w $t1, $t7, $t1 |
| ; LA32-NEXT: add.w $t7, $t1, $s4 |
| ; LA32-NEXT: add.w $t2, $fp, $t2 |
| ; LA32-NEXT: mul.w $t8, $a4, $a6 |
| ; LA32-NEXT: add.w $t1, $t8, $t6 |
| ; LA32-NEXT: sltu $t6, $t1, $t8 |
| ; LA32-NEXT: add.w $t2, $t2, $t6 |
| ; LA32-NEXT: sltu $t8, $t2, $fp |
| ; LA32-NEXT: xor $fp, $t2, $fp |
| ; LA32-NEXT: sltui $fp, $fp, 1 |
| ; LA32-NEXT: masknez $t8, $t8, $fp |
| ; LA32-NEXT: maskeqz $t6, $t6, $fp |
| ; LA32-NEXT: or $t6, $t6, $t8 |
| ; LA32-NEXT: add.w $t6, $s3, $t6 |
| ; LA32-NEXT: sltu $t8, $t6, $s3 |
| ; LA32-NEXT: add.w $t7, $t7, $t8 |
| ; LA32-NEXT: add.w $t8, $t4, $t7 |
| ; LA32-NEXT: add.w $t6, $t5, $t6 |
| ; LA32-NEXT: sltu $fp, $t6, $t5 |
| ; LA32-NEXT: add.w $t8, $t8, $fp |
| ; LA32-NEXT: mulh.wu $t5, $a7, $a6 |
| ; LA32-NEXT: mul.w $s0, $t0, $a6 |
| ; LA32-NEXT: add.w $s1, $s0, $t5 |
| ; LA32-NEXT: mul.w $s2, $a7, $a5 |
| ; LA32-NEXT: add.w $s3, $s2, $s1 |
| ; LA32-NEXT: add.w $s4, $s3, $t8 |
| ; LA32-NEXT: mul.w $s5, $a7, $a6 |
| ; LA32-NEXT: add.w $t5, $s5, $t6 |
| ; LA32-NEXT: sltu $s5, $t5, $s5 |
| ; LA32-NEXT: add.w $t6, $s4, $s5 |
| ; LA32-NEXT: sltu $s4, $t6, $s3 |
| ; LA32-NEXT: xor $s6, $t6, $s3 |
| ; LA32-NEXT: sltui $s6, $s6, 1 |
| ; LA32-NEXT: masknez $s4, $s4, $s6 |
| ; LA32-NEXT: maskeqz $s5, $s5, $s6 |
| ; LA32-NEXT: or $s4, $s5, $s4 |
| ; LA32-NEXT: sltu $s5, $t8, $t4 |
| ; LA32-NEXT: xor $t8, $t8, $t4 |
| ; LA32-NEXT: sltui $t8, $t8, 1 |
| ; LA32-NEXT: masknez $s5, $s5, $t8 |
| ; LA32-NEXT: maskeqz $t8, $fp, $t8 |
| ; LA32-NEXT: or $t8, $t8, $s5 |
| ; LA32-NEXT: srai.w $t4, $t4, 31 |
| ; LA32-NEXT: srai.w $t7, $t7, 31 |
| ; LA32-NEXT: add.w $t7, $t4, $t7 |
| ; LA32-NEXT: add.w $t8, $t7, $t8 |
| ; LA32-NEXT: sltu $fp, $s1, $s0 |
| ; LA32-NEXT: mulh.wu $s0, $t0, $a6 |
| ; LA32-NEXT: add.w $fp, $s0, $fp |
| ; LA32-NEXT: sltu $s0, $s3, $s2 |
| ; LA32-NEXT: mulh.wu $s1, $a7, $a5 |
| ; LA32-NEXT: add.w $s0, $s1, $s0 |
| ; LA32-NEXT: add.w $s0, $fp, $s0 |
| ; LA32-NEXT: mul.w $s1, $t0, $a5 |
| ; LA32-NEXT: add.w $s2, $s1, $s0 |
| ; LA32-NEXT: mul.w $s3, $a6, $a0 |
| ; LA32-NEXT: mul.w $s5, $t3, $a7 |
| ; LA32-NEXT: add.w $s6, $s5, $s3 |
| ; LA32-NEXT: add.w $s7, $s2, $s6 |
| ; LA32-NEXT: add.w $s8, $s7, $t8 |
| ; LA32-NEXT: add.w $s4, $s8, $s4 |
| ; LA32-NEXT: sltu $ra, $s4, $s8 |
| ; LA32-NEXT: sltu $t4, $t7, $t4 |
| ; LA32-NEXT: add.w $t4, $t7, $t4 |
| ; LA32-NEXT: sltu $t7, $t8, $t7 |
| ; LA32-NEXT: add.w $t4, $t4, $t7 |
| ; LA32-NEXT: sltu $t7, $s7, $s2 |
| ; LA32-NEXT: sltu $t8, $s2, $s1 |
| ; LA32-NEXT: sltu $fp, $s0, $fp |
| ; LA32-NEXT: mulh.wu $s0, $t0, $a5 |
| ; LA32-NEXT: add.w $fp, $s0, $fp |
| ; LA32-NEXT: add.w $t8, $fp, $t8 |
| ; LA32-NEXT: mulh.wu $a6, $a6, $a0 |
| ; LA32-NEXT: add.w $a6, $a6, $s3 |
| ; LA32-NEXT: mul.w $a0, $a5, $a0 |
| ; LA32-NEXT: add.w $a0, $a6, $a0 |
| ; LA32-NEXT: mul.w $a5, $t3, $t0 |
| ; LA32-NEXT: mulh.wu $a6, $t3, $a7 |
| ; LA32-NEXT: add.w $a5, $a6, $a5 |
| ; LA32-NEXT: add.w $a5, $a5, $s5 |
| ; LA32-NEXT: add.w $a0, $a5, $a0 |
| ; LA32-NEXT: sltu $a5, $s6, $s5 |
| ; LA32-NEXT: add.w $a0, $a0, $a5 |
| ; LA32-NEXT: add.w $a0, $t8, $a0 |
| ; LA32-NEXT: add.w $a0, $a0, $t7 |
| ; LA32-NEXT: add.w $a0, $a0, $t4 |
| ; LA32-NEXT: sltu $a5, $s8, $s7 |
| ; LA32-NEXT: add.w $a0, $a0, $a5 |
| ; LA32-NEXT: add.w $a0, $a0, $ra |
| ; LA32-NEXT: srai.w $a5, $t2, 31 |
| ; LA32-NEXT: xor $a0, $a0, $a5 |
| ; LA32-NEXT: xor $a6, $t6, $a5 |
| ; LA32-NEXT: or $a0, $a6, $a0 |
| ; LA32-NEXT: xor $a6, $s4, $a5 |
| ; LA32-NEXT: xor $a5, $t5, $a5 |
| ; LA32-NEXT: or $a5, $a5, $a6 |
| ; LA32-NEXT: or $a0, $a5, $a0 |
| ; LA32-NEXT: sltu $a0, $zero, $a0 |
| ; LA32-NEXT: mul.w $a3, $a4, $a3 |
| ; LA32-NEXT: st.w $a3, $a2, 0 |
| ; LA32-NEXT: st.w $a1, $a2, 4 |
| ; LA32-NEXT: st.w $t1, $a2, 8 |
| ; LA32-NEXT: st.w $t2, $a2, 12 |
| ; LA32-NEXT: ld.w $s8, $sp, 4 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s7, $sp, 8 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s6, $sp, 12 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s5, $sp, 16 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s4, $sp, 20 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s3, $sp, 24 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s2, $sp, 28 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s1, $sp, 32 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s0, $sp, 36 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $fp, $sp, 40 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload |
| ; LA32-NEXT: addi.w $sp, $sp, 48 |
| ; LA32-NEXT: ret |
| ; |
| ; LA64-LABEL: smuloi128: |
| ; LA64: # %bb.0: |
| ; LA64-NEXT: mulh.du $a5, $a0, $a2 |
| ; LA64-NEXT: mul.d $a6, $a1, $a2 |
| ; LA64-NEXT: add.d $a5, $a6, $a5 |
| ; LA64-NEXT: sltu $a6, $a5, $a6 |
| ; LA64-NEXT: mulh.du $a7, $a1, $a2 |
| ; LA64-NEXT: srai.d $t0, $a1, 63 |
| ; LA64-NEXT: mul.d $t0, $t0, $a2 |
| ; LA64-NEXT: add.d $a7, $a7, $t0 |
| ; LA64-NEXT: add.d $a6, $a7, $a6 |
| ; LA64-NEXT: mulh.du $a7, $a0, $a3 |
| ; LA64-NEXT: srai.d $t0, $a3, 63 |
| ; LA64-NEXT: mul.d $t0, $a0, $t0 |
| ; LA64-NEXT: add.d $a7, $a7, $t0 |
| ; LA64-NEXT: mul.d $t0, $a0, $a3 |
| ; LA64-NEXT: add.d $a5, $t0, $a5 |
| ; LA64-NEXT: sltu $t0, $a5, $t0 |
| ; LA64-NEXT: add.d $a7, $a7, $t0 |
| ; LA64-NEXT: add.d $t0, $a6, $a7 |
| ; LA64-NEXT: sltu $t1, $t0, $a6 |
| ; LA64-NEXT: srai.d $a6, $a6, 63 |
| ; LA64-NEXT: srai.d $a7, $a7, 63 |
| ; LA64-NEXT: add.d $a6, $a6, $a7 |
| ; LA64-NEXT: add.d $a6, $a6, $t1 |
| ; LA64-NEXT: mulh.d $a7, $a1, $a3 |
| ; LA64-NEXT: add.d $a6, $a7, $a6 |
| ; LA64-NEXT: mul.d $a1, $a1, $a3 |
| ; LA64-NEXT: add.d $a3, $a1, $t0 |
| ; LA64-NEXT: sltu $a1, $a3, $a1 |
| ; LA64-NEXT: add.d $a1, $a6, $a1 |
| ; LA64-NEXT: srai.d $a6, $a5, 63 |
| ; LA64-NEXT: xor $a1, $a1, $a6 |
| ; LA64-NEXT: xor $a3, $a3, $a6 |
| ; LA64-NEXT: or $a1, $a3, $a1 |
| ; LA64-NEXT: sltu $a1, $zero, $a1 |
| ; LA64-NEXT: mul.d $a0, $a0, $a2 |
| ; LA64-NEXT: st.d $a0, $a4, 0 |
| ; LA64-NEXT: st.d $a5, $a4, 8 |
| ; LA64-NEXT: move $a0, $a1 |
| ; LA64-NEXT: ret |
| %t = call {i128, i1} @llvm.smul.with.overflow.i128(i128 %v1, i128 %v2) |
| %val = extractvalue {i128, i1} %t, 0 |
| %obit = extractvalue {i128, i1} %t, 1 |
| store i128 %val, ptr %res |
| ret i1 %obit |
| } |
| |
| declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone |
| declare {i128, i1} @llvm.smul.with.overflow.i128(i128, i128) nounwind readnone |