| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ |
| ; RUN: -verify-machineinstrs | FileCheck %s |
| ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ |
| ; RUN: -verify-machineinstrs | FileCheck %s |
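
; This file tests that the operands of commutable RVV intrinsics are
; canonicalized: each function calls the same intrinsic twice with swapped
; operands. For the unmasked forms the two operations become identical and
; are CSE'd into a single instruction; as the checks show, the masked forms
; are commuted to a common operand order (except the ternary
; vwmacc/vwmaccu), but the duplicate instruction is not CSE'd.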
| |
| ; vadd.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vadd_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vadd_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vand.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vand_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vand_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vand.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vand_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vand_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vand.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vor.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vor_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vor_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vor.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vor_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vor_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vor.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vxor.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vxor_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vxor_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vxor.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vxor_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vxor_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vxor.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vmseq.vv |
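; The compares return <vscale x 1 x i1>; an IR-level add of two i1 vectors
; is an xor, so it lowers to vmxor.mm (and v8 ^ v8 is the all-zero mask once
; the two compares have been CSE'd).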
| declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i1> @commutable_vmseq_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vmseq_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmseq.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmxor.mm v0, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i1> %a, %b |
| ret <vscale x 1 x i1> %ret |
| } |
| |
| declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen); |
| define <vscale x 1 x i1> @commutable_vmseq_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vmseq_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmseq.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vmseq.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmxor.mm v0, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) |
| %b = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2) |
| %ret = add <vscale x 1 x i1> %a, %b |
| ret <vscale x 1 x i1> %ret |
| } |
| |
| ; vmsne.vv |
| declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i1> @commutable_vmsne_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vmsne_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmsne.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmxor.mm v0, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i1> %a, %b |
| ret <vscale x 1 x i1> %ret |
| } |
| |
| declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen); |
| define <vscale x 1 x i1> @commutable_vmsne_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vmsne_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmsne.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vmsne.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmxor.mm v0, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) |
| %b = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2) |
| %ret = add <vscale x 1 x i1> %a, %b |
| ret <vscale x 1 x i1> %ret |
| } |
| |
| ; vmin.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vmin_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vmin_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmin.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vmin_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vmin_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmin.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vminu.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vminu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vminu_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vminu.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vminu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vminu_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vminu.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vmax.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vmax_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vmax_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmax.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vmax_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vmax_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmax.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vmaxu.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vmaxu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vmaxu_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmaxu.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vmaxu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vmaxu_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmaxu.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vmul.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vmul_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmul.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vmul_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmul.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vmulh.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vmulh_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vmulh_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmulh.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vmulh_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vmulh_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmulh.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vmulh.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vmulhu.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vmulhu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vmulhu_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmulhu.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vmulhu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vmulhu_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vmulhu.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vmulhu.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vwadd.vv |
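; Widening ops: the i32 sources are processed at e32/mf2 while the i64
; result occupies a full m1 register group, so the result is produced in a
; separate register (v10 here).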
| declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen); |
| define <vscale x 1 x i64> @commutable_vwadd_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vwadd_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwadd.vv v10, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vwadd_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vwadd_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwadd.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vwadd.vv v11, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v11 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vwaddu.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen); |
| define <vscale x 1 x i64> @commutable_vwaddu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vwaddu_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwaddu.vv v10, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vwaddu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vwaddu_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwaddu.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vwaddu.vv v11, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v11 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vwmul.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen); |
| define <vscale x 1 x i64> @commutable_vwmul_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vwmul_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwmul.vv v10, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vwmul_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vwmul_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwmul.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vwmul.vv v11, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v11 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vwmulu.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen); |
| define <vscale x 1 x i64> @commutable_vwmulu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vwmulu_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwmulu.vv v10, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vwmulu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vwmulu_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwmulu.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vwmulu.vv v11, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v11 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vwmacc.vv |
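; vwmacc is a ternary multiply-accumulate and its intrinsic takes an extra
; policy operand; only the two multiplicands are commutable. As the masked
; check below shows, the masked form is currently left uncommuted (v9, v8).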
| declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vwmacc_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vwmacc_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwmacc.vv v10, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vwmacc_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vwmacc_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwmacc.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vwmacc.vv v11, v9, v8, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v11 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vwmaccu.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vwmaccu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vwmaccu_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwmaccu.vv v10, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vwmaccu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vwmaccu_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma |
| ; CHECK-NEXT: vwmaccu.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vwmaccu.vv v11, v9, v8, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v11 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vadc.vvm |
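; vadc.vvm always reads its carry-in mask from v0, so there is no separate
; masked variant to test.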
| declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen); |
| define <vscale x 1 x i64> @commutable_vadc_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vadc_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vadc.vvm v10, v8, v9, v0 |
| ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vsadd.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vsadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vsadd_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vsadd.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vsadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vsadd_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vsadd.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vsaddu.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen); |
| define <vscale x 1 x i64> @commutable_vsaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vsaddu_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vsaddu.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vsaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vsaddu_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vsaddu.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vaadd.vv |
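; The fixed-point ops below (vaadd, vaaddu, vsmul) carry an extra
; rounding-mode operand; iXLen 0 selects round-to-nearest-up, which shows up
; as "csrwi vxrm, 0" in the output.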
| declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vaadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vaadd_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vaadd.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vaadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vaadd_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vaadd.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vaadd.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vaaddu.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vaaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vaaddu_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vaaddu.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vaaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vaaddu_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vaaddu.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vaaddu.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| ; vsmul.vv |
| declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vsmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: commutable_vsmul_vv: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vsmul.vv v8, v8, v9 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |
| |
| declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen); |
| define <vscale x 1 x i64> @commutable_vsmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) { |
| ; CHECK-LABEL: commutable_vsmul_vv_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma |
| ; CHECK-NEXT: vsmul.vv v10, v8, v9, v0.t |
| ; CHECK-NEXT: vsmul.vv v8, v8, v9, v0.t |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1) |
| %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1) |
| %ret = add <vscale x 1 x i64> %a, %b |
| ret <vscale x 1 x i64> %ret |
| } |