| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s |
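; Tests lowering of masked and unmasked 256-element vector loads on the VE
; vector processing unit (VPU).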
| |
| declare <128 x double> @llvm.masked.load.v128f64.p0v128f64(<128 x double>* %0, i32 immarg %1, <128 x i1> %2, <128 x double> %3) #0 |
| |
; TODO: Custom-widen this <128 x double> masked load by lowering it to
; VVP_LOAD in ReplaceNodeResults.
| ; Function Attrs: nounwind |
| ; define fastcc <128 x double> @vec_mload_v128f64(<128 x double>* %P, <128 x i1> %M) { |
| ; %r = call <128 x double> @llvm.masked.load.v128f64.p0v128f64(<128 x double>* %P, i32 16, <128 x i1> %M, <128 x double> undef) |
| ; ret <128 x double> %r |
| ; } |
| |
| |
| declare <256 x double> @llvm.masked.load.v256f64.p0v256f64(<256 x double>* %0, i32 immarg %1, <256 x i1> %2, <256 x double> %3) #0 |
| |
| ; Function Attrs: nounwind |
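; A masked load of <256 x double> has no direct VE instruction; it is lowered
; to a masked gather: vseq produces the lane indices, vmulu.l scales them by
; the 8-byte element size, vaddu.l adds the base pointer in %s0, and vgt
; gathers the 64-bit elements under mask %vm1.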
| define fastcc <256 x double> @vec_mload_v256f64(<256 x double>* %P, <256 x i1> %M) { |
| ; CHECK-LABEL: vec_mload_v256f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lea %s1, 256 |
| ; CHECK-NEXT: lvl %s1 |
| ; CHECK-NEXT: vseq %v0 |
| ; CHECK-NEXT: vmulu.l %v0, 8, %v0, %vm1 |
| ; CHECK-NEXT: vaddu.l %v0, %s0, %v0, %vm1 |
| ; CHECK-NEXT: vgt %v0, %v0, 0, 0, %vm1 |
| ; CHECK-NEXT: b.l.t (, %s10) |
| %r = call <256 x double> @llvm.masked.load.v256f64.p0v256f64(<256 x double>* %P, i32 16, <256 x i1> %M, <256 x double> undef) |
| ret <256 x double> %r |
| } |
| |
| ; Function Attrs: nounwind |
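; An unmasked load needs no gather and lowers to a single strided vld
; (stride 8 bytes) at the full vector length of 256.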
| define fastcc <256 x double> @vec_load_v256f64(<256 x double>* %P) { |
| ; CHECK-LABEL: vec_load_v256f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lea %s1, 256 |
| ; CHECK-NEXT: lvl %s1 |
| ; CHECK-NEXT: vld %v0, 8, %s0 |
| ; CHECK-NEXT: b.l.t (, %s10) |
| %r = load <256 x double>, <256 x double>* %P, align 4 |
| ret <256 x double> %r |
| } |
| |
| ; Function Attrs: nounwind |
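; With a non-undef passthru %PT, the gathered elements are merged into the
; passthru value with a trailing vmrg under the same mask.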
| define fastcc <256 x double> @vec_mload_pt_v256f64(<256 x double>* %P, <256 x double> %PT, <256 x i1> %M) { |
| ; CHECK-LABEL: vec_mload_pt_v256f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lea %s1, 256 |
| ; CHECK-NEXT: lvl %s1 |
| ; CHECK-NEXT: vseq %v1 |
| ; CHECK-NEXT: vmulu.l %v1, 8, %v1, %vm1 |
| ; CHECK-NEXT: vaddu.l %v1, %s0, %v1, %vm1 |
| ; CHECK-NEXT: vgt %v1, %v1, 0, 0, %vm1 |
| ; CHECK-NEXT: vmrg %v0, %v0, %v1, %vm1 |
| ; CHECK-NEXT: b.l.t (, %s10) |
| %r = call <256 x double> @llvm.masked.load.v256f64.p0v256f64(<256 x double>* %P, i32 16, <256 x i1> %M, <256 x double> %PT) |
| ret <256 x double> %r |
| } |
| |
| |
| declare <256 x float> @llvm.masked.load.v256f32.p0v256f32(<256 x float>* %0, i32 immarg %1, <256 x i1> %2, <256 x float> %3) #0 |
| |
| ; Function Attrs: nounwind |
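; For <256 x float> the index scale drops to the 4-byte element size and the
; gather uses vgtu, which loads each 32-bit value into the upper half of its
; 64-bit vector element.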
| define fastcc <256 x float> @vec_mload_v256f32(<256 x float>* %P, <256 x i1> %M) { |
| ; CHECK-LABEL: vec_mload_v256f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lea %s1, 256 |
| ; CHECK-NEXT: lvl %s1 |
| ; CHECK-NEXT: vseq %v0 |
| ; CHECK-NEXT: vmulu.l %v0, 4, %v0, %vm1 |
| ; CHECK-NEXT: vaddu.l %v0, %s0, %v0, %vm1 |
| ; CHECK-NEXT: vgtu %v0, %v0, 0, 0, %vm1 |
| ; CHECK-NEXT: b.l.t (, %s10) |
| %r = call <256 x float> @llvm.masked.load.v256f32.p0v256f32(<256 x float>* %P, i32 16, <256 x i1> %M, <256 x float> undef) |
| ret <256 x float> %r |
| } |
| |
| ; Function Attrs: nounwind |
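; Same as the f32 case above, with the gathered result merged into the
; passthru via vmrg.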
| define fastcc <256 x float> @vec_mload_pt_v256f32(<256 x float>* %P, <256 x float> %PT, <256 x i1> %M) { |
| ; CHECK-LABEL: vec_mload_pt_v256f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lea %s1, 256 |
| ; CHECK-NEXT: lvl %s1 |
| ; CHECK-NEXT: vseq %v1 |
| ; CHECK-NEXT: vmulu.l %v1, 4, %v1, %vm1 |
| ; CHECK-NEXT: vaddu.l %v1, %s0, %v1, %vm1 |
| ; CHECK-NEXT: vgtu %v1, %v1, 0, 0, %vm1 |
| ; CHECK-NEXT: vmrg %v0, %v0, %v1, %vm1 |
| ; CHECK-NEXT: b.l.t (, %s10) |
| %r = call <256 x float> @llvm.masked.load.v256f32.p0v256f32(<256 x float>* %P, i32 16, <256 x i1> %M, <256 x float> %PT) |
| ret <256 x float> %r |
| } |
| |
| |
| declare <256 x i32> @llvm.masked.load.v256i32.p0v256i32(<256 x i32>* %0, i32 immarg %1, <256 x i1> %2, <256 x i32> %3) #0 |
| |
| ; Function Attrs: nounwind |
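; For <256 x i32> the gather uses vgtl.zx, which loads each 32-bit value into
; the lower half of its 64-bit vector element and zero-extends it.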
| define fastcc <256 x i32> @vec_mload_v256i32(<256 x i32>* %P, <256 x i1> %M) { |
| ; CHECK-LABEL: vec_mload_v256i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lea %s1, 256 |
| ; CHECK-NEXT: lvl %s1 |
| ; CHECK-NEXT: vseq %v0 |
| ; CHECK-NEXT: vmulu.l %v0, 4, %v0, %vm1 |
| ; CHECK-NEXT: vaddu.l %v0, %s0, %v0, %vm1 |
| ; CHECK-NEXT: vgtl.zx %v0, %v0, 0, 0, %vm1 |
| ; CHECK-NEXT: b.l.t (, %s10) |
| %r = call <256 x i32> @llvm.masked.load.v256i32.p0v256i32(<256 x i32>* %P, i32 16, <256 x i1> %M, <256 x i32> undef) |
| ret <256 x i32> %r |
| } |
| |
| ; Function Attrs: nounwind |
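; As with the floating-point variants, a passthru adds a trailing vmrg.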
| define fastcc <256 x i32> @vec_mload_pt_v256i32(<256 x i32>* %P, <256 x i32> %PT, <256 x i1> %M) { |
| ; CHECK-LABEL: vec_mload_pt_v256i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lea %s1, 256 |
| ; CHECK-NEXT: lvl %s1 |
| ; CHECK-NEXT: vseq %v1 |
| ; CHECK-NEXT: vmulu.l %v1, 4, %v1, %vm1 |
| ; CHECK-NEXT: vaddu.l %v1, %s0, %v1, %vm1 |
| ; CHECK-NEXT: vgtl.zx %v1, %v1, 0, 0, %vm1 |
| ; CHECK-NEXT: vmrg %v0, %v0, %v1, %vm1 |
| ; CHECK-NEXT: b.l.t (, %s10) |
| %r = call <256 x i32> @llvm.masked.load.v256i32.p0v256i32(<256 x i32>* %P, i32 16, <256 x i1> %M, <256 x i32> %PT) |
| ret <256 x i32> %r |
| } |
| |
| attributes #0 = { argmemonly nounwind readonly willreturn } |