| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt -passes='loop-vectorize,instcombine' -sve-tail-folding-insn-threshold=0 -sve-tail-folding=all -S < %s | FileCheck %s |
| |
| target triple = "aarch64" |
| |
; Test that this uses the 'DataAndControlFlow' style of tail folding,
; where it performs the loop-indvar increment before the active.lane.mask
; and uses the trip count (%N zero-extended to i64) directly, without any
; adjustment.
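; A minimal sketch of the loop-control pattern checked below (names are
; illustrative; %vf stands for the scalable vectorization factor, here
; vscale x 4):
;
;   %index.next = add i64 %index, %vf                  ; increment first
;   %mask.next = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 %index.next, i64 %wide.trip.count)
;   %cont = extractelement <vscale x 4 x i1> %mask.next, i64 0
;   br i1 %cont, label %vector.body, label %middle.block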
| define void @cannot_overflow_i32_induction_var(ptr noalias %dst, ptr readonly %src, i32 %N) #0 { |
| ; CHECK-LABEL: @cannot_overflow_i32_induction_var( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CMP6_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0 |
| ; CHECK-NEXT: br i1 [[CMP6_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]] |
| ; CHECK: for.body.preheader: |
| ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64 |
| ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; CHECK: vector.ph: |
| ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() |
| ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]]) |
| ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; CHECK: vector.body: |
| ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]] |
| ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP0]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison) |
| ; CHECK-NEXT: [[TMP1:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 42, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) |
| ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX]] |
| ; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP1]], ptr [[TMP2]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) |
| ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]]) |
| ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 |
| ; CHECK-NEXT: br i1 [[TMP5]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; CHECK: middle.block: |
| ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]] |
| ; CHECK: scalar.ph: |
| ; CHECK-NEXT: br label [[FOR_BODY:%.*]] |
| ; CHECK: for.body: |
| ; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; CHECK: for.cond.cleanup.loopexit: |
| ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] |
| ; CHECK: for.cond.cleanup: |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %cmp6.not = icmp eq i32 %N, 0 |
| br i1 %cmp6.not, label %for.cond.cleanup, label %for.body.preheader |
| |
| for.body.preheader: |
| %wide.trip.count = zext i32 %N to i64 |
| br label %for.body |
| |
| for.body: |
| %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ] |
| %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv |
| %0 = load i32, ptr %arrayidx, align 4 |
| %add = add nsw i32 %0, 42 |
| %arrayidx2 = getelementptr inbounds i32, ptr %dst, i64 %indvars.iv |
| store i32 %add, ptr %arrayidx2, align 4 |
| %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 |
| %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count |
| br i1 %exitcond.not, label %for.cond.cleanup, label %for.body |
| |
| for.cond.cleanup: |
| ret void |
| } |
| |
; Test that this uses the 'DataAndControlFlowWithoutRuntimeCheck' style of
; tail folding, where it uses an adjusted trip count and performs the
; loop-indvar increment after the active.lane.mask.
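; A minimal sketch of the corresponding pattern (illustrative names; %vf is
; the scalable vectorization factor): the trip count is pre-adjusted with a
; saturating subtraction in the preheader, so the mask for the next
; iteration can be computed from the current (pre-increment) index:
;
;   %adjusted.tc = call i64 @llvm.usub.sat.i64(i64 %N, i64 %vf)   ; in vector.ph
;   ...
;   %index.next = add i64 %index, %vf
;   %mask.next = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 %index, i64 %adjusted.tc)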
| define void @can_overflow_i64_induction_var(ptr noalias %dst, ptr readonly %src, i64 %N) #0 { |
| ; CHECK-LABEL: @can_overflow_i64_induction_var( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CMP6_NOT:%.*]] = icmp eq i64 [[N:%.*]], 0 |
| ; CHECK-NEXT: br i1 [[CMP6_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]] |
| ; CHECK: for.body.preheader: |
| ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; CHECK: vector.ph: |
| ; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() |
| ; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 2 |
| ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 |
| ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP1]]) |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) |
| ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; CHECK: vector.body: |
| ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]] |
| ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison) |
| ; CHECK-NEXT: [[TMP4:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 42, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) |
| ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX]] |
| ; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP4]], ptr [[TMP5]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) |
| ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP2]]) |
| ; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 |
| ; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]] |
| ; CHECK: middle.block: |
| ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]] |
| ; CHECK: scalar.ph: |
| ; CHECK-NEXT: br label [[FOR_BODY:%.*]] |
| ; CHECK: for.body: |
| ; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] |
| ; CHECK: for.cond.cleanup.loopexit: |
| ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] |
| ; CHECK: for.cond.cleanup: |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %cmp6.not = icmp eq i64 %N, 0 |
| br i1 %cmp6.not, label %for.cond.cleanup, label %for.body.preheader |
| |
| for.body.preheader: |
| br label %for.body |
| |
| for.body: |
| %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ] |
| %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv |
| %0 = load i32, ptr %arrayidx, align 4 |
| %add = add nsw i32 %0, 42 |
| %arrayidx2 = getelementptr inbounds i32, ptr %dst, i64 %indvars.iv |
| store i32 %add, ptr %arrayidx2, align 4 |
| %indvars.iv.next = add nuw i64 %indvars.iv, 1 |
| %exitcond.not = icmp eq i64 %indvars.iv.next, %N |
| br i1 %exitcond.not, label %for.cond.cleanup, label %for.body |
| |
| for.cond.cleanup: |
| ret void |
| } |
| |
| attributes #0 = { vscale_range(1,16) "target-features"="+sve" } |