; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -force-vector-interleave=4 -force-vector-width=4 < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"


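; The loop below performs an unconditional memset-style store. With
; -force-vector-interleave=4 and tail folding, the vector body is expected to
; contain four <vscale x 4 x i32> masked stores, each predicated by its own
; active lane mask computed via llvm.get.active.lane.mask.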
define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-LABEL: @simple_memset(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 16
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 16
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
; CHECK-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 8
; CHECK-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 12
; CHECK-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP10]]
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 16
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[UMAX]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[UMAX]], [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 16
; CHECK-NEXT: [[TMP18:%.*]] = sub i64 [[UMAX]], [[TMP17]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp ugt i64 [[UMAX]], [[TMP17]]
; CHECK-NEXT: [[TMP20:%.*]] = select i1 [[TMP19]], i64 [[TMP18]], i64 0
; CHECK-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], 16
; CHECK-NEXT: [[TMP23:%.*]] = sub i64 [[UMAX]], [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = icmp ugt i64 [[UMAX]], [[TMP22]]
; CHECK-NEXT: [[TMP25:%.*]] = select i1 [[TMP24]], i64 [[TMP23]], i64 0
; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 16
; CHECK-NEXT: [[TMP28:%.*]] = sub i64 [[UMAX]], [[TMP27]]
; CHECK-NEXT: [[TMP29:%.*]] = icmp ugt i64 [[UMAX]], [[TMP27]]
; CHECK-NEXT: [[TMP30:%.*]] = select i1 [[TMP29]], i64 [[TMP28]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[UMAX]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY3:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_PART_NEXT]], i64 [[UMAX]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY4:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_PART_NEXT1]], i64 [[UMAX]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY5:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_PART_NEXT2]], i64 [[UMAX]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT10]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT12:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT13:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT12]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT14:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT15:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT14]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT19:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK7:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY3]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT16:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK8:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY4]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT17:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK9:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY5]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT18:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[INDEX6]], 0
; CHECK-NEXT: [[TMP32:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP33:%.*]] = mul i64 [[TMP32]], 4
; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[TMP33]], 0
; CHECK-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 1
; CHECK-NEXT: [[TMP36:%.*]] = add i64 [[INDEX6]], [[TMP35]]
; CHECK-NEXT: [[TMP37:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP38:%.*]] = mul i64 [[TMP37]], 8
; CHECK-NEXT: [[TMP39:%.*]] = add i64 [[TMP38]], 0
; CHECK-NEXT: [[TMP40:%.*]] = mul i64 [[TMP39]], 1
; CHECK-NEXT: [[TMP41:%.*]] = add i64 [[INDEX6]], [[TMP40]]
; CHECK-NEXT: [[TMP42:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP43:%.*]] = mul i64 [[TMP42]], 12
; CHECK-NEXT: [[TMP44:%.*]] = add i64 [[TMP43]], 0
; CHECK-NEXT: [[TMP45:%.*]] = mul i64 [[TMP44]], 1
; CHECK-NEXT: [[TMP46:%.*]] = add i64 [[INDEX6]], [[TMP45]]
; CHECK-NEXT: [[TMP47:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP48:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP36]]
; CHECK-NEXT: [[TMP49:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP41]]
; CHECK-NEXT: [[TMP50:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP46]]
; CHECK-NEXT: [[TMP51:%.*]] = getelementptr i32, ptr [[TMP47]], i32 0
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP51]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[TMP52:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP53:%.*]] = mul i64 [[TMP52]], 4
; CHECK-NEXT: [[TMP54:%.*]] = getelementptr i32, ptr [[TMP47]], i64 [[TMP53]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT11]], ptr [[TMP54]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK7]])
; CHECK-NEXT: [[TMP55:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP56:%.*]] = mul i64 [[TMP55]], 8
; CHECK-NEXT: [[TMP57:%.*]] = getelementptr i32, ptr [[TMP47]], i64 [[TMP56]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT13]], ptr [[TMP57]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK8]])
; CHECK-NEXT: [[TMP58:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP59:%.*]] = mul i64 [[TMP58]], 12
; CHECK-NEXT: [[TMP60:%.*]] = getelementptr i32, ptr [[TMP47]], i64 [[TMP59]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT15]], ptr [[TMP60]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK9]])
; CHECK-NEXT: [[TMP61:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP62:%.*]] = mul i64 [[TMP61]], 4
; CHECK-NEXT: [[TMP63:%.*]] = add i64 [[INDEX6]], [[TMP62]]
; CHECK-NEXT: [[TMP64:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP65:%.*]] = mul i64 [[TMP64]], 8
; CHECK-NEXT: [[TMP66:%.*]] = add i64 [[INDEX6]], [[TMP65]]
; CHECK-NEXT: [[TMP67:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP68:%.*]] = mul i64 [[TMP67]], 12
; CHECK-NEXT: [[TMP69:%.*]] = add i64 [[INDEX6]], [[TMP68]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX6]], i64 [[TMP15]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP63]], i64 [[TMP20]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP66]], i64 [[TMP25]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP69]], i64 [[TMP30]])
; CHECK-NEXT: [[TMP70:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP71:%.*]] = mul i64 [[TMP70]], 16
; CHECK-NEXT: [[INDEX_NEXT19]] = add i64 [[INDEX6]], [[TMP71]]
; CHECK-NEXT: [[TMP72:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP73:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT16]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP74:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT17]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP75:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT18]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP76:%.*]] = extractelement <vscale x 4 x i1> [[TMP72]], i32 0
; CHECK-NEXT: br i1 [[TMP76]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; CHECK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: while.end.loopexit:
; CHECK-NEXT: ret void
;
entry:
  br label %while.body

while.body: ; preds = %while.body, %entry
  %index = phi i64 [ %index.next, %while.body ], [ 0, %entry ]
  %gep = getelementptr i32, ptr %ptr, i64 %index
  store i32 %val, ptr %gep
  %index.next = add nsw i64 %index, 1
  %cmp10 = icmp ult i64 %index.next, %n
  br i1 %cmp10, label %while.body, label %while.end.loopexit, !llvm.loop !0

while.end.loopexit: ; preds = %while.body
  ret void
}

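; Same as above, but the store is guarded by a condition loaded from
; %cond_ptr. Each of the four parts is expected to load its condition values
; with a masked load and combine the compare result with that part's active
; lane mask (via select) to form the predicate for the masked store.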
define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias %ptr, i64 %n) #0 {
; CHECK-LABEL: @cond_memset(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 16
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 16
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
; CHECK-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 8
; CHECK-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 12
; CHECK-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP10]]
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 16
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[UMAX]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[UMAX]], [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 16
; CHECK-NEXT: [[TMP18:%.*]] = sub i64 [[UMAX]], [[TMP17]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp ugt i64 [[UMAX]], [[TMP17]]
; CHECK-NEXT: [[TMP20:%.*]] = select i1 [[TMP19]], i64 [[TMP18]], i64 0
; CHECK-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], 16
; CHECK-NEXT: [[TMP23:%.*]] = sub i64 [[UMAX]], [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = icmp ugt i64 [[UMAX]], [[TMP22]]
; CHECK-NEXT: [[TMP25:%.*]] = select i1 [[TMP24]], i64 [[TMP23]], i64 0
; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 16
; CHECK-NEXT: [[TMP28:%.*]] = sub i64 [[UMAX]], [[TMP27]]
; CHECK-NEXT: [[TMP29:%.*]] = icmp ugt i64 [[UMAX]], [[TMP27]]
; CHECK-NEXT: [[TMP30:%.*]] = select i1 [[TMP29]], i64 [[TMP28]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[UMAX]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY3:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_PART_NEXT]], i64 [[UMAX]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY4:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_PART_NEXT1]], i64 [[UMAX]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY5:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_PART_NEXT2]], i64 [[UMAX]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT13:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT14:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT13]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT15:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT16:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT15]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT17:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT18:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT17]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT22:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK7:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY3]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT19:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK8:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY4]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT20:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK9:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY5]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT21:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[INDEX6]], 0
; CHECK-NEXT: [[TMP32:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP33:%.*]] = mul i64 [[TMP32]], 4
; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[TMP33]], 0
; CHECK-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 1
; CHECK-NEXT: [[TMP36:%.*]] = add i64 [[INDEX6]], [[TMP35]]
; CHECK-NEXT: [[TMP37:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP38:%.*]] = mul i64 [[TMP37]], 8
; CHECK-NEXT: [[TMP39:%.*]] = add i64 [[TMP38]], 0
; CHECK-NEXT: [[TMP40:%.*]] = mul i64 [[TMP39]], 1
; CHECK-NEXT: [[TMP41:%.*]] = add i64 [[INDEX6]], [[TMP40]]
; CHECK-NEXT: [[TMP42:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP43:%.*]] = mul i64 [[TMP42]], 12
; CHECK-NEXT: [[TMP44:%.*]] = add i64 [[TMP43]], 0
; CHECK-NEXT: [[TMP45:%.*]] = mul i64 [[TMP44]], 1
; CHECK-NEXT: [[TMP46:%.*]] = add i64 [[INDEX6]], [[TMP45]]
; CHECK-NEXT: [[TMP47:%.*]] = getelementptr i32, ptr [[COND_PTR:%.*]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP48:%.*]] = getelementptr i32, ptr [[COND_PTR]], i64 [[TMP36]]
; CHECK-NEXT: [[TMP49:%.*]] = getelementptr i32, ptr [[COND_PTR]], i64 [[TMP41]]
; CHECK-NEXT: [[TMP50:%.*]] = getelementptr i32, ptr [[COND_PTR]], i64 [[TMP46]]
; CHECK-NEXT: [[TMP51:%.*]] = getelementptr i32, ptr [[TMP47]], i32 0
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP51]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP52:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP53:%.*]] = mul i64 [[TMP52]], 4
; CHECK-NEXT: [[TMP54:%.*]] = getelementptr i32, ptr [[TMP47]], i64 [[TMP53]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP54]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP55:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP56:%.*]] = mul i64 [[TMP55]], 8
; CHECK-NEXT: [[TMP57:%.*]] = getelementptr i32, ptr [[TMP47]], i64 [[TMP56]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP57]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP58:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP59:%.*]] = mul i64 [[TMP58]], 12
; CHECK-NEXT: [[TMP60:%.*]] = getelementptr i32, ptr [[TMP47]], i64 [[TMP59]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP60]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK9]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP61:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP62:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_MASKED_LOAD10]], zeroinitializer
; CHECK-NEXT: [[TMP63:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_MASKED_LOAD11]], zeroinitializer
; CHECK-NEXT: [[TMP64:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_MASKED_LOAD12]], zeroinitializer
; CHECK-NEXT: [[TMP65:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP31]]
; CHECK-NEXT: [[TMP66:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP36]]
; CHECK-NEXT: [[TMP67:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP41]]
; CHECK-NEXT: [[TMP68:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[TMP46]]
; CHECK-NEXT: [[TMP69:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP61]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP70:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 4 x i1> [[TMP62]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP71:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 4 x i1> [[TMP63]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP72:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK9]], <vscale x 4 x i1> [[TMP64]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP73:%.*]] = getelementptr i32, ptr [[TMP65]], i32 0
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP73]], i32 4, <vscale x 4 x i1> [[TMP69]])
; CHECK-NEXT: [[TMP74:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP75:%.*]] = mul i64 [[TMP74]], 4
; CHECK-NEXT: [[TMP76:%.*]] = getelementptr i32, ptr [[TMP65]], i64 [[TMP75]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT14]], ptr [[TMP76]], i32 4, <vscale x 4 x i1> [[TMP70]])
; CHECK-NEXT: [[TMP77:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP78:%.*]] = mul i64 [[TMP77]], 8
; CHECK-NEXT: [[TMP79:%.*]] = getelementptr i32, ptr [[TMP65]], i64 [[TMP78]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT16]], ptr [[TMP79]], i32 4, <vscale x 4 x i1> [[TMP71]])
; CHECK-NEXT: [[TMP80:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP81:%.*]] = mul i64 [[TMP80]], 12
; CHECK-NEXT: [[TMP82:%.*]] = getelementptr i32, ptr [[TMP65]], i64 [[TMP81]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT18]], ptr [[TMP82]], i32 4, <vscale x 4 x i1> [[TMP72]])
; CHECK-NEXT: [[TMP83:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP84:%.*]] = mul i64 [[TMP83]], 4
; CHECK-NEXT: [[TMP85:%.*]] = add i64 [[INDEX6]], [[TMP84]]
; CHECK-NEXT: [[TMP86:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP87:%.*]] = mul i64 [[TMP86]], 8
; CHECK-NEXT: [[TMP88:%.*]] = add i64 [[INDEX6]], [[TMP87]]
; CHECK-NEXT: [[TMP89:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP90:%.*]] = mul i64 [[TMP89]], 12
; CHECK-NEXT: [[TMP91:%.*]] = add i64 [[INDEX6]], [[TMP90]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX6]], i64 [[TMP15]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT19]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP85]], i64 [[TMP20]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT20]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP88]], i64 [[TMP25]])
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT21]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP91]], i64 [[TMP30]])
; CHECK-NEXT: [[TMP92:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP93:%.*]] = mul i64 [[TMP92]], 16
; CHECK-NEXT: [[INDEX_NEXT22]] = add i64 [[INDEX6]], [[TMP93]]
; CHECK-NEXT: [[TMP94:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP95:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT19]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP96:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT20]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP97:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT21]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP98:%.*]] = extractelement <vscale x 4 x i1> [[TMP94]], i32 0
; CHECK-NEXT: br i1 [[TMP98]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[COND_GEP:%.*]] = getelementptr i32, ptr [[COND_PTR]], i64 [[INDEX]]
; CHECK-NEXT: [[COND_I32:%.*]] = load i32, ptr [[COND_GEP]], align 4
; CHECK-NEXT: [[COND_I1:%.*]] = icmp ne i32 [[COND_I32]], 0
; CHECK-NEXT: br i1 [[COND_I1]], label [[DO_STORE:%.*]], label [[WHILE_END]]
; CHECK: do.store:
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; CHECK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; CHECK-NEXT: br label [[WHILE_END]]
; CHECK: while.end:
; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: while.end.loopexit:
; CHECK-NEXT: ret void
;
entry:
  br label %while.body

while.body: ; preds = %while.end, %entry
  %index = phi i64 [ %index.next, %while.end ], [ 0, %entry ]
  %cond_gep = getelementptr i32, ptr %cond_ptr, i64 %index
  %cond_i32 = load i32, ptr %cond_gep
  %cond_i1 = icmp ne i32 %cond_i32, 0
  br i1 %cond_i1, label %do.store, label %while.end

do.store:
  %gep = getelementptr i32, ptr %ptr, i64 %index
  store i32 %val, ptr %gep
  br label %while.end

while.end:
  %index.next = add nsw i64 %index, 1
  %cmp10 = icmp ult i64 %index.next, %n
  br i1 %cmp10, label %while.body, label %while.end.loopexit, !llvm.loop !0

while.end.loopexit: ; preds = %while.end
  ret void
}

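; The loop metadata below requests scalable vectorization
; (llvm.loop.vectorize.scalable.enable); the function attribute enables SVE.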
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}

attributes #0 = { "target-features"="+sve" }