; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=dse -S | FileCheck %s
@BUFFER = external local_unnamed_addr global [0 x i8], align 1
define void @ArrayTestFullyOverlapping(i64 %0) {
;
; The DSE pass will kill the earlier store of size i32 using the later store
; of size i64 because the i64 store fully overlaps it:
;
; - both stores use the same base pointer (in SCEV style '@BUFFER + %0')
; - the offset between the two stores is 32 bits (the i32 store starts
;   4 bytes into the i64 store)
; - the size of the earlier store is 32 bits
; - the size of the later store is 64 bits
;
; CHECK-LABEL: @ArrayTestFullyOverlapping(
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP0:%.*]], -8
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP2]]
; CHECK-NEXT: store i64 0, ptr [[TMP3]], align 4
; CHECK-NEXT: ret void
;
%2 = add i64 %0, -8
%3 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %2
%4 = add i64 %0, -4
%5 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %4
store i32 1, ptr %5
store i64 0, ptr %3
ret void
}
define void @VectorTestFullyOverlapping(ptr %arg, i32 %i) {
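;
; The DSE pass will kill the earlier scalar float store at offset %i + 1
; because the later <2 x float> store at offset %i fully overwrites it.
;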
; CHECK-LABEL: @VectorTestFullyOverlapping(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I2:%.*]] = zext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[I3:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[I2]]
; CHECK-NEXT: store <2 x float> zeroinitializer, ptr [[I3]], align 16
; CHECK-NEXT: ret void
;
bb:
%i7 = add nuw nsw i32 %i, 1
%i8 = zext i32 %i7 to i64
%i9 = getelementptr inbounds float, ptr %arg, i64 %i8
store float 0.0, ptr %i9, align 4
%i2 = zext i32 %i to i64
%i3 = getelementptr inbounds float, ptr %arg, i64 %i2
store <2 x float> <float 0.0, float 0.0>, ptr %i3, align 16
ret void
}
define void @ScalableVectorTestFullyOverlapping(ptr %arg, i32 %i) vscale_range(1, 2) {
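;
; With vscale_range(1, 2) the later <vscale x 2 x float> store at offset %i
; covers the earlier scalar store at offset %i + 1 for every allowed vscale,
; but DSE currently keeps both stores.
;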
; CHECK-LABEL: @ScalableVectorTestFullyOverlapping(
; CHECK-NEXT: [[I_1:%.*]] = add nuw nsw i32 [[I:%.*]], 1
; CHECK-NEXT: [[EXT_I_1:%.*]] = zext i32 [[I_1]] to i64
; CHECK-NEXT: [[GEP_ARG_I_1:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I_1]]
; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_ARG_I_1]], align 4
; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I]]
; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 8
; CHECK-NEXT: ret void
;
%i.1 = add nuw nsw i32 %i, 1
%ext.i.1 = zext i32 %i.1 to i64
%gep.arg.i.1 = getelementptr inbounds float, ptr %arg, i64 %ext.i.1
store float 0.0, ptr %gep.arg.i.1
%ext.i = zext i32 %i to i64
%gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i
ret void
}
define void @ScalableVectorTestFullyOverlapping2(ptr %arg, i32 %i) {
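;
; The later <vscale x 4 x float> store at offset %i covers the earlier
; <vscale x 2 x float> store at offset %i + 1 for any value of vscale,
; but DSE currently keeps both stores.
;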
; CHECK-LABEL: @ScalableVectorTestFullyOverlapping2(
; CHECK-NEXT: [[I_1:%.*]] = add nuw nsw i32 [[I:%.*]], 1
; CHECK-NEXT: [[EXT_I_1:%.*]] = zext i32 [[I_1]] to i64
; CHECK-NEXT: [[GEP_ARG_I_1:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I_1]]
; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I_1]], align 8
; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I]]
; CHECK-NEXT: store <vscale x 4 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 16
; CHECK-NEXT: ret void
;
%i.1 = add nuw nsw i32 %i, 1
%ext.i.1 = zext i32 %i.1 to i64
%gep.arg.i.1 = getelementptr inbounds float, ptr %arg, i64 %ext.i.1
store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i.1
%ext.i = zext i32 %i to i64
%gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
store <vscale x 4 x float> zeroinitializer, ptr %gep.arg.i
ret void
}
define void @ScalableVectorTestNonOverlapping(ptr %arg, i32 %i) vscale_range(1, 2) {
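;
; With vscale_range(1, 2) the <vscale x 2 x float> store at offset %i spans
; at most four floats, so it cannot reach the scalar store at offset %i + 10;
; both stores remain.
;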
; CHECK-LABEL: @ScalableVectorTestNonOverlapping(
; CHECK-NEXT: [[I_10:%.*]] = add nuw nsw i32 [[I:%.*]], 10
; CHECK-NEXT: [[EXT_I_10:%.*]] = zext i32 [[I_10]] to i64
; CHECK-NEXT: [[GEP_ARG_I_10:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I_10]]
; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_ARG_I_10]], align 4
; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I]]
; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 8
; CHECK-NEXT: ret void
;
%i.10 = add nuw nsw i32 %i, 10
%ext.i.10 = zext i32 %i.10 to i64
%gep.arg.i.10 = getelementptr inbounds float, ptr %arg, i64 %ext.i.10
store float 0.0, ptr %gep.arg.i.10
%ext.i = zext i32 %i to i64
%gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i
ret void
}
define void @ScalableVectorTestNonOverlapping2(ptr %arg, i32 %i) vscale_range(1, 2) {
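;
; With vscale_range(1, 2) the later <vscale x 4 x float> store at offset %i
; spans at most eight floats, so it cannot reach the earlier store at offset
; %i + 10; both stores remain.
;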
; CHECK-LABEL: @ScalableVectorTestNonOverlapping2(
; CHECK-NEXT: [[I_10:%.*]] = add nuw nsw i32 [[I:%.*]], 10
; CHECK-NEXT: [[EXT_I_10:%.*]] = zext i32 [[I_10]] to i64
; CHECK-NEXT: [[GEP_ARG_I_10:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I_10]]
; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I_10]], align 8
; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I]]
; CHECK-NEXT: store <vscale x 4 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 16
; CHECK-NEXT: ret void
;
%i.10 = add nuw nsw i32 %i, 10
%ext.i.10 = zext i32 %i.10 to i64
%gep.arg.i.10 = getelementptr inbounds float, ptr %arg, i64 %ext.i.10
store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i.10
%ext.i = zext i32 %i to i64
%gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
store <vscale x 4 x float> zeroinitializer, ptr %gep.arg.i
ret void
}
define void @ArrayTestPartiallyOverlapping(i64 %0) {
;
; The DSE pass will not kill the earlier i32 store because the later i64
; store only partially overlaps it and does not fully clobber it.
;
; CHECK-LABEL: @ArrayTestPartiallyOverlapping(
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP0:%.*]], 10
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], 15
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP4]]
; CHECK-NEXT: store i32 1, ptr [[TMP5]], align 4
; CHECK-NEXT: store i64 0, ptr [[TMP3]], align 4
; CHECK-NEXT: ret void
;
%2 = add i64 %0, 10
%3 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %2
%4 = add i64 %0, 15
%5 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %4
store i32 1, ptr %5
store i64 0, ptr %3
ret void
}
define void @VectorTestPartiallyOverlapping(ptr %arg, i32 %i) {
;
; The DSE pass will not kill the earlier <2 x float> store because the later
; <2 x float> store at offset %i + 1 only partially overlaps it and does not
; fully clobber it.
;
; CHECK-LABEL: @VectorTestPartiallyOverlapping(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I2:%.*]] = zext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[I3:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[I2]]
; CHECK-NEXT: store <2 x float> splat (float 1.000000e+00), ptr [[I3]], align 16
; CHECK-NEXT: [[I5:%.*]] = add nuw nsw i32 [[I]], 1
; CHECK-NEXT: [[I6:%.*]] = zext i32 [[I5]] to i64
; CHECK-NEXT: [[I7:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[I6]]
; CHECK-NEXT: store <2 x float> zeroinitializer, ptr [[I7]], align 16
; CHECK-NEXT: ret void
;
bb:
%i2 = zext i32 %i to i64
%i3 = getelementptr inbounds float, ptr %arg, i64 %i2
store <2 x float> <float 1.000000e+00, float 1.000000e+00>, ptr %i3, align 16
%i5 = add nuw nsw i32 %i, 1
%i6 = zext i32 %i5 to i64
%i7 = getelementptr inbounds float, ptr %arg, i64 %i6
store <2 x float> <float 0.0, float 0.0>, ptr %i7, align 16
ret void
}
define void @ScalableVectorTestPartiallyOverlapping(ptr %arg, i32 %i) {
;
; The DSE pass will not kill the earlier <vscale x 2 x float> store because
; the later store at offset %i + 1 only partially overlaps it and does not
; fully clobber it.
;
; CHECK-LABEL: @ScalableVectorTestPartiallyOverlapping(
; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I]]
; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 8
; CHECK-NEXT: [[I_1:%.*]] = add nuw nsw i32 [[I]], 1
; CHECK-NEXT: [[EXT_I_1:%.*]] = zext i32 [[I_1]] to i64
; CHECK-NEXT: [[GEP_ARG_I_1:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I_1]]
; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I_1]], align 8
; CHECK-NEXT: ret void
;
%ext.i = zext i32 %i to i64
%gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i
%i.1 = add nuw nsw i32 %i, 1
%ext.i.1 = zext i32 %i.1 to i64
%gep.arg.i.1 = getelementptr inbounds float, ptr %arg, i64 %ext.i.1
store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i.1
ret void
}