; (removed stray "blob: …" web-viewer artifact — not valid LLVM IR)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
; Scalar lround (i32 result from float) over four contiguous lanes:
; four loads from %x, four calls to @llvm.lround.i32.f32, four stores to %y.
; The autogenerated FileCheck assertions show the calls remain scalar, i.e.
; the SLP vectorizer does not form a vector lround for this pattern.
; NOTE(review): %n is unused — presumably kept for a uniform test signature.
define void @lround_i32f32(ptr %x, ptr %y, i32 %n) {
; CHECK-LABEL: @lround_i32f32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L0]])
; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L2]])
; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L4]])
; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L6]])
; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: ret void
;
entry:
  %l0 = load float, ptr %x, align 4
  %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
  %l2 = load float, ptr %arrayidx.1, align 4
  %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
  %l4 = load float, ptr %arrayidx.2, align 4
  %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
  %l6 = load float, ptr %arrayidx.3, align 4
  %l1 = tail call i32 @llvm.lround.i32.f32(float %l0)
  %l3 = tail call i32 @llvm.lround.i32.f32(float %l2)
  %l5 = tail call i32 @llvm.lround.i32.f32(float %l4)
  %l7 = tail call i32 @llvm.lround.i32.f32(float %l6)
  store i32 %l1, ptr %y, align 4
  %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
  store i32 %l3, ptr %arrayidx2.1, align 4
  %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
  store i32 %l5, ptr %arrayidx2.2, align 4
  %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
  store i32 %l7, ptr %arrayidx2.3, align 4
  ret void
}
; Scalar lround (i32 result from double) over four contiguous lanes.
; Same shape as @lround_i32f32 but with double sources; the FileCheck
; assertions show the four @llvm.lround.i32.f64 calls remain scalar.
; NOTE(review): loads use `align 4` on double elements — under-aligned but
; legal IR; presumably intentional to match the other tests. Confirm.
define void @lround_i32f64(ptr %x, ptr %y, i32 %n) {
; CHECK-LABEL: @lround_i32f64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L0]])
; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L2]])
; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L4]])
; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L6]])
; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: ret void
;
entry:
  %l0 = load double, ptr %x, align 4
  %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
  %l2 = load double, ptr %arrayidx.1, align 4
  %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
  %l4 = load double, ptr %arrayidx.2, align 4
  %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
  %l6 = load double, ptr %arrayidx.3, align 4
  %l1 = tail call i32 @llvm.lround.i32.f64(double %l0)
  %l3 = tail call i32 @llvm.lround.i32.f64(double %l2)
  %l5 = tail call i32 @llvm.lround.i32.f64(double %l4)
  %l7 = tail call i32 @llvm.lround.i32.f64(double %l6)
  store i32 %l1, ptr %y, align 4
  %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
  store i32 %l3, ptr %arrayidx2.1, align 4
  %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
  store i32 %l5, ptr %arrayidx2.2, align 4
  %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
  store i32 %l7, ptr %arrayidx2.3, align 4
  ret void
}
; Scalar lround (i64 result from float) over four contiguous lanes.
; The FileCheck assertions show the four @llvm.lround.i64.f32 calls
; remain scalar after SLP vectorization.
; NOTE(review): %n is unused — presumably kept for a uniform test signature.
define void @lround_i64f32(ptr %x, ptr %y, i64 %n) {
; CHECK-LABEL: @lround_i64f32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L0]])
; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L2]])
; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L4]])
; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L6]])
; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: ret void
;
entry:
  %l0 = load float, ptr %x, align 4
  %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
  %l2 = load float, ptr %arrayidx.1, align 4
  %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
  %l4 = load float, ptr %arrayidx.2, align 4
  %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
  %l6 = load float, ptr %arrayidx.3, align 4
  %l1 = tail call i64 @llvm.lround.i64.f32(float %l0)
  %l3 = tail call i64 @llvm.lround.i64.f32(float %l2)
  %l5 = tail call i64 @llvm.lround.i64.f32(float %l4)
  %l7 = tail call i64 @llvm.lround.i64.f32(float %l6)
  store i64 %l1, ptr %y, align 4
  %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
  store i64 %l3, ptr %arrayidx2.1, align 4
  %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
  store i64 %l5, ptr %arrayidx2.2, align 4
  %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
  store i64 %l7, ptr %arrayidx2.3, align 4
  ret void
}
; Scalar lround (i64 result from double) over four contiguous lanes.
; The FileCheck assertions show the four @llvm.lround.i64.f64 calls
; remain scalar after SLP vectorization.
; NOTE(review): %n is unused — presumably kept for a uniform test signature.
define void @lround_i64f64(ptr %x, ptr %y, i64 %n) {
; CHECK-LABEL: @lround_i64f64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L0]])
; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L2]])
; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L4]])
; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L6]])
; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: ret void
;
entry:
  %l0 = load double, ptr %x, align 4
  %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
  %l2 = load double, ptr %arrayidx.1, align 4
  %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
  %l4 = load double, ptr %arrayidx.2, align 4
  %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
  %l6 = load double, ptr %arrayidx.3, align 4
  %l1 = tail call i64 @llvm.lround.i64.f64(double %l0)
  %l3 = tail call i64 @llvm.lround.i64.f64(double %l2)
  %l5 = tail call i64 @llvm.lround.i64.f64(double %l4)
  %l7 = tail call i64 @llvm.lround.i64.f64(double %l6)
  store i64 %l1, ptr %y, align 4
  %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
  store i64 %l3, ptr %arrayidx2.1, align 4
  %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
  store i64 %l5, ptr %arrayidx2.2, align 4
  %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
  store i64 %l7, ptr %arrayidx2.3, align 4
  ret void
}
; Scalar llround (i64 result from float) over four contiguous lanes.
; The FileCheck assertions show the four @llvm.llround.i64.f32 calls
; remain scalar after SLP vectorization.
; NOTE(review): %n is unused — presumably kept for a uniform test signature.
define void @llround_i64f32(ptr %x, ptr %y, i64 %n) {
; CHECK-LABEL: @llround_i64f32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L0]])
; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L2]])
; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L4]])
; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L6]])
; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: ret void
;
entry:
  %l0 = load float, ptr %x, align 4
  %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
  %l2 = load float, ptr %arrayidx.1, align 4
  %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
  %l4 = load float, ptr %arrayidx.2, align 4
  %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
  %l6 = load float, ptr %arrayidx.3, align 4
  %l1 = tail call i64 @llvm.llround.i64.f32(float %l0)
  %l3 = tail call i64 @llvm.llround.i64.f32(float %l2)
  %l5 = tail call i64 @llvm.llround.i64.f32(float %l4)
  %l7 = tail call i64 @llvm.llround.i64.f32(float %l6)
  store i64 %l1, ptr %y, align 4
  %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
  store i64 %l3, ptr %arrayidx2.1, align 4
  %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
  store i64 %l5, ptr %arrayidx2.2, align 4
  %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
  store i64 %l7, ptr %arrayidx2.3, align 4
  ret void
}
; Scalar llround (i64 result from double) over four contiguous lanes.
; The FileCheck assertions show the four @llvm.llround.i64.f64 calls
; remain scalar after SLP vectorization.
; NOTE(review): %n is unused — presumably kept for a uniform test signature.
define void @llround_i64f64(ptr %x, ptr %y, i64 %n) {
; CHECK-LABEL: @llround_i64f64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L0]])
; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L2]])
; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L4]])
; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L6]])
; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: ret void
;
entry:
  %l0 = load double, ptr %x, align 4
  %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
  %l2 = load double, ptr %arrayidx.1, align 4
  %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
  %l4 = load double, ptr %arrayidx.2, align 4
  %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
  %l6 = load double, ptr %arrayidx.3, align 4
  %l1 = tail call i64 @llvm.llround.i64.f64(double %l0)
  %l3 = tail call i64 @llvm.llround.i64.f64(double %l2)
  %l5 = tail call i64 @llvm.llround.i64.f64(double %l4)
  %l7 = tail call i64 @llvm.llround.i64.f64(double %l6)
  store i64 %l1, ptr %y, align 4
  %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
  store i64 %l3, ptr %arrayidx2.1, align 4
  %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
  store i64 %l5, ptr %arrayidx2.2, align 4
  %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
  store i64 %l7, ptr %arrayidx2.3, align 4
  ret void
}
; Declarations for every lround/llround intrinsic variant used above.
; Fix: @llvm.lround.i32.f64 is called in @lround_i32f64 but was missing
; from this list (modern LLVM auto-declares intrinsics, but the file
; otherwise declares each variant explicitly — keep it consistent).
declare i32 @llvm.lround.i32.f32(float)
declare i32 @llvm.lround.i32.f64(double)
declare i64 @llvm.lround.i64.f32(float)
declare i64 @llvm.lround.i64.f64(double)
declare i64 @llvm.llround.i64.f32(float)
declare i64 @llvm.llround.i64.f64(double)