| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s --implicit-check-not="call void @__msan_warning" |
| ; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ADDR --implicit-check-not="call void @__msan_warning" |
| ; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ORIGINS --implicit-check-not="call void @__msan_warning" |
| |
; Make sure the combination of address checking, origin tracking, and
; -msan-kernel does not crash. There is no FileCheck here; the RUN line only
; needs to succeed.
| ; RUN: opt < %s -msan-check-access-address=1 -msan-track-origins=1 -S -passes=msan -msan-kernel |
| |
| target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" |
| target triple = "x86_64-unknown-linux-gnu" |
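
; Test MemorySanitizer instrumentation of the masked vector memory
; intrinsics (masked.store, masked.load, masked.gather, masked.scatter,
; masked.expandload, masked.compressstore), with and without the
; sanitize_memory attribute, in the plain, address-checking (ADDR), and
; origin-tracking (ORIGINS) configurations.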
| |
| declare void @llvm.masked.store.v4i64.p0(<4 x i64>, ptr, i32, <4 x i1>) |
| declare <4 x double> @llvm.masked.load.v4f64.p0(ptr, i32, <4 x i1>, <4 x double>) |
| declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>) |
declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32, <8 x i1>)
| declare <16 x float> @llvm.masked.expandload.v16f32(ptr, <16 x i1>, <16 x float>) |
| declare void @llvm.masked.compressstore.v16f32(<16 x float>, ptr, <16 x i1>) |
| |
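; The shadow of %v is stored with an identically-masked store to the shadow
; address (the application address xored with 0x500000000000 on x86_64).
; ADDR additionally checks that the shadows of %p and %mask are clean before
; the access; ORIGINS copies the origin of %v into each 4-byte origin slot
; covering the 32-byte destination.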
| define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory { |
| ; CHECK-LABEL: @Store( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 |
| ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr |
| ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]]) |
| ; CHECK-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]]) |
| ; CHECK-NEXT: ret void |
| ; |
| ; ADDR-LABEL: @Store( |
| ; ADDR-NEXT: entry: |
| ; ADDR-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 |
| ; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 |
| ; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; ADDR-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 |
| ; ADDR-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr |
| ; ADDR-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]]) |
| ; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 |
| ; ADDR-NEXT: [[TMP6:%.*]] = bitcast <4 x i1> [[TMP2]] to i4 |
| ; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0 |
| ; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] |
| ; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1:![0-9]+]] |
| ; ADDR: 7: |
| ; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7:[0-9]+]] |
| ; ADDR-NEXT: unreachable |
| ; ADDR: 8: |
| ; ADDR-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]]) |
| ; ADDR-NEXT: ret void |
| ; |
| ; ORIGINS-LABEL: @Store( |
| ; ORIGINS-NEXT: entry: |
| ; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416 |
| ; ORIGINS-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -4 |
| ; ORIGINS-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr |
| ; ORIGINS-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]]) |
| ; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP7]], align 4 |
| ; ORIGINS-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[TMP7]], i32 1 |
| ; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP8]], align 4 |
| ; ORIGINS-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP7]], i32 2 |
| ; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP9]], align 4 |
| ; ORIGINS-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[TMP7]], i32 3 |
| ; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP10]], align 4 |
| ; ORIGINS-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP7]], i32 4 |
| ; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP11]], align 4 |
| ; ORIGINS-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP7]], i32 5 |
| ; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP12]], align 4 |
| ; ORIGINS-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP7]], i32 6 |
| ; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP13]], align 4 |
| ; ORIGINS-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP7]], i32 7 |
| ; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP14]], align 4 |
| ; ORIGINS-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]]) |
| ; ORIGINS-NEXT: ret void |
| ; |
| entry: |
| tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %v, ptr %p, i32 1, <4 x i1> %mask) |
| ret void |
| } |
| |
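; The result shadow is read with an identically-masked load from the shadow
; address, with the shadow of the %v passthrough filling the disabled lanes.
; ORIGINS selects the result origin between the passthrough origin and the
; origin loaded from the origin map.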
| define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory { |
| ; CHECK-LABEL: @Load( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 |
| ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr |
| ; CHECK-NEXT: [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]]) |
| ; CHECK-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]]) |
| ; CHECK-NEXT: store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8 |
| ; CHECK-NEXT: ret <4 x double> [[X]] |
| ; |
| ; ADDR-LABEL: @Load( |
| ; ADDR-NEXT: entry: |
| ; ADDR-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 |
| ; ADDR-NEXT: [[TMP1:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 |
| ; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; ADDR-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 |
| ; ADDR-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr |
| ; ADDR-NEXT: [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP2]]) |
| ; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP0]], 0 |
| ; ADDR-NEXT: [[TMP6:%.*]] = bitcast <4 x i1> [[TMP1]] to i4 |
| ; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0 |
| ; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] |
| ; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] |
| ; ADDR: 7: |
| ; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]] |
| ; ADDR-NEXT: unreachable |
| ; ADDR: 8: |
| ; ADDR-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]]) |
| ; ADDR-NEXT: store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8 |
| ; ADDR-NEXT: ret <4 x double> [[X]] |
| ; |
| ; ORIGINS-LABEL: @Load( |
| ; ORIGINS-NEXT: entry: |
| ; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416 |
| ; ORIGINS-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -4 |
| ; ORIGINS-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr |
| ; ORIGINS-NEXT: [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]]) |
| ; ORIGINS-NEXT: [[TMP8:%.*]] = sub <4 x i1> zeroinitializer, [[MASK]] |
| ; ORIGINS-NEXT: [[TMP9:%.*]] = sext <4 x i1> [[TMP8]] to <4 x i64> |
| ; ORIGINS-NEXT: [[TMP10:%.*]] = and <4 x i64> [[TMP0]], [[TMP9]] |
| ; ORIGINS-NEXT: [[TMP11:%.*]] = bitcast <4 x i64> [[TMP10]] to i256 |
| ; ORIGINS-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP11]], 0 |
| ; ORIGINS-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP7]], align 4 |
| ; ORIGINS-NEXT: [[TMP13:%.*]] = select i1 [[_MSCMP]], i32 [[TMP1]], i32 [[TMP12]] |
| ; ORIGINS-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]]) |
| ; ORIGINS-NEXT: store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8 |
| ; ORIGINS-NEXT: store i32 [[TMP13]], ptr @__msan_retval_origin_tls, align 4 |
| ; ORIGINS-NEXT: ret <4 x double> [[X]] |
| ; |
| entry: |
| %x = call <4 x double> @llvm.masked.load.v4f64.p0(ptr %p, i32 1, <4 x i1> %mask, <4 x double> %v) |
| ret <4 x double> %x |
| } |
| |
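; Without sanitize_memory no checks are emitted, but clean shadow (and a
; zero origin) is still written for the destination so that later sanitized
; reads of it do not report false positives.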
| define void @StoreNoSanitize(ptr %p, <4 x i64> %v, <4 x i1> %mask) { |
| ; CHECK-LABEL: @StoreNoSanitize( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080 |
| ; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr |
| ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]]) |
| ; CHECK-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]]) |
| ; CHECK-NEXT: ret void |
| ; |
| ; ADDR-LABEL: @StoreNoSanitize( |
| ; ADDR-NEXT: entry: |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; ADDR-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080 |
| ; ADDR-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr |
| ; ADDR-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]]) |
| ; ADDR-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]]) |
| ; ADDR-NEXT: ret void |
| ; |
| ; ORIGINS-LABEL: @StoreNoSanitize( |
| ; ORIGINS-NEXT: entry: |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64 |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080 |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416 |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4 |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr |
| ; ORIGINS-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]]) |
| ; ORIGINS-NEXT: store i32 0, ptr [[TMP5]], align 4 |
| ; ORIGINS-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP5]], i32 1 |
| ; ORIGINS-NEXT: store i32 0, ptr [[TMP6]], align 4 |
| ; ORIGINS-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP5]], i32 2 |
| ; ORIGINS-NEXT: store i32 0, ptr [[TMP7]], align 4 |
| ; ORIGINS-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[TMP5]], i32 3 |
| ; ORIGINS-NEXT: store i32 0, ptr [[TMP8]], align 4 |
| ; ORIGINS-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP5]], i32 4 |
| ; ORIGINS-NEXT: store i32 0, ptr [[TMP9]], align 4 |
| ; ORIGINS-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[TMP5]], i32 5 |
| ; ORIGINS-NEXT: store i32 0, ptr [[TMP10]], align 4 |
| ; ORIGINS-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP5]], i32 6 |
| ; ORIGINS-NEXT: store i32 0, ptr [[TMP11]], align 4 |
| ; ORIGINS-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i32 7 |
| ; ORIGINS-NEXT: store i32 0, ptr [[TMP12]], align 4 |
| ; ORIGINS-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]]) |
| ; ORIGINS-NEXT: ret void |
| ; |
| entry: |
| tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %v, ptr %p, i32 1, <4 x i1> %mask) |
| ret void |
| } |
| |
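; Without sanitize_memory the result is treated as fully initialized: only a
; zero return shadow (and zero origin) is written to TLS.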
| define <4 x double> @LoadNoSanitize(ptr %p, <4 x double> %v, <4 x i1> %mask) { |
| ; CHECK-LABEL: @LoadNoSanitize( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]]) |
| ; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; CHECK-NEXT: ret <4 x double> [[X]] |
| ; |
| ; ADDR-LABEL: @LoadNoSanitize( |
| ; ADDR-NEXT: entry: |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]]) |
| ; ADDR-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; ADDR-NEXT: ret <4 x double> [[X]] |
| ; |
| ; ORIGINS-LABEL: @LoadNoSanitize( |
| ; ORIGINS-NEXT: entry: |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]]) |
| ; ORIGINS-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 |
| ; ORIGINS-NEXT: ret <4 x double> [[X]] |
| ; |
| entry: |
| %x = call <4 x double> @llvm.masked.load.v4f64.p0(ptr %p, i32 1, <4 x i1> %mask, <4 x double> %v) |
| ret <4 x double> %x |
| } |
| |
| ; FIXME: Provide real implementation. |
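; The shadow is gathered through the lane-wise shadow addresses of %ptrs.
; ADDR checks the pointer shadows only for the enabled lanes; masked-off
; lanes are selected to zero before the check.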
| define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory { |
| ; CHECK-LABEL: @Gather( |
| ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64> |
| ; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i64> [[TMP2]], splat (i64 87960930222080) |
| ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr <16 x i64> [[TMP3]] to <16 x ptr> |
| ; CHECK-NEXT: [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP4]], i32 4, <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]]) |
| ; CHECK-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]]) |
| ; CHECK-NEXT: store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8 |
| ; CHECK-NEXT: ret <16 x float> [[RET]] |
| ; |
| ; ADDR-LABEL: @Gather( |
| ; ADDR-NEXT: [[TMP1:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 |
| ; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr @__msan_param_tls, align 8 |
| ; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> [[TMP2]], <16 x i64> zeroinitializer |
| ; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64> |
| ; ADDR-NEXT: [[TMP5:%.*]] = xor <16 x i64> [[TMP4]], splat (i64 87960930222080) |
| ; ADDR-NEXT: [[TMP6:%.*]] = inttoptr <16 x i64> [[TMP5]] to <16 x ptr> |
| ; ADDR-NEXT: [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP6]], i32 4, <16 x i1> [[MASK]], <16 x i32> [[TMP3]]) |
| ; ADDR-NEXT: [[TMP7:%.*]] = bitcast <16 x i1> [[TMP1]] to i16 |
| ; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i16 [[TMP7]], 0 |
| ; ADDR-NEXT: [[TMP8:%.*]] = bitcast <16 x i64> [[_MSMASKEDPTRS]] to i1024 |
| ; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i1024 [[TMP8]], 0 |
| ; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] |
| ; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] |
| ; ADDR: 9: |
| ; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]] |
| ; ADDR-NEXT: unreachable |
| ; ADDR: 10: |
| ; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]]) |
| ; ADDR-NEXT: store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8 |
| ; ADDR-NEXT: ret <16 x float> [[RET]] |
| ; |
| ; ORIGINS-LABEL: @Gather( |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 136) to ptr), align 4 |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64> |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = xor <16 x i64> [[TMP3]], splat (i64 87960930222080) |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr <16 x i64> [[TMP4]] to <16 x ptr> |
| ; ORIGINS-NEXT: [[TMP6:%.*]] = add <16 x i64> [[TMP4]], splat (i64 17592186044416) |
| ; ORIGINS-NEXT: [[TMP7:%.*]] = inttoptr <16 x i64> [[TMP6]] to <16 x ptr> |
| ; ORIGINS-NEXT: [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP5]], i32 4, <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]]) |
| ; ORIGINS-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]]) |
| ; ORIGINS-NEXT: store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8 |
| ; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 |
| ; ORIGINS-NEXT: ret <16 x float> [[RET]] |
| ; |
| %ret = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %passthru) |
| ret <16 x float> %ret |
| } |
| |
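; No checks, and only clean return shadow/origin; the lane select on the
; pointer shadows still appears, but it operates on zero vectors.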
| define <16 x float> @GatherNoSanitize(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %passthru) { |
| ; CHECK-LABEL: @GatherNoSanitize( |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]]) |
| ; CHECK-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; CHECK-NEXT: ret <16 x float> [[RET]] |
| ; |
| ; ADDR-LABEL: @GatherNoSanitize( |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> zeroinitializer, <16 x i64> zeroinitializer |
| ; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]]) |
| ; ADDR-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; ADDR-NEXT: ret <16 x float> [[RET]] |
| ; |
| ; ORIGINS-LABEL: @GatherNoSanitize( |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]]) |
| ; ORIGINS-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 |
| ; ORIGINS-NEXT: ret <16 x float> [[RET]] |
| ; |
| %ret = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %passthru) |
| ret <16 x float> %ret |
| } |
| |
| ; FIXME: Provide real implementation. |
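; The shadow of %value is scattered to the lane-wise shadow addresses; as
; with Gather, ADDR checks the pointer shadows only for the enabled lanes.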
| define void @Scatter(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask) sanitize_memory { |
| ; CHECK-LABEL: @Scatter( |
| ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64> |
| ; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[TMP2]], splat (i64 87960930222080) |
| ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr <8 x i64> [[TMP3]] to <8 x ptr> |
| ; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP1]], <8 x ptr> [[TMP4]], i32 8, <8 x i1> [[MASK:%.*]]) |
| ; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]]) |
| ; CHECK-NEXT: ret void |
| ; |
| ; ADDR-LABEL: @Scatter( |
| ; ADDR-NEXT: [[TMP1:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 |
| ; ADDR-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 |
| ; ADDR-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> [[TMP2]], <8 x i64> zeroinitializer |
| ; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64> |
| ; ADDR-NEXT: [[TMP5:%.*]] = xor <8 x i64> [[TMP4]], splat (i64 87960930222080) |
| ; ADDR-NEXT: [[TMP6:%.*]] = inttoptr <8 x i64> [[TMP5]] to <8 x ptr> |
| ; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP3]], <8 x ptr> [[TMP6]], i32 8, <8 x i1> [[MASK]]) |
| ; ADDR-NEXT: [[TMP7:%.*]] = bitcast <8 x i1> [[TMP1]] to i8 |
| ; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i8 [[TMP7]], 0 |
| ; ADDR-NEXT: [[TMP8:%.*]] = bitcast <8 x i64> [[_MSMASKEDPTRS]] to i512 |
| ; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP8]], 0 |
| ; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] |
| ; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] |
| ; ADDR: 9: |
| ; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]] |
| ; ADDR-NEXT: unreachable |
| ; ADDR: 10: |
| ; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]]) |
| ; ADDR-NEXT: ret void |
| ; |
| ; ORIGINS-LABEL: @Scatter( |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64> |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[TMP3]], splat (i64 87960930222080) |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr <8 x i64> [[TMP4]] to <8 x ptr> |
| ; ORIGINS-NEXT: [[TMP6:%.*]] = add <8 x i64> [[TMP4]], splat (i64 17592186044416) |
| ; ORIGINS-NEXT: [[TMP7:%.*]] = inttoptr <8 x i64> [[TMP6]] to <8 x ptr> |
| ; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP1]], <8 x ptr> [[TMP5]], i32 8, <8 x i1> [[MASK:%.*]]) |
| ; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]]) |
| ; ORIGINS-NEXT: ret void |
| ; |
| call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x ptr> %ptrs, i32 8, <8 x i1> %mask) |
| ret void |
| } |
| |
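; Clean shadow is scattered to keep the destinations' shadow consistent; no
; checks are emitted.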
| define void @ScatterNoSanitize(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask) { |
| ; CHECK-LABEL: @ScatterNoSanitize( |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64> |
| ; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 87960930222080) |
| ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr> |
| ; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK:%.*]]) |
| ; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]]) |
| ; CHECK-NEXT: ret void |
| ; |
| ; ADDR-LABEL: @ScatterNoSanitize( |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> zeroinitializer, <8 x i64> zeroinitializer |
| ; ADDR-NEXT: [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64> |
| ; ADDR-NEXT: [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 87960930222080) |
| ; ADDR-NEXT: [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr> |
| ; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK]]) |
| ; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]]) |
| ; ADDR-NEXT: ret void |
| ; |
| ; ORIGINS-LABEL: @ScatterNoSanitize( |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64> |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 87960930222080) |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr> |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = add <8 x i64> [[TMP2]], splat (i64 17592186044416) |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr <8 x i64> [[TMP4]] to <8 x ptr> |
| ; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK:%.*]]) |
| ; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]]) |
| ; ORIGINS-NEXT: ret void |
| ; |
| call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x ptr> %ptrs, i32 8, <8 x i1> %mask) |
| ret void |
| } |
| |
| ; FIXME: Provide real implementation. |
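; The shadow is expand-loaded from the shadow address with the same mask.
; Note that ORIGINS currently writes a zero return origin (see the FIXME
; above).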
| define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory { |
| ; CHECK-LABEL: @ExpandLoad( |
| ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 |
| ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr |
| ; CHECK-NEXT: [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP4]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]]) |
| ; CHECK-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]]) |
| ; CHECK-NEXT: store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8 |
| ; CHECK-NEXT: ret <16 x float> [[RET]] |
| ; |
| ; ADDR-LABEL: @ExpandLoad( |
| ; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 |
| ; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 |
| ; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; ADDR-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080 |
| ; ADDR-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr |
| ; ADDR-NEXT: [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP6]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP3]]) |
| ; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 |
| ; ADDR-NEXT: [[TMP7:%.*]] = bitcast <16 x i1> [[TMP2]] to i16 |
| ; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i16 [[TMP7]], 0 |
| ; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] |
| ; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] |
| ; ADDR: 8: |
| ; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]] |
| ; ADDR-NEXT: unreachable |
| ; ADDR: 9: |
| ; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]]) |
| ; ADDR-NEXT: store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8 |
| ; ADDR-NEXT: ret <16 x float> [[RET]] |
| ; |
| ; ORIGINS-LABEL: @ExpandLoad( |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr |
| ; ORIGINS-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416 |
| ; ORIGINS-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -4 |
| ; ORIGINS-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr |
| ; ORIGINS-NEXT: [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP5]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]]) |
| ; ORIGINS-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]]) |
| ; ORIGINS-NEXT: store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8 |
| ; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 |
| ; ORIGINS-NEXT: ret <16 x float> [[RET]] |
| ; |
| %ret = call <16 x float> @llvm.masked.expandload.v16f32(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) |
| ret <16 x float> %ret |
| } |
| |
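; Only a zero return shadow and origin are written.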
| define <16 x float> @ExpandLoadNoSanitize(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) { |
| ; CHECK-LABEL: @ExpandLoadNoSanitize( |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]]) |
| ; CHECK-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; CHECK-NEXT: ret <16 x float> [[RET]] |
| ; |
| ; ADDR-LABEL: @ExpandLoadNoSanitize( |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]]) |
| ; ADDR-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; ADDR-NEXT: ret <16 x float> [[RET]] |
| ; |
| ; ORIGINS-LABEL: @ExpandLoadNoSanitize( |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]]) |
| ; ORIGINS-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 |
| ; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 |
| ; ORIGINS-NEXT: ret <16 x float> [[RET]] |
| ; |
| %ret = call <16 x float> @llvm.masked.expandload.v16f32(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) |
| ret <16 x float> %ret |
| } |
| |
| ; FIXME: Provide real implementation. |
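; The shadow of %value is compress-stored to the shadow address with the
; same mask. ORIGINS computes the origin address but does not store to it
; (see the FIXME above).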
| define void @CompressStore(<16 x float> %value, ptr %ptr, <16 x i1> %mask) sanitize_memory { |
| ; CHECK-LABEL: @CompressStore( |
| ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 |
| ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr |
| ; CHECK-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP1]], ptr [[TMP4]], <16 x i1> [[MASK:%.*]]) |
| ; CHECK-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]]) |
| ; CHECK-NEXT: ret void |
| ; |
| ; ADDR-LABEL: @CompressStore( |
| ; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 |
| ; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 |
| ; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; ADDR-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080 |
| ; ADDR-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr |
| ; ADDR-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP3]], ptr [[TMP6]], <16 x i1> [[MASK:%.*]]) |
| ; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 |
| ; ADDR-NEXT: [[TMP7:%.*]] = bitcast <16 x i1> [[TMP2]] to i16 |
| ; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i16 [[TMP7]], 0 |
| ; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] |
| ; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] |
| ; ADDR: 8: |
| ; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]] |
| ; ADDR-NEXT: unreachable |
| ; ADDR: 9: |
| ; ADDR-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]]) |
| ; ADDR-NEXT: ret void |
| ; |
| ; ORIGINS-LABEL: @CompressStore( |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr |
| ; ORIGINS-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416 |
| ; ORIGINS-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -4 |
| ; ORIGINS-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr |
| ; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP1]], ptr [[TMP5]], <16 x i1> [[MASK:%.*]]) |
| ; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]]) |
| ; ORIGINS-NEXT: ret void |
| ; |
| call void @llvm.masked.compressstore.v16f32(<16 x float> %value, ptr %ptr, <16 x i1> %mask) |
| ret void |
| } |
| |
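; Clean shadow is compress-stored; no checks or origin stores are emitted.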
| define void @CompressStoreNoSanitize(<16 x float> %value, ptr %ptr, <16 x i1> %mask) { |
| ; CHECK-LABEL: @CompressStoreNoSanitize( |
| ; CHECK-NEXT: call void @llvm.donothing() |
| ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 |
| ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr |
| ; CHECK-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]]) |
| ; CHECK-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]]) |
| ; CHECK-NEXT: ret void |
| ; |
| ; ADDR-LABEL: @CompressStoreNoSanitize( |
| ; ADDR-NEXT: call void @llvm.donothing() |
| ; ADDR-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; ADDR-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 |
| ; ADDR-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr |
| ; ADDR-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]]) |
| ; ADDR-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]]) |
| ; ADDR-NEXT: ret void |
| ; |
| ; ORIGINS-LABEL: @CompressStoreNoSanitize( |
| ; ORIGINS-NEXT: call void @llvm.donothing() |
| ; ORIGINS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 |
| ; ORIGINS-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 |
| ; ORIGINS-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr |
| ; ORIGINS-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 |
| ; ORIGINS-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4 |
| ; ORIGINS-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr |
| ; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]]) |
| ; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]]) |
| ; ORIGINS-NEXT: ret void |
| ; |
| call void @llvm.masked.compressstore.v16f32(<16 x float> %value, ptr %ptr, <16 x i1> %mask) |
| ret void |
| } |