From b75041b42ac6b60e6bc5ed07aec07fe654d00ebb Mon Sep 17 00:00:00 2001 From: Daniel Neilson Date: Tue, 18 Jul 2017 01:06:53 +0000 Subject: [PATCH] Add element-atomic mem intrinsic canary tests for Efficiency Sanitizer. Summary: Add canary tests to verify that ESAN currently does nothing with the element atomic memory intrinsics for memcpy, memmove, and memset. Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have been added to the MemIntrinsic class hierarchy. These will act as a reminder to verify that ESAN handles these intrinsics properly once they have been added to that class hierarchy. Reviewers: reames Reviewed By: reames Subscribers: llvm-commits Differential Revision: https://reviews.llvm.org/D35508 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@308250 91177308-0d34-0410-b5e6-96231b3b80d8 --- .../EfficiencySanitizer/working_set_basic.ll | 33 +++++++++++++++++++ .../EfficiencySanitizer/working_set_slow.ll | 32 ++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll b/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll index 3457cfc7e27..344ad86e99e 100644 --- a/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll +++ b/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll @@ -233,6 +233,39 @@ entry: ; CHECK: ret void } +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Ensure that esan doesn't convert element atomic memory intrinsics to +; calls. 
+ +declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind +declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind +declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind + +define void @elementAtomic_memCpyTest(i8* nocapture %x, i8* nocapture %y) { + ; CHECK-LABEL: elementAtomic_memCpyTest + ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1) + ; CHECK-NEXT: ret void + tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1) + ret void +} + +define void @elementAtomic_memMoveTest(i8* nocapture %x, i8* nocapture %y) { + ; CHECK-LABEL: elementAtomic_memMoveTest + ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1) + ; CHECK-NEXT: ret void + tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1) + ret void +} + +define void @elementAtomic_memSetTest(i8* nocapture %x) { + ; CHECK-LABEL: elementAtomic_memSetTest + ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1) + ; CHECK-NEXT: ret void + tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1) + ret void +} + + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Top-level: diff --git a/test/Instrumentation/EfficiencySanitizer/working_set_slow.ll b/test/Instrumentation/EfficiencySanitizer/working_set_slow.ll index 1c5978e5286..22c8d5c59a1 100644 --- a/test/Instrumentation/EfficiencySanitizer/working_set_slow.ll +++ b/test/Instrumentation/EfficiencySanitizer/working_set_slow.ll @@ -250,6 +250,38 @@ entry: ; CHECK: ret 
void } +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Ensure that esan doesn't convert element atomic memory intrinsics to +; calls. + +declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind +declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind +declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind + +define void @elementAtomic_memCpyTest(i8* nocapture %x, i8* nocapture %y) { + ; CHECK-LABEL: elementAtomic_memCpyTest + ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1) + ; CHECK-NEXT: ret void + tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1) + ret void +} + +define void @elementAtomic_memMoveTest(i8* nocapture %x, i8* nocapture %y) { + ; CHECK-LABEL: elementAtomic_memMoveTest + ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1) + ; CHECK-NEXT: ret void + tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1) + ret void +} + +define void @elementAtomic_memSetTest(i8* nocapture %x) { + ; CHECK-LABEL: elementAtomic_memSetTest + ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1) + ; CHECK-NEXT: ret void + tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1) + ret void +} + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Top-level: -- 2.49.0