From: Derek Bruening <bruening@google.com>
Date: Wed, 6 Jul 2016 20:13:53 +0000 (+0000)
Subject: [esan|wset] Fix incorrect memory size assert
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=1a6ed75eac241bee45cfe39ce6ce7477bd62de9c;p=llvm

[esan|wset] Fix incorrect memory size assert

Summary:
Fixes an incorrect assert that fired on 128-bit loads and stores:
getMemoryAccessFuncIndex already permits access sizes up to 16 bytes, so
TypeSize can legitimately reach 128 bits. Also removes the overly
permissive "Alignment >= 8" clause so that a 16-byte access with only
8-byte alignment is routed to the unaligned callout.
Augments the wset tests to cover these cases.
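
As a worked example (a hedged sketch in standalone C++, not the pass
itself): getMemoryAccessFuncIndex allows access sizes up to 16 bytes,
and TypeSize is measured in bits, so a 16-byte (i128) load or store
reaches the assert with TypeSize == 128.

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t MaxAccessBytes = 16;           // largest size the pass indexes
    const uint32_t TypeSize = MaxAccessBytes * 8; // 128 bits for an i128 access
    // Old check: assert(TypeSize <= 64);         // fired for any 16-byte access
    assert(TypeSize <= 128);                      // corrected bound
    return 0;
  }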

Reviewers: aizatsky

Subscribers: vitalybuka, zhaoqin, kcc, eugenis, llvm-commits

Differential Revision: http://reviews.llvm.org/D22062

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@274666 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp b/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
index 6640e7b54bd..111b0875388 100644
--- a/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
@@ -671,7 +671,7 @@ bool EfficiencySanitizer::instrumentLoadOrStore(Instruction *I,
       NumFastpaths++;
       return true;
     }
-    if (Alignment == 0 || Alignment >= 8 || (Alignment % TypeSizeBytes) == 0)
+    if (Alignment == 0 || (Alignment % TypeSizeBytes) == 0)
       OnAccessFunc = IsStore ? EsanAlignedStore[Idx] : EsanAlignedLoad[Idx];
     else
       OnAccessFunc = IsStore ? EsanUnalignedStore[Idx] : EsanUnalignedLoad[Idx];
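
A hedged sketch of the dispatch in this hunk (standalone C++, with the
condition copied from the new line above): the dropped "Alignment >= 8"
clause treated a 16-byte access with only 8-byte alignment as aligned
even though 8 % 16 != 0; the new condition requires the alignment to be
a multiple of the access size (or the unknown/default value 0).

  #include <cstdint>
  #include <cstdio>

  static bool TreatAsAligned(uint32_t Alignment, uint32_t TypeSizeBytes) {
    return Alignment == 0 || (Alignment % TypeSizeBytes) == 0;
  }

  int main() {
    printf("%d\n", TreatAsAligned(16, 16)); // 1: align-16 i128 -> aligned callout
    printf("%d\n", TreatAsAligned(8, 16));  // 0: align-8 i128 -> unaligned callout
    printf("%d\n", TreatAsAligned(8, 8));   // 1: align-8 i64 stays aligned
    return 0;
  }
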
@@ -832,7 +832,7 @@ bool EfficiencySanitizer::instrumentFastpathWorkingSet(
   // getMemoryAccessFuncIndex has already ruled out a size larger than 16
   // and thus larger than a cache line for platforms this tool targets
   // (and our shadow memory setup assumes 64-byte cache lines).
-  assert(TypeSize <= 64);
+  assert(TypeSize <= 128);
   if (!(TypeSize == 8 ||
         (Alignment % (TypeSize / 8)) == 0)) {
     if (ClAssumeIntraCacheLine)
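
The inline fastpath guarded here is only sound for accesses that stay
within a single cache line. A quick standalone check (assuming the
64-byte lines mentioned in the comment above) that a naturally aligned
16-byte access can never cross one, since 64 is a multiple of 16:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t LineBytes = 64;
    for (uint64_t Addr = 0; Addr < 65536; Addr += 16) // every 16-aligned address
      assert(Addr / LineBytes == (Addr + 15) / LineBytes); // same cache line
    return 0;
  }
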
diff --git a/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll b/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
index a26a4ce54dd..3457cfc7e27 100644
--- a/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
+++ b/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
@@ -90,6 +90,27 @@ entry:
 ; CHECK-NEXT:   ret i64 %tmp1
 }
 
+define i128 @aligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 16
+  ret i128 %tmp1
+; CHECK:        %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
+; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT:   %3 = lshr i64 %2, 6
+; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   %5 = load i8, i8* %4
+; CHECK-NEXT:   %6 = and i8 %5, -127
+; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
+; CHECK-NEXT:   br i1 %7, label %8, label %11
+; CHECK:        %9 = or i8 %5, -127
+; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   store i8 %9, i8* %10
+; CHECK-NEXT:   br label %11
+; CHECK:        %tmp1 = load i128, i128* %a, align 16
+; CHECK-NEXT:   ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Not guaranteed to be intra-cache-line, but our defaults are to
 ; assume they are:
@@ -157,6 +178,27 @@ entry:
 ; CHECK-NEXT:   ret i64 %tmp1
 }
 
+define i128 @unaligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 8
+  ret i128 %tmp1
+; CHECK:        %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
+; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT:   %3 = lshr i64 %2, 6
+; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   %5 = load i8, i8* %4
+; CHECK-NEXT:   %6 = and i8 %5, -127
+; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
+; CHECK-NEXT:   br i1 %7, label %8, label %11
+; CHECK:        %9 = or i8 %5, -127
+; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   store i8 %9, i8* %10
+; CHECK-NEXT:   br label %11
+; CHECK:        %tmp1 = load i128, i128* %a, align 8
+; CHECK-NEXT:   ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Ensure that esan converts intrinsics to calls:
 
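One way to read the repeated CHECK pattern in the tests above (a hedged
reconstruction from the constants alone, not taken from the pass
source): 17592186044415 is 2^44 - 1, 1337006139375616 is
0x4C00000000000, and the lshr by 6 divides by 64, so each shadow byte
covers one 64-byte cache line.

  #include <cstdint>
  #include <cstdio>

  // Mirrors %0..%3 of the CHECK lines: mask the address to 44 bits, add
  // the shadow offset, then map 64 application bytes to 1 shadow byte.
  static uint64_t AppToShadow(uint64_t Addr) {
    const uint64_t Mask = (1ULL << 44) - 1;     // 17592186044415
    const uint64_t Offset = 0x4C00000000000ULL; // 1337006139375616
    return ((Addr & Mask) + Offset) >> 6;
  }

  int main() {
    printf("0x%llx\n", (unsigned long long)AppToShadow(0x7f0012345678ULL));
    return 0;
  }
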
diff --git a/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll b/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll
index cee9a8ba3e9..6eaa5e36a2a 100644
--- a/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll
+++ b/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll
@@ -91,6 +91,27 @@ entry:
 ; CHECK-NEXT:   ret i64 %tmp1
 }
 
+define i128 @aligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 16
+  ret i128 %tmp1
+; CHECK:        %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
+; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT:   %3 = lshr i64 %2, 6
+; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   %5 = load i8, i8* %4
+; CHECK-NEXT:   %6 = and i8 %5, -127
+; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
+; CHECK-NEXT:   br i1 %7, label %8, label %11
+; CHECK:        %9 = or i8 %5, -127
+; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   store i8 %9, i8* %10
+; CHECK-NEXT:   br label %11
+; CHECK:        %tmp1 = load i128, i128* %a, align 16
+; CHECK-NEXT:   ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Not guaranteed to be intra-cache-line
 
@@ -123,3 +144,13 @@ entry:
 ; CHECK-NEXT:   %tmp1 = load i64, i64* %a, align 4
 ; CHECK-NEXT:   ret i64 %tmp1
 }
+
+define i128 @unaligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 8
+  ret i128 %tmp1
+; CHECK:        %0 = bitcast i128* %a to i8*
+; CHECK-NEXT:   call void @__esan_unaligned_load16(i8* %0)
+; CHECK-NEXT:   %tmp1 = load i128, i128* %a, align 8
+; CHECK-NEXT:   ret i128 %tmp1
+}
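
For completeness, the -127 in the CHECK lines is the i8 bit pattern
0x81, i.e. bits 7 and 0 of the shadow byte. A hedged sketch of the
fastpath shape visible in the CHECKs: skip the shadow store when both
bits are already set, otherwise OR them in. In working_set_strict.ll
(presumably run with the intra-cache-line assumption disabled), the
align-8 i128 load is instead sent to the __esan_unaligned_load16
slowpath, as the final test above shows.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int8_t Neg127 = -127;
    printf("0x%02x\n", (uint8_t)Neg127); // 0x81: bits 7 and 0
    uint8_t Shadow = 0;
    if ((Shadow & 0x81) != 0x81) // CHECK: and/icmp against -127
      Shadow |= 0x81;            // CHECK: or i8 %5, -127
    printf("0x%02x\n", Shadow);  // prints 0x81
    return 0;
  }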