NumFastpaths++;
return true;
}
- if (Alignment == 0 || Alignment >= 8 || (Alignment % TypeSizeBytes) == 0)
+ if (Alignment == 0 || (Alignment % TypeSizeBytes) == 0)
OnAccessFunc = IsStore ? EsanAlignedStore[Idx] : EsanAlignedLoad[Idx];
else
OnAccessFunc = IsStore ? EsanUnalignedStore[Idx] : EsanUnalignedLoad[Idx];
// getMemoryAccessFuncIndex has already ruled out a size larger than 16
// and thus larger than a cache line for platforms this tool targets
// (and our shadow memory setup assumes 64-byte cache lines).
- assert(TypeSize <= 64);
+ assert(TypeSize <= 128);
if (!(TypeSize == 8 ||
(Alignment % (TypeSize / 8)) == 0)) {
if (ClAssumeIntraCacheLine)
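TypeSize above is in bits, so the relaxed assert admits the 16-byte (128-bit) accesses exercised by the new tests below. Dropping the "Alignment >= 8" clause matters for exactly those accesses: an 8-byte-aligned 16-byte load is not aligned to its own size and may straddle a 64-byte cache line, so it has to reach the unaligned entry points. A minimal standalone sketch of that dispatch rule, with hypothetical helper and variable names not taken from the patch:

// Sketch only, not part of the patch.
#include <cassert>

static bool usesAlignedRoutine(unsigned Alignment, unsigned TypeSizeBits) {
  unsigned TypeSizeBytes = TypeSizeBits / 8;
  // The pass treats Alignment == 0 as aligned; otherwise the alignment
  // must be a multiple of the access size.
  return Alignment == 0 || (Alignment % TypeSizeBytes) == 0;
}

int main() {
  assert(usesAlignedRoutine(8, 64));    // 8-byte-aligned i64: aligned routine
  assert(!usesAlignedRoutine(8, 128));  // 8-byte-aligned i128: the removed
                                        // "Alignment >= 8" test got this wrong
  assert(usesAlignedRoutine(16, 128));  // 16-byte-aligned i128: aligned again
  return 0;
}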
; CHECK-NEXT: ret i64 %tmp1
}
+define i128 @aligned16(i128* %a) {
+entry:
+ %tmp1 = load i128, i128* %a, align 16
+ ret i128 %tmp1
+; CHECK: %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT: %1 = and i64 %0, 17592186044415
+; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT: %3 = lshr i64 %2, 6
+; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: %5 = load i8, i8* %4
+; CHECK-NEXT: %6 = and i8 %5, -127
+; CHECK-NEXT: %7 = icmp ne i8 %6, -127
+; CHECK-NEXT: br i1 %7, label %8, label %11
+; CHECK: %9 = or i8 %5, -127
+; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: store i8 %9, i8* %10
+; CHECK-NEXT: br label %11
+; CHECK: %tmp1 = load i128, i128* %a, align 16
+; CHECK-NEXT: ret i128 %tmp1
+}
+
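The CHECK lines above spell out the inlined working-set fastpath: mask the application address, add the shadow offset, shift right by 6 so each 64-byte cache line maps to one shadow byte, then write the shadow byte only if its two tracking bits (0x81, printed as -127 for i8) are not both set already. A plain C++ restatement of that arithmetic follows as a sketch; the mask and offset constants are copied straight from the CHECK lines rather than derived from the shadow layout.

#include <cstdint>
#include <cstdio>

// 17592186044415 == 0xFFFFFFFFFFF: keep the low 44 address bits.
// 1337006139375616 == 0x4C00000000000: shadow offset, applied before the
// >> 6 that maps each 64-byte cache line to a single shadow byte.
static uintptr_t appToShadow(uintptr_t App) {
  return ((App & 0xFFFFFFFFFFFULL) + 0x4C00000000000ULL) >> 6;
}

// 0x81 == (uint8_t)-127: the bits the branch above tests and then sets.
static void markLine(uint8_t &ShadowByte) {
  if ((ShadowByte & 0x81) != 0x81)  // and i8 ..., -127 ; icmp ne ..., -127
    ShadowByte |= 0x81;             // or i8 ..., -127 ; store
}

int main() {
  uint8_t Fake = 0;                 // stand-in shadow byte
  markLine(Fake);
  std::printf("shadow addr for 0x7fff00001000: %#zx, byte now %#x\n",
              static_cast<size_t>(appToShadow(0x7fff00001000ULL)), Fake);
  return 0;
}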
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Not guaranteed to be intra-cache-line, but our defaults are to
; assume they are:
; CHECK-NEXT: ret i64 %tmp1
}
+define i128 @unaligned16(i128* %a) {
+entry:
+ %tmp1 = load i128, i128* %a, align 8
+ ret i128 %tmp1
+; CHECK: %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT: %1 = and i64 %0, 17592186044415
+; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT: %3 = lshr i64 %2, 6
+; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: %5 = load i8, i8* %4
+; CHECK-NEXT: %6 = and i8 %5, -127
+; CHECK-NEXT: %7 = icmp ne i8 %6, -127
+; CHECK-NEXT: br i1 %7, label %8, label %11
+; CHECK: %9 = or i8 %5, -127
+; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: store i8 %9, i8* %10
+; CHECK-NEXT: br label %11
+; CHECK: %tmp1 = load i128, i128* %a, align 8
+; CHECK-NEXT: ret i128 %tmp1
+}
+
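This load carries only 8-byte alignment, so a 16-byte access is not guaranteed to stay inside one 64-byte cache line; under the default controlled by ClAssumeIntraCacheLine in the code hunk above, the pass assumes it does and emits the same inline sequence as for @aligned16. Whether such an access actually crosses a line depends only on its offset within the line, as this small sketch (hypothetical helper name) checks:

#include <cassert>
#include <cstdint>

// Sketch, not from the patch: does a Size-byte access at App cross a
// 64-byte cache-line boundary?
static bool straddlesLine(uintptr_t App, unsigned Size) {
  return App / 64 != (App + Size - 1) / 64;
}

int main() {
  uintptr_t Line = 0x1000;                // any 64-byte-aligned address
  assert(!straddlesLine(Line + 48, 16));  // stays within the line
  assert(straddlesLine(Line + 56, 16));   // 8-byte aligned, yet spans two
  return 0;
}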
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Ensure that esan converts intrinsics to calls:
; CHECK-NEXT: ret i64 %tmp1
}
+define i128 @aligned16(i128* %a) {
+entry:
+ %tmp1 = load i128, i128* %a, align 16
+ ret i128 %tmp1
+; CHECK: %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT: %1 = and i64 %0, 17592186044415
+; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT: %3 = lshr i64 %2, 6
+; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: %5 = load i8, i8* %4
+; CHECK-NEXT: %6 = and i8 %5, -127
+; CHECK-NEXT: %7 = icmp ne i8 %6, -127
+; CHECK-NEXT: br i1 %7, label %8, label %11
+; CHECK: %9 = or i8 %5, -127
+; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: store i8 %9, i8* %10
+; CHECK-NEXT: br label %11
+; CHECK: %tmp1 = load i128, i128* %a, align 16
+; CHECK-NEXT: ret i128 %tmp1
+}
+
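This second @aligned16 is byte-for-byte identical to the one added above; judging from the surrounding comments and the call-based expectations that follow, it appears to go into the stricter test variant, the one that does not assume accesses stay within a cache line. Aligned 16-byte accesses keep the inline fastpath even there, because an access aligned to its own size can never cross a 64-byte line, which a quick exhaustive check over the possible line offsets confirms (sketch, not from the patch):

#include <cassert>
#include <cstdint>

// A 16-byte access at a 16-byte-aligned address always stays in one
// 64-byte cache line, since 16 divides 64.
int main() {
  for (uint64_t Offset = 0; Offset < 64; Offset += 16)
    assert(Offset / 64 == (Offset + 15) / 64);
  return 0;
}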
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Not guaranteed to be intra-cache-line
; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 4
; CHECK-NEXT: ret i64 %tmp1
}
+
+define i128 @unaligned16(i128* %a) {
+entry:
+ %tmp1 = load i128, i128* %a, align 8
+ ret i128 %tmp1
+; CHECK: %0 = bitcast i128* %a to i8*
+; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
+; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 8
+; CHECK-NEXT: ret i128 %tmp1
+}
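Here the same 8-byte-aligned 16-byte load is no longer assumed to be intra-cache-line, so instead of the inline shadow update the pass emits a call to the out-of-line handler __esan_unaligned_load16 on the bitcast address. The sketch below only illustrates what such a handler has to account for, namely one shadow byte per cache line the access touches; it reuses the mapping constants from the earlier sketch and is not the actual esan runtime routine, which this patch does not show.

#include <cstdint>
#include <cstdio>

// Illustrative only: count the shadow bytes a Size-byte access at App
// touches under the mapping the CHECK lines encode (one byte per
// 64-byte cache line).
static uintptr_t appToShadow(uintptr_t App) {
  return ((App & 0xFFFFFFFFFFFULL) + 0x4C00000000000ULL) >> 6;
}

static unsigned shadowBytesTouched(uintptr_t App, unsigned Size) {
  return static_cast<unsigned>(appToShadow(App + Size - 1) -
                               appToShadow(App)) + 1;
}

int main() {
  uintptr_t Line = 0x7fff00001000ULL;                      // 64-byte aligned
  std::printf("%u\n", shadowBytesTouched(Line + 48, 16));  // 1: one line
  std::printf("%u\n", shadowBytesTouched(Line + 56, 16));  // 2: two lines
  return 0;
}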