Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);
Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
- Value *emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
+ void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
private:
LLVMContext *C;
Constant *ShadowGlobal;
Value *LocalDynamicShadow = nullptr;
+ Value *StackBaseTag = nullptr;
GlobalValue *ThreadPtrGlobal = nullptr;
};
// x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
// masks.
// The list does not include the value 255, which is used for UAR.
- static unsigned FastMasks[] = {
- 0, 1, 2, 3, 4, 6, 7, 8, 12, 14, 15, 16, 24,
- 28, 30, 31, 32, 48, 56, 60, 62, 63, 64, 96, 112, 120,
- 124, 126, 127, 128, 192, 224, 240, 248, 252, 254};
+ //
+ // Because we are more likely to use earlier elements of this list than later
+ // ones, it is sorted in increasing order of probability of collision with a
+ // mask allocated (temporally) nearby. The program that generated this list
+ // can be found at:
+ // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
+ static unsigned FastMasks[] = {0, 128, 64, 192, 32, 96, 224, 112, 240,
+ 48, 16, 120, 248, 56, 24, 8, 124, 252,
+ 60, 28, 12, 4, 126, 254, 62, 30, 14,
+ 6, 2, 127, 63, 31, 15, 7, 3, 1};
return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}
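// A minimal sketch of how a mask from the table retags a pointer, assuming
// the allocation tag lives in the top byte (bits 56..63) as with AArch64 TBI:
//
//   uint64_t retag(uint64_t Ptr, uint64_t Mask) {
//     return Ptr ^ (Mask << 56); // one EOR-immediate on armv8 for these masks
//   }
//
// e.g. retag(0x2a00007fffee1230, /*Mask=*/128) == 0xaa00007fffee1230: only
// the top bit of the tag byte flips (0x2a ^ 0x80 == 0xaa).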
Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
if (ClGenerateTagsWithCalls)
return getNextTagWithCall(IRB);
+ if (StackBaseTag)
+ return StackBaseTag;
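+  // Assumed flow: StackBaseTag is populated by emitPrologue when a frame
+  // record is emitted and reset after each function, so the fallback below
+  // only runs when no frame record is available.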
// FIXME: use addressofreturnaddress (but implement it in aarch64 backend
// first).
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
GV->setComdat(Comdat);
}
-Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
- bool WithFrameRecord) {
- if (!Mapping.InTls)
- return getDynamicShadowNonTls(IRB);
+void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
+ if (!Mapping.InTls) {
+ LocalDynamicShadow = getDynamicShadowNonTls(IRB);
+ return;
+ }
- if (!WithFrameRecord && TargetTriple.isAndroid())
- return getDynamicShadowIfunc(IRB);
+ if (!WithFrameRecord && TargetTriple.isAndroid()) {
+ LocalDynamicShadow = getDynamicShadowIfunc(IRB);
+ return;
+ }
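+  // Assumed intent of the early return above: without a frame record the
+  // thread slot is never touched, so on Android the cheaper ifunc-resolved
+  // shadow base suffices.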
Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
assert(SlotPtr);
Value *ThreadLongMaybeUntagged =
    TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);
if (WithFrameRecord) {
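+    // Assumed rationale for the shift below: ThreadLong points at the next
+    // free 8-byte ring buffer slot, so its low three bits carry no entropy;
+    // shifting them out keeps the bits that distinguish neighbouring frames
+    // in the low bits of the base tag.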
+ StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
+
// Prepare ring buffer data.
auto PC = IRB.CreatePtrToInt(F, IntptrTy);
auto GetStackPointerFn =
    Intrinsic::getDeclaration(F->getParent(), Intrinsic::frameaddress);
Value *SP = IRB.CreatePtrToInt(
    IRB.CreateCall(GetStackPointerFn,
                   {Constant::getNullValue(IRB.getInt32Ty())}),
    IntptrTy);
- // Mix SP and PC. TODO: also add the tag to the mix.
+ // Mix SP and PC.
// Assumptions:
// PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
// SP is 0xsssssssssssSSSS0 (4 lower bits are zero)
// We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
//       0xSSSSPPPPPPPPPPPP
Value *RecordPtr = IRB.CreateIntToPtr(ThreadLong, IntptrTy->getPointerTo(0));
IRB.CreateStore(IRB.CreateOr(PC, IRB.CreateShl(SP, 44)), RecordPtr);
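// A worked example of the mix above (illustrative values only):
//   PC           = 0x0000aaaabbbbcccc
//   SP           = 0x0000007ffff12340   (bits 4..19 hold SSSS = 0x1234)
//   SP << 44     = 0x1234000000000000   (bit 4 lands on bit 48, bit 19 on bit 63)
//   PC | SP<<44  = 0x1234aaaabbbbcccc   -> the 0xSSSSPPPPPPPPPPPP layout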
// Get shadow base address by aligning RecordPtr up.
// Note: this is not correct if the pointer is already aligned.
// Runtime library will make sure this never happens.
- Value *ShadowBase = IRB.CreateAdd(
+ LocalDynamicShadow = IRB.CreateAdd(
IRB.CreateOr(
ThreadLongMaybeUntagged,
ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
- ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
- return ShadowBase;
+ LocalDynamicShadow = IRB.CreateIntToPtr(LocalDynamicShadow, Int8PtrTy);
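// A worked example of the align-up idiom above, assuming
// kShadowBaseAlignment == 32: for ThreadLongMaybeUntagged == 0x00007f1200000123,
//   OR with 2^32 - 1 -> 0x00007f12ffffffff
//   add 1            -> 0x00007f1300000000  (the next 2^32 boundary)
// A value that is already 2^32-aligned would be bumped a full 2^32 further,
// which is why the runtime guarantees that case never reaches this code.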
}
bool HWAddressSanitizer::instrumentLandingPads(
Instruction *InsertPt = &*F.getEntryBlock().begin();
IRBuilder<> EntryIRB(InsertPt);
- LocalDynamicShadow = emitPrologue(EntryIRB,
- /*WithFrameRecord*/ ClRecordStackHistory &&
- !AllocasToInstrument.empty());
+ emitPrologue(EntryIRB,
+ /*WithFrameRecord*/ ClRecordStackHistory &&
+ !AllocasToInstrument.empty());
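+  // emitPrologue publishes its results through the LocalDynamicShadow and
+  // StackBaseTag members (the latter consumed by getStackBaseTag) rather than
+  // a return value; both members are reset once the function has been
+  // processed, as seen below.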
bool Changed = false;
if (!AllocasToInstrument.empty()) {
Changed |= instrumentMemAccess(Inst);
LocalDynamicShadow = nullptr;
+ StackBaseTag = nullptr;
return Changed;
}
%nodebug3 = alloca i8*
%a = alloca i8*
%b = alloca i8*
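; The expected offsets below follow from the re-sorted FastMasks table: %a and
; %b are the fifth and sixth allocas (AllocaNo 4 and 5), and the
; DW_OP_LLVM_tag_offset operand is the alloca's tag relative to the frame's
; base tag, so the checks expect FastMasks[4] == 32 and FastMasks[5] == 96.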
- ; CHECK: @llvm.dbg.declare{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 4)
+ ; CHECK: @llvm.dbg.declare{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 32)
call void @llvm.dbg.declare(metadata i8** %a, metadata !12, metadata !DIExpression()), !dbg !14
- ; CHECK: @llvm.dbg.declare{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 4)
+ ; CHECK: @llvm.dbg.declare{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 32)
call void @llvm.dbg.declare(metadata i8** %a, metadata !12, metadata !DIExpression()), !dbg !14
- ; CHECK: @llvm.dbg.declare{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 6)
+ ; CHECK: @llvm.dbg.declare{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 96)
call void @llvm.dbg.declare(metadata i8** %b, metadata !13, metadata !DIExpression()), !dbg !14
- ; CHECK: @llvm.dbg.declare{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 6)
+ ; CHECK: @llvm.dbg.declare{{.*}} !DIExpression(DW_OP_LLVM_tag_offset, 96)
call void @llvm.dbg.declare(metadata i8** %b, metadata !13, metadata !DIExpression()), !dbg !14
call void @g(i8** %nodebug0, i8** %nodebug1, i8** %nodebug2, i8** %nodebug3, i8** %a, i8** %b)
ret void, !dbg !15