.addReg(Reg)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
*STI);
- MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
+ MCSymbol *HandlePartialSym = OutContext.createTempSymbol();
OutStreamer->EmitInstruction(
MCInstBuilder(AArch64::Bcc)
.addImm(AArch64CC::NE)
- .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
+ .addExpr(MCSymbolRefExpr::create(HandlePartialSym, OutContext)),
*STI);
+ MCSymbol *ReturnSym = OutContext.createTempSymbol();
+ OutStreamer->EmitLabel(ReturnSym);
OutStreamer->EmitInstruction(
MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
- OutStreamer->EmitLabel(HandleMismatchSym);
+ OutStreamer->EmitLabel(HandlePartialSym);
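+  // The shadow byte did not match the pointer tag. Shadow values in the
+  // range [1, 15] may be short granule sizes (the number of accessible
+  // bytes in this 16-byte granule) rather than tags; anything above 15 is
+  // a genuine mismatch.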
+ OutStreamer->EmitInstruction(MCInstBuilder(AArch64::SUBSWri)
+ .addReg(AArch64::WZR)
+ .addReg(AArch64::W16)
+ .addImm(15)
+ .addImm(0),
+ *STI);
+ MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
+ OutStreamer->EmitInstruction(
+ MCInstBuilder(AArch64::Bcc)
+ .addImm(AArch64CC::HI)
+ .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
+ *STI);
+
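+  // Compute the offset of the last byte touched by the access within its
+  // granule: the low four pointer bits plus the access size minus one. The
+  // access is in bounds only if this offset is below the short granule size
+  // stored in the shadow.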
+ OutStreamer->EmitInstruction(
+ MCInstBuilder(AArch64::ANDXri)
+ .addReg(AArch64::X17)
+ .addReg(Reg)
+ .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
+ *STI);
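+  // The low four bits of AccessInfo encode log2 of the access size.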
+ size_t Size = 1 << (AccessInfo & 0xf);
+ if (Size != 1)
+ OutStreamer->EmitInstruction(MCInstBuilder(AArch64::ADDXri)
+ .addReg(AArch64::X17)
+ .addReg(AArch64::X17)
+ .addImm(Size - 1)
+ .addImm(0),
+ *STI);
+ OutStreamer->EmitInstruction(MCInstBuilder(AArch64::SUBSWrs)
+ .addReg(AArch64::WZR)
+ .addReg(AArch64::W16)
+ .addReg(AArch64::W17)
+ .addImm(0),
+ *STI);
+ OutStreamer->EmitInstruction(
+ MCInstBuilder(AArch64::Bcc)
+ .addImm(AArch64CC::LS)
+ .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
+ *STI);
+
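+  // For a short granule the real tag is stored in the last byte of the
+  // granule itself; load it and compare it against the pointer tag.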
+ OutStreamer->EmitInstruction(
+ MCInstBuilder(AArch64::ORRXri)
+ .addReg(AArch64::X16)
+ .addReg(Reg)
+ .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
+ *STI);
+ OutStreamer->EmitInstruction(MCInstBuilder(AArch64::LDRBBui)
+ .addReg(AArch64::W16)
+ .addReg(AArch64::X16)
+ .addImm(0),
+ *STI);
+ OutStreamer->EmitInstruction(
+ MCInstBuilder(AArch64::SUBSXrs)
+ .addReg(AArch64::XZR)
+ .addReg(AArch64::X16)
+ .addReg(Reg)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
+ *STI);
+ OutStreamer->EmitInstruction(
+ MCInstBuilder(AArch64::Bcc)
+ .addImm(AArch64CC::EQ)
+ .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
+ *STI);
+ OutStreamer->EmitLabel(HandleMismatchSym);
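+  // Genuine mismatch: fall through to the slow path, which spills registers
+  // and reports the failure.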
OutStreamer->EmitInstruction(MCInstBuilder(AArch64::STPXpre)
.addReg(AArch64::SP)
.addReg(AArch64::X0)
Value **MaybeMask);
bool isInterestingAlloca(const AllocaInst &AI);
- bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
+ bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
bool instrumentStack(
}
Instruction *CheckTerm =
- SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
+ SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
MDBuilder(*C).createBranchWeights(1, 100000));
IRB.SetInsertPoint(CheckTerm);
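+  // Shadow values above 15 cannot be short granule sizes, so the mismatch
+  // is definite and control goes straight to the failure path.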
+ Value *OutOfShortGranuleTagRange =
+ IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
+ Instruction *CheckFailTerm =
+ SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
+ MDBuilder(*C).createBranchWeights(1, 100000));
+
+ IRB.SetInsertPoint(CheckTerm);
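+  // The last byte the access touches within the granule must lie below the
+  // short granule size recorded in the shadow.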
+ Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
+ PtrLowBits = IRB.CreateAdd(
+ PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
+ Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
+ SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
+ MDBuilder(*C).createBranchWeights(1, 100000),
+ nullptr, nullptr, CheckFailTerm->getParent());
+
+ IRB.SetInsertPoint(CheckTerm);
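+  // A short granule keeps its real tag in the granule's last byte; compare
+  // that against the pointer tag before accepting the access.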
+ Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
+ InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
+ Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
+ Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
+ SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
+ MDBuilder(*C).createBranchWeights(1, 100000),
+ nullptr, nullptr, CheckFailTerm->getParent());
+
+ IRB.SetInsertPoint(CheckFailTerm);
InlineAsm *Asm;
switch (TargetTriple.getArch()) {
case Triple::x86_64:
report_fatal_error("unsupported architecture");
}
IRB.CreateCall(Asm, PtrLong);
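+  // In recover mode the failure block falls through to the continuation
+  // block instead of being terminated with unreachable.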
+ if (Recover)
+ cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
}
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
}
bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
- Value *Tag) {
- size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
- ~(Mapping.getAllocaAlignment() - 1);
+ Value *Tag, size_t Size) {
+ size_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
if (ClInstrumentWithCalls) {
IRB.CreateCall(HwasanTagMemoryFunc,
{IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
- ConstantInt::get(IntptrTy, Size)});
+ ConstantInt::get(IntptrTy, AlignedSize)});
} else {
size_t ShadowSize = Size >> Mapping.Scale;
Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
// FIXME: the interceptor is not as fast as real memset. Consider lowering
// llvm.memset right here into either a sequence of stores, or a call to
// hwasan_tag_memory.
- IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
+ if (ShadowSize)
+ IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
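+    // For the trailing short granule, store the number of accessible bytes
+    // in its shadow slot and the real tag in the granule's last byte.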
+ if (Size != AlignedSize) {
+ IRB.CreateStore(
+ ConstantInt::get(Int8Ty, Size % Mapping.getAllocaAlignment()),
+ IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
+ IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
+ Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
+ AlignedSize - 1));
+ }
}
return true;
}
DDI->setArgOperand(2, MetadataAsValue::get(*C, NewExpr));
}
- tagAlloca(IRB, AI, Tag);
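+  // Tag with the precise object size so the trailing partial granule is
+  // marked short.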
+ size_t Size = getAllocaSizeInBytes(*AI);
+ tagAlloca(IRB, AI, Tag, Size);
for (auto RI : RetVec) {
IRB.SetInsertPoint(RI);
// Re-tag alloca memory with the special UAR tag.
Value *Tag = getUARTag(IRB, StackTag);
- tagAlloca(IRB, AI, Tag);
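+    // Retag with the aligned size so the entire granule, including any
+    // short granule tag byte, is overwritten.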
+ tagAlloca(IRB, AI, Tag, alignTo(Size, Mapping.getAllocaAlignment()));
}
}
for (auto &Inst : BB) {
if (ClInstrumentStack)
if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
- // Realign all allocas. We don't want small uninteresting allocas to
- // hide in instrumented alloca's padding.
- if (AI->getAlignment() < Mapping.getAllocaAlignment())
- AI->setAlignment(Mapping.getAllocaAlignment());
- // Instrument some of them.
if (isInterestingAlloca(*AI))
AllocasToInstrument.push_back(AI);
continue;
StackTag);
}
+  // Pad and align each of the allocas that we instrumented to stop small
+  // uninteresting allocas from hiding in an instrumented alloca's padding
+  // and so that we have enough space to store real tags for short granules.
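+  // For example, an alloca of a single i32 becomes { i32, [12 x i8] },
+  // padding it out to the 16-byte granule size.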
+ DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
+ for (AllocaInst *AI : AllocasToInstrument) {
+ uint64_t Size = getAllocaSizeInBytes(*AI);
+ uint64_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
+ AI->setAlignment(std::max(AI->getAlignment(), 16u));
+ if (Size != AlignedSize) {
+ Type *TypeWithPadding = StructType::get(
+ AI->getAllocatedType(), ArrayType::get(Int8Ty, AlignedSize - Size));
+ auto *NewAI = new AllocaInst(
+ TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
+ NewAI->takeName(AI);
+ NewAI->setAlignment(AI->getAlignment());
+ NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
+ NewAI->setSwiftError(AI->isSwiftError());
+ NewAI->copyMetadata(*AI);
+ Value *Zero = ConstantInt::get(Int32Ty, 0);
+ auto *GEP = GetElementPtrInst::Create(TypeWithPadding, NewAI,
+ {Zero, Zero}, "", AI);
+ AI->replaceAllUsesWith(GEP);
+ AllocaToPaddedAllocaMap[AI] = NewAI;
+ }
+ }
+
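+  // Point any debug variable intrinsics at the padded allocas before the
+  // originals are erased.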
+ if (!AllocaToPaddedAllocaMap.empty()) {
+ for (auto &BB : F)
+ for (auto &Inst : BB)
+ if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst))
+ if (auto *AI =
+ dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation()))
+ if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
+ DVI->setArgOperand(
+ 0, MetadataAsValue::get(*C, LocalAsMetadata::get(NewAI)));
+ for (auto &P : AllocaToPaddedAllocaMap)
+ P.first->eraseFromParent();
+ }
+
// If we split the entry block, move any allocas that were originally in the
// entry block back into the entry block so that they aren't treated as
// dynamic allocas.
; CHECK-NEXT: ldrb w16, [x9, x16]
; CHECK-NEXT: cmp x16, x0, lsr #56
; CHECK-NEXT: b.ne .Ltmp0
+; CHECK-NEXT: .Ltmp1:
; CHECK-NEXT: ret
; CHECK-NEXT: .Ltmp0:
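+; Short granule check: shadow values above 15 are definite mismatches.
+; Otherwise the last accessed byte (low pointer bits plus access size minus
+; one, derived from access info 456) must be below the short granule size,
+; and the pointer tag must match the tag stored in the granule's last byte.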
+; CHECK-NEXT: cmp w16, #15
+; CHECK-NEXT: b.hi .Ltmp2
+; CHECK-NEXT: and x17, x0, #0xf
+; CHECK-NEXT: add x17, x17, #255
+; CHECK-NEXT: cmp w16, w17
+; CHECK-NEXT: b.ls .Ltmp2
+; CHECK-NEXT: orr x16, x0, #0xf
+; CHECK-NEXT: ldrb w16, [x16]
+; CHECK-NEXT: cmp x16, x0, lsr #56
+; CHECK-NEXT: b.eq .Ltmp1
+; CHECK-NEXT: .Ltmp2:
; CHECK-NEXT: stp x0, x1, [sp, #-256]!
; CHECK-NEXT: stp x29, x30, [sp, #232]
; CHECK-NEXT: mov x1, #456
; CHECK-NEXT: ubfx x16, x1, #4, #52
; CHECK-NEXT: ldrb w16, [x9, x16]
; CHECK-NEXT: cmp x16, x1, lsr #56
-; CHECK-NEXT: b.ne .Ltmp1
+; CHECK-NEXT: b.ne .Ltmp3
+; CHECK-NEXT: .Ltmp4:
; CHECK-NEXT: ret
-; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: cmp w16, #15
+; CHECK-NEXT: b.hi .Ltmp5
+; CHECK-NEXT: and x17, x1, #0xf
+; CHECK-NEXT: add x17, x17, #2047
+; CHECK-NEXT: cmp w16, w17
+; CHECK-NEXT: b.ls .Ltmp5
+; CHECK-NEXT: orr x16, x1, #0xf
+; CHECK-NEXT: ldrb w16, [x16]
+; CHECK-NEXT: cmp x16, x1, lsr #56
+; CHECK-NEXT: b.eq .Ltmp4
+; CHECK-NEXT: .Ltmp5:
; CHECK-NEXT: stp x0, x1, [sp, #-256]!
; CHECK-NEXT: stp x29, x30, [sp, #232]
; CHECK-NEXT: mov x0, x1
define void @test_alloca() sanitize_hwaddress {
; CHECK-LABEL: @test_alloca(
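+; The 4-byte alloca is padded to a full 16-byte granule; its shadow slot
+; records the accessible size (4) and the real tag is stored in the last
+; byte of the granule.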
+; CHECK: %[[GEP:[^ ]*]] = getelementptr { i32, [12 x i8] }, { i32, [12 x i8] }* %x, i32 0, i32 0
; CHECK: %[[T1:[^ ]*]] = call i8 @__hwasan_generate_tag()
; CHECK: %[[A:[^ ]*]] = zext i8 %[[T1]] to i64
-; CHECK: %[[B:[^ ]*]] = ptrtoint i32* %x to i64
+; CHECK: %[[B:[^ ]*]] = ptrtoint i32* %[[GEP]] to i64
; CHECK: %[[C:[^ ]*]] = shl i64 %[[A]], 56
; CHECK: or i64 %[[B]], %[[C]]
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 20
; CHECK: %[[BASE_TAG:[^ ]*]] = xor i64 %[[A]], %[[B]]
-; CHECK: %[[X:[^ ]*]] = alloca i32, align 16
+; CHECK: %[[X:[^ ]*]] = alloca { i32, [12 x i8] }, align 16
+; CHECK: %[[X_GEP:[^ ]*]] = getelementptr { i32, [12 x i8] }, { i32, [12 x i8] }* %[[X]], i32 0, i32 0
; CHECK: %[[X_TAG:[^ ]*]] = xor i64 %[[BASE_TAG]], 0
-; CHECK: %[[X1:[^ ]*]] = ptrtoint i32* %[[X]] to i64
+; CHECK: %[[X1:[^ ]*]] = ptrtoint i32* %[[X_GEP]] to i64
; CHECK: %[[C:[^ ]*]] = shl i64 %[[X_TAG]], 56
; CHECK: %[[D:[^ ]*]] = or i64 %[[X1]], %[[C]]
; CHECK: %[[X_HWASAN:[^ ]*]] = inttoptr i64 %[[D]] to i32*
; CHECK: %[[X_TAG2:[^ ]*]] = trunc i64 %[[X_TAG]] to i8
-; CHECK: %[[E:[^ ]*]] = ptrtoint i32* %[[X]] to i64
+; CHECK: %[[E:[^ ]*]] = ptrtoint i32* %[[X_GEP]] to i64
; CHECK: %[[F:[^ ]*]] = lshr i64 %[[E]], 4
; DYNAMIC-SHADOW: %[[X_SHADOW:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %[[F]]
; ZERO-BASED-SHADOW: %[[X_SHADOW:[^ ]*]] = inttoptr i64 %[[F]] to i8*
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %[[X_SHADOW]], i8 %[[X_TAG2]], i64 1, i1 false)
+; CHECK: %[[X_SHADOW_GEP:[^ ]*]] = getelementptr i8, i8* %[[X_SHADOW]], i32 0
+; CHECK: store i8 4, i8* %[[X_SHADOW_GEP]]
+; CHECK: %[[X_I8:[^ ]*]] = bitcast i32* %[[X_GEP]] to i8*
+; CHECK: %[[X_I8_GEP:[^ ]*]] = getelementptr i8, i8* %[[X_I8]], i32 15
+; CHECK: store i8 %[[X_TAG2]], i8* %[[X_I8_GEP]]
; CHECK: call void @use32(i32* nonnull %[[X_HWASAN]])
; UAR-TAGS: %[[BASE_TAG_COMPL:[^ ]*]] = xor i64 %[[BASE_TAG]], 255
; UAR-TAGS: %[[X_TAG_UAR:[^ ]*]] = trunc i64 %[[BASE_TAG_COMPL]] to i8
-; CHECK: %[[E2:[^ ]*]] = ptrtoint i32* %[[X]] to i64
+; CHECK: %[[E2:[^ ]*]] = ptrtoint i32* %[[X_GEP]] to i64
; CHECK: %[[F2:[^ ]*]] = lshr i64 %[[E2]], 4
; DYNAMIC-SHADOW: %[[X_SHADOW2:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %[[F2]]
; ZERO-BASED-SHADOW: %[[X_SHADOW2:[^ ]*]] = inttoptr i64 %[[F2]] to i8*
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
-; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
+; RECOVER: br i1 %[[F]], label %[[MISMATCH:[0-9]*]], label %[[CONT:[0-9]*]], !prof {{.*}}
+
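+; In recover mode the short granule logic is emitted inline: a shadow value
+; of at most 15 triggers the bounds check and the in-granule tag load before
+; the failure path is taken.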
+; RECOVER: [[MISMATCH]]:
+; RECOVER: %[[NOTSHORT:[^ ]*]] = icmp ugt i8 %[[MEMTAG]], 15
+; RECOVER: br i1 %[[NOTSHORT]], label %[[FAIL:[0-9]*]], label %[[SHORT:[0-9]*]], !prof {{.*}}
+; RECOVER: [[FAIL]]:
; RECOVER: call void asm sideeffect "brk #2336", "{x0}"(i64 %[[A]])
; RECOVER: br label
+; RECOVER: [[SHORT]]:
+; RECOVER: %[[LOWBITS:[^ ]*]] = and i64 %[[A]], 15
+; RECOVER: %[[LOWBITS_I8:[^ ]*]] = trunc i64 %[[LOWBITS]] to i8
+; RECOVER: %[[LAST:[^ ]*]] = add i8 %[[LOWBITS_I8]], 0
+; RECOVER: %[[OOB:[^ ]*]] = icmp uge i8 %[[LAST]], %[[MEMTAG]]
+; RECOVER: br i1 %[[OOB]], label %[[FAIL]], label %[[INBOUNDS:[0-9]*]], !prof {{.*}}
+
+; RECOVER: [[INBOUNDS]]:
+; RECOVER: %[[EOG_ADDR:[^ ]*]] = or i64 %[[C]], 15
+; RECOVER: %[[EOG_PTR:[^ ]*]] = inttoptr i64 %[[EOG_ADDR]] to i8*
+; RECOVER: %[[EOGTAG:[^ ]*]] = load i8, i8* %[[EOG_PTR]]
+; RECOVER: %[[EOG_MISMATCH:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[EOGTAG]]
+; RECOVER: br i1 %[[EOG_MISMATCH]], label %[[FAIL]], label %[[CONT1:[0-9]*]], !prof {{.*}}
+
+; RECOVER: [[CONT1]]:
+; RECOVER: br label %[[CONT]]
+
+; RECOVER: [[CONT]]:
+
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %a, i32 0)
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %a, i32 0)
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
-; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
+; RECOVER: br i1 %[[F]], label %[[MISMATCH:[0-9]*]], label %[[CONT:[0-9]*]], !prof {{.*}}
+
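+; Same structure for the second access; only the last-byte offset (lowbits
+; plus 1 for this access size) differs.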
+; RECOVER: [[MISMATCH]]:
+; RECOVER: %[[NOTSHORT:[^ ]*]] = icmp ugt i8 %[[MEMTAG]], 15
+; RECOVER: br i1 %[[NOTSHORT]], label %[[FAIL:[0-9]*]], label %[[SHORT:[0-9]*]], !prof {{.*}}
+; RECOVER: [[FAIL]]:
; RECOVER: call void asm sideeffect "brk #2337", "{x0}"(i64 %[[A]])
; RECOVER: br label
+; RECOVER: [[SHORT]]:
+; RECOVER: %[[LOWBITS:[^ ]*]] = and i64 %[[A]], 15
+; RECOVER: %[[LOWBITS_I8:[^ ]*]] = trunc i64 %[[LOWBITS]] to i8
+; RECOVER: %[[LAST:[^ ]*]] = add i8 %[[LOWBITS_I8]], 1
+; RECOVER: %[[OOB:[^ ]*]] = icmp uge i8 %[[LAST]], %[[MEMTAG]]
+; RECOVER: br i1 %[[OOB]], label %[[FAIL]], label %[[INBOUNDS:[0-9]*]], !prof {{.*}}
+
+; RECOVER: [[INBOUNDS]]:
+; RECOVER: %[[EOG_ADDR:[^ ]*]] = or i64 %[[C]], 15
+; RECOVER: %[[EOG_PTR:[^ ]*]] = inttoptr i64 %[[EOG_ADDR]] to i8*
+; RECOVER: %[[EOGTAG:[^ ]*]] = load i8, i8* %[[EOG_PTR]]
+; RECOVER: %[[EOG_MISMATCH:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[EOGTAG]]
+; RECOVER: br i1 %[[EOG_MISMATCH]], label %[[FAIL]], label %[[CONT1:[0-9]*]], !prof {{.*}}
+
+; RECOVER: [[CONT1]]:
+; RECOVER: br label %[[CONT]]
+
+; RECOVER: [[CONT]]:
+
; ABORT: %[[A:[^ ]*]] = bitcast i16* %a to i8*
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 1)
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 1)
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 20
; CHECK: %[[BASE_TAG:[^ ]*]] = xor i64 %[[A]], %[[B]]
-; CHECK: %[[X:[^ ]*]] = alloca i32, align 16
+; CHECK: %[[X:[^ ]*]] = alloca { i32, [12 x i8] }, align 16
+; CHECK: %[[X_GEP:[^ ]*]] = getelementptr { i32, [12 x i8] }, { i32, [12 x i8] }* %[[X]], i32 0, i32 0
; CHECK: %[[X_TAG:[^ ]*]] = xor i64 %[[BASE_TAG]], 0
-; CHECK: %[[X1:[^ ]*]] = ptrtoint i32* %[[X]] to i64
+; CHECK: %[[X1:[^ ]*]] = ptrtoint i32* %[[X_GEP]] to i64
; CHECK: %[[C:[^ ]*]] = shl i64 %[[X_TAG]], 56
; CHECK: %[[D:[^ ]*]] = or i64 %[[C]], 72057594037927935
; CHECK: %[[E:[^ ]*]] = and i64 %[[X1]], %[[D]]