#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Argument.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
                                      cl::desc("Check stack-use-after-return"),
                                      cl::Hidden, cl::init(true));
+static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
+                                        cl::desc("Create redzones for byval "
+                                                 "arguments (extra copy "
+                                                 "required)"), cl::Hidden,
+                                        cl::init(true));
static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));
  bool runOnFunction() {
    if (!ClStack) return false;
+
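+    // Do this before collecting allocas below so that the allocas introduced
+    // for byval arguments are also found and given redzones.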
+    if (ClRedzoneByvalArgs) copyArgsPassedByValToAllocas();
+
    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
    return true;
  }
+  // Arguments marked with the "byval" attribute are implicitly copied without
+  // using an alloca instruction. To produce redzones for those arguments, we
+  // copy them a second time into memory allocated with an alloca instruction.
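+  // For example, on a typical x86-64 target clang lowers a C function that
+  // takes a 32-byte struct by value to something like (hypothetical names):
+  //
+  //   define i32 @bar(%struct.A* byval %a)
+  //
+  // The hidden copy that %a points to is made at the call site rather than
+  // through an alloca in this function, so it would otherwise get no redzones.
+  void copyArgsPassedByValToAllocas();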
+
  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  llvm_unreachable("impossible LocalStackSize");
}
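+// A sketch of the rewrite performed below for a hypothetical function @foo
+// taking a 32-byte %struct.A (ABI alignment 4) by value; the memcpy's i8*
+// bitcasts and trailing arguments are elided:
+//
+//   define void @foo(%struct.A* byval %a) {              ; before
+//     %call = call i32 @bar(%struct.A* %a)
+//
+//   define void @foo(%struct.A* byval %a) {              ; after
+//     %a.byval = alloca %struct.A, align 4
+//     call void @llvm.memcpy(... %a.byval, ... %a, i64 32, ...)
+//     %call = call i32 @bar(%struct.A* %a.byval)
+//
+// The new alloca is a static alloca in the entry block, so the stack poisoner
+// then gives it redzones like any other local variable.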
+void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
+  BasicBlock &FirstBB = *F.begin();
+  IRBuilder<> IRB(&FirstBB, FirstBB.getFirstInsertionPt());
+  const DataLayout &DL = F.getParent()->getDataLayout();
+  for (Argument &Arg : F.args()) {
+    if (Arg.hasByValAttr()) {
+      Type *Ty = Arg.getType()->getPointerElementType();
+      // Honor the argument's explicit alignment; otherwise fall back to the
+      // ABI alignment of the pointee type.
+      unsigned Align = Arg.getParamAlignment();
+      if (Align == 0) Align = DL.getABITypeAlignment(Ty);
+
+      const std::string &Name = Arg.hasName() ? Arg.getName().str() :
+          "Arg" + llvm::to_string(Arg.getArgNo());
+      AllocaInst *AI = IRB.CreateAlloca(Ty, nullptr, Twine(Name) + ".byval");
+      AI->setAlignment(Align);
+      // Redirect all uses of the argument to the new alloca, then copy the
+      // argument's contents into it.
+      Arg.replaceAllUsesWith(AI);
+
+      uint64_t AllocSize = DL.getTypeAllocSize(Ty);
+      IRB.CreateMemCpy(AI, &Arg, AllocSize, Align);
+    }
+  }
+}
+
PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
Value *ValueIfTrue,
Instruction *ThenTerm,
--- /dev/null
+; This check verifies that arguments passed by value get redzones.
+; RUN: opt < %s -asan -asan-realign-stack=32 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.A = type { [8 x i32] }
+
+declare i32 @bar(%struct.A*)
+
+; Test behavior for a named argument with explicit alignment. Both the memcpy
+; and the instrumented stack frame alloca should use the explicit alignment of
+; 64.
+define void @foo(%struct.A* byval align 64 %a) sanitize_address {
+entry:
+; CHECK-LABEL: foo
+; CHECK: call i64 @__asan_stack_malloc
+; CHECK: alloca i8, i64 {{.*}} align 64
+; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to %struct.A*
+; CHECK: [[copyBytePtr:%[^ \t]+]] = bitcast %struct.A* [[copyPtr]]
+; CHECK: [[aBytePtr:%[^ \t]+]] = bitcast %struct.A* %a
+; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyBytePtr]]{{[^%]+}}[[aBytePtr]],{{[^,]+}}, i32 64
+; CHECK: call i32 @bar(%struct.A* [[copyPtr]])
+; CHECK: ret void
+
+ %call = call i32 @bar(%struct.A* %a)
+ ret void
+}
+
+; Test behavior for an unnamed argument without explicit alignment. In this
+; case, the first argument is referenced by the identifier %0, and the ABI
+; requires a minimum alignment of 4 bytes because %struct.A contains i32s,
+; which have 4-byte alignment; that value is used for the memcpy. The stack
+; frame alloca, however, is aligned to 32 because that is the value passed via
+; the -asan-realign-stack option, which is greater than 4.
+define void @baz(%struct.A* byval) sanitize_address {
+entry:
+; CHECK-LABEL: baz
+; CHECK: call i64 @__asan_stack_malloc
+; CHECK: alloca i8, i64 {{.*}} align 32
+; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to %struct.A*
+; CHECK: [[copyBytePtr:%[^ \t]+]] = bitcast %struct.A* [[copyPtr]]
+; CHECK: [[aBytePtr:%[^ \t]+]] = bitcast %struct.A* %0
+; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyBytePtr]]{{[^%]+}}[[aBytePtr]],{{[^,]+}}, i32 4
+; CHECK: call i32 @bar(%struct.A* [[copyPtr]])
+; CHECK: ret void
+
+ %call = call i32 @bar(%struct.A* %0)
+ ret void
+}