-//===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
+//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// The LLVM Compiler Infrastructure
//
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/Value.h"
#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
#include <iomanip>
#include <limits>
+#include <memory>
#include <sstream>
#include <string>
-#include <system_error>
+#include <tuple>
using namespace llvm;
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
-static const uint64_t kDynamicShadowSentinel = ~(uint64_t)0;
+static const uint64_t kDynamicShadowSentinel =
+ std::numeric_limits<uint64_t>::max();
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kIOSSimShadowOffset32 = 1ULL << 30;
static const uint64_t kIOSSimShadowOffset64 = kDefaultShadowOffset64;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
+
// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;
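+
+// When the offset equals kDynamicShadowSentinel, no constant mapping is known
+// at compile time; conceptually the instrumented code instead does the
+// following (a sketch, assuming the __asan_shadow_memory_dynamic_address
+// runtime global):
+//
+//   uptr ShadowBase = __asan_shadow_memory_dynamic_address;
+//   uptr Shadow = (Mem >> Scale) + ShadowBase;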
static const unsigned kAllocaRzSize = 32;
// Command-line flags.
+
static cl::opt<bool> ClEnableKasan(
"asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
cl::Hidden, cl::init(false));
+
static cl::opt<bool> ClRecover(
"asan-recover",
cl::desc("Enable recovery mode (continue-after-error)."),
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
cl::desc("instrument read instructions"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClInstrumentWrites(
"asan-instrument-writes", cl::desc("instrument write instructions"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClInstrumentAtomics(
"asan-instrument-atomics",
cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
cl::init(true));
+
static cl::opt<bool> ClAlwaysSlowPath(
"asan-always-slow-path",
cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
cl::init(false));
+
static cl::opt<bool> ClForceDynamicShadow(
"asan-force-dynamic-shadow",
cl::desc("Load shadow address into a local variable for each function"),
"asan-max-ins-per-bb", cl::init(10000),
cl::desc("maximal number of instructions to instrument in any given BB"),
cl::Hidden);
+
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
cl::Hidden, cl::init(true));
+
static cl::opt<int> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
"Inline shadow poisoning for blocks up to the given size in bytes."),
cl::Hidden, cl::init(64));
+
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
cl::desc("Check stack-use-after-return"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
cl::desc("Create redzones for byval "
"arguments (extra copy "
"required)"), cl::Hidden,
cl::init(true));
+
static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
cl::desc("Check stack-use-after-scope"),
cl::Hidden, cl::init(false));
+
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
cl::desc("Handle global objects"), cl::Hidden,
cl::init(true));
+
static cl::opt<bool> ClInitializers("asan-initialization-order",
cl::desc("Handle C++ initializer order"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClInvalidPointerPairs(
"asan-detect-invalid-pointer-pair",
cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
cl::init(false));
+
static cl::opt<unsigned> ClRealignStack(
"asan-realign-stack",
cl::desc("Realign stack to the value of this flag (power of two)"),
cl::Hidden, cl::init(32));
+
static cl::opt<int> ClInstrumentationWithCallsThreshold(
"asan-instrumentation-with-call-threshold",
cl::desc(
"this number of memory accesses, use callbacks instead of "
"inline checks (-1 means never use callbacks)."),
cl::Hidden, cl::init(7000));
+
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
"asan-memory-access-callback-prefix",
cl::desc("Prefix for memory access callbacks"), cl::Hidden,
cl::init("__asan_"));
+
static cl::opt<bool>
ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
cl::desc("instrument dynamic allocas"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClSkipPromotableAllocas(
"asan-skip-promotable-allocas",
cl::desc("Do not instrument promotable allocas"), cl::Hidden,
// These flags allow to change the shadow mapping.
// The shadow mapping looks like
// Shadow = (Mem >> scale) + offset
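+//
+// For example, with the default 64-bit parameters above (scale 3,
+// offset 1ULL << 44), the shadow byte covering the 8-byte granule at
+// application address 0x7f0000000000 lives at
+// (0x7f0000000000 >> 3) + 0x100000000000 == 0x1fe000000000 (illustrative).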
+
static cl::opt<int> ClMappingScale("asan-mapping-scale",
cl::desc("scale of asan shadow mapping"),
cl::Hidden, cl::init(0));
+
static cl::opt<unsigned long long> ClMappingOffset(
"asan-mapping-offset",
cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden,
// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
+
static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClOptSameTemp(
"asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
cl::desc("Don't instrument scalar globals"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClOptStack(
"asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
cl::Hidden, cl::init(false));
cl::Hidden, cl::init(true));
// Debug flags.
+
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
cl::init(0));
+
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
cl::Hidden, cl::init(0));
+
static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
cl::desc("Debug func"));
+
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
cl::Hidden, cl::init(-1));
+
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
cl::Hidden, cl::init(-1));
"Number of optimized accesses to stack vars");
namespace {
+
/// Frontend-provided metadata for source location.
struct LocationMetadata {
StringRef Filename;
- int LineNo;
- int ColumnNo;
+ int LineNo = 0;
+ int ColumnNo = 0;
- LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {}
+ LocationMetadata() = default;
  bool empty() const { return Filename.empty(); }
};

/// Frontend-provided metadata for global variables.
class GlobalsMetadata {
- public:
+public:
struct Entry {
- Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {}
LocationMetadata SourceLoc;
StringRef Name;
- bool IsDynInit;
- bool IsBlacklisted;
+ bool IsDynInit = false;
+ bool IsBlacklisted = false;
+
+ Entry() = default;
};
- GlobalsMetadata() : inited_(false) {}
+ GlobalsMetadata() = default;
  void reset() {
    inited_ = false;
    Entries.clear();
  }

  /// Returns metadata entry for a given global.
  Entry get(GlobalVariable *G) const {
    auto Pos = Entries.find(G);
    return (Pos != Entries.end()) ? Pos->second : Entry();
  }
- private:
- bool inited_;
+private:
+ bool inited_ = false;
DenseMap<GlobalVariable *, Entry> Entries;
};

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
};
+} // end anonymous namespace
+
static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
bool IsKasan) {
bool IsAndroid = TargetTriple.isAndroid();
bool IsNetBSD = TargetTriple.isOSNetBSD();
bool IsPS4CPU = TargetTriple.isPS4CPU();
bool IsLinux = TargetTriple.isOSLinux();
- bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
- TargetTriple.getArch() == llvm::Triple::ppc64le;
- bool IsSystemZ = TargetTriple.getArch() == llvm::Triple::systemz;
- bool IsX86 = TargetTriple.getArch() == llvm::Triple::x86;
- bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
- bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
- TargetTriple.getArch() == llvm::Triple::mipsel;
- bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
- TargetTriple.getArch() == llvm::Triple::mips64el;
- bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64;
+ bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
+ TargetTriple.getArch() == Triple::ppc64le;
+ bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
+ bool IsX86 = TargetTriple.getArch() == Triple::x86;
+ bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
+ bool IsMIPS32 = TargetTriple.getArch() == Triple::mips ||
+ TargetTriple.getArch() == Triple::mipsel;
+ bool IsMIPS64 = TargetTriple.getArch() == Triple::mips64 ||
+ TargetTriple.getArch() == Triple::mips64el;
+ bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
bool IsWindows = TargetTriple.isOSWindows();
bool IsFuchsia = TargetTriple.isOSFuchsia();
return std::max(32U, 1U << MappingScale);
}
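+
+// e.g. the default MappingScale of 3 gives max(32, 1 << 3) == 32 redzone
+// bytes, while a scale of 6 gives max(32, 64) == 64 (illustrative).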
+namespace {
+
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
+ // Pass identification, replacement for typeid
+ static char ID;
+
explicit AddressSanitizer(bool CompileKernel = false, bool Recover = false,
bool UseAfterScope = false)
: FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan),
Recover(Recover || ClRecover),
- UseAfterScope(UseAfterScope || ClUseAfterScope),
- LocalDynamicShadow(nullptr) {
+ UseAfterScope(UseAfterScope || ClUseAfterScope) {
initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
}
+
StringRef getPassName() const override {
return "AddressSanitizerFunctionPass";
}
+
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
+
uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
uint64_t ArraySize = 1;
    if (AI.isArrayAllocation()) {
      const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
      assert(CI && "non-constant array size");
      ArraySize = CI->getZExtValue();
    }
    Type *Ty = AI.getAllocatedType();
    uint64_t SizeInBytes =
        AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
return SizeInBytes * ArraySize;
}
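+
+  // For example (illustrative): 'alloca i32, i64 10' has ArraySize == 10 and
+  // SizeInBytes == 4, so getAllocaSizeInBytes returns 40.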
+
/// Check if we want (and can) handle this alloca.
bool isInterestingAlloca(const AllocaInst &AI);
Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
uint64_t *TypeSize, unsigned *Alignment,
Value **MaybeMask = nullptr);
+
void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
bool UseCalls, const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void markEscapedLocalAllocas(Function &F);
bool doInitialization(Module &M) override;
bool doFinalization(Module &M) override;
- static char ID; // Pass identification, replacement for typeid
DominatorTree &getDominatorTree() const { return *DT; }
- private:
+private:
+ friend struct FunctionStackPoisoner;
+
void initializeCallbacks(Module &M);
bool LooksLikeCodeInBug11395(Instruction *I);
/// Helper to cleanup per-function state.
struct FunctionStateRAII {
AddressSanitizer *Pass;
+
FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
assert(Pass->ProcessedAllocas.empty() &&
"last pass forgot to clear cache");
assert(!Pass->LocalDynamicShadow);
}
+
~FunctionStateRAII() {
Pass->LocalDynamicShadow = nullptr;
Pass->ProcessedAllocas.clear();
DominatorTree *DT;
Function *AsanHandleNoReturnFunc;
Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
- // This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
+
+  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
Function *AsanErrorCallback[2][2][kNumberOfAccessSizes];
Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
- // This array is indexed by AccessIsWrite and Experiment.
+
+  // These arrays are indexed by AccessIsWrite and Experiment.
Function *AsanErrorCallbackSized[2][2];
Function *AsanMemoryAccessCallbackSized[2][2];
+
Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
InlineAsm *EmptyAsm;
- Value *LocalDynamicShadow;
+ Value *LocalDynamicShadow = nullptr;
GlobalsMetadata GlobalsMD;
DenseMap<const AllocaInst *, bool> ProcessedAllocas;
-
- friend struct FunctionStackPoisoner;
};
class AddressSanitizerModule : public ModulePass {
public:
+ // Pass identification, replacement for typeid
+ static char ID;
+
explicit AddressSanitizerModule(bool CompileKernel = false,
bool Recover = false,
bool UseGlobalsGC = true)
// ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
// do globals-gc.
UseCtorComdat(UseGlobalsGC && ClWithComdat) {}
+
bool runOnModule(Module &M) override;
- static char ID; // Pass identification, replacement for typeid
StringRef getPassName() const override { return "AddressSanitizerModule"; }
private:
IntrinsicInst *LocalEscapeCall = nullptr;
  // Maps a Value to the AllocaInst from which the Value originated.
- typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
+ using AllocaForValueMapTy = DenseMap<Value *, AllocaInst *>;
AllocaForValueMapTy AllocaForValue;
bool HasNonEmptyInlineAsm = false;
Instruction *ThenTerm, Value *ValueIfFalse);
};
-} // anonymous namespace
+} // end anonymous namespace
char AddressSanitizer::ID = 0;
+
INITIALIZE_PASS_BEGIN(
AddressSanitizer, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
AddressSanitizer, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
false)
+
FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel,
bool Recover,
bool UseAfterScope) {
  return new AddressSanitizer(CompileKernel, Recover, UseAfterScope);
}
char AddressSanitizerModule::ID = 0;
+
INITIALIZE_PASS(
AddressSanitizerModule, "asan-module",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs."
"ModulePass",
false, false)
+
ModulePass *llvm::createAddressSanitizerModulePass(bool CompileKernel,
bool Recover,
                                                   bool UseGlobalsGC) {
  return new AddressSanitizerModule(CompileKernel, Recover, UseGlobalsGC);
}
-//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
+//===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
//
// The LLVM Compiler Infrastructure
//
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
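+///
+/// For example (an illustrative sketch, not part of this change), given
+///
+///   %old = atomicrmw add i32* %p, i32 %v seq_cst
+///
+/// the pass stores a clean (all-zero) shadow for the bytes at %p and gives
+/// %old a clean shadow as well, regardless of the prior shadow state.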
-
+//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
#include "llvm/IR/ValueMap.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <tuple>
using namespace llvm;
static cl::opt<int> ClTrackOrigins("msan-track-origins",
cl::desc("Track origins (allocation sites) of poisoned memory"),
cl::Hidden, cl::init(0));
+
static cl::opt<bool> ClKeepGoing("msan-keep-going",
cl::desc("keep going after reporting a UMR"),
cl::Hidden, cl::init(false));
+
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
cl::desc("poison uninitialized stack variables"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
cl::desc("poison uninitialized stack variables with a call"),
cl::Hidden, cl::init(false));
+
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
cl::desc("poison uninitialized stack variables with the given pattern"),
cl::Hidden, cl::init(0xff));
+
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
cl::desc("poison undef temps"),
cl::Hidden, cl::init(true));
struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
+} // end anonymous namespace
+
// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
0x000080000000, // AndMask
&FreeBSD_X86_64_MemoryMapParams,
};
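+
+// Conceptually, these parameters are applied as follows (a sketch, not part
+// of this change; masks that are unused on a platform are zero):
+//
+//   Shadow = ((App & ~AndMask) ^ XorMask) + ShadowBase
+//   Origin = ((App & ~AndMask) ^ XorMask) + OriginBase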
+namespace {
+
/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
- public:
+public:
+ // Pass identification, replacement for typeid.
+ static char ID;
+
MemorySanitizer(int TrackOrigins = 0, bool Recover = false)
: FunctionPass(ID),
TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
- Recover(Recover || ClKeepGoing),
- WarningFn(nullptr) {}
+ Recover(Recover || ClKeepGoing) {}
+
StringRef getPassName() const override { return "MemorySanitizer"; }
+
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
+
bool runOnFunction(Function &F) override;
bool doInitialization(Module &M) override;
- static char ID; // Pass identification, replacement for typeid.
- private:
+private:
+ friend struct MemorySanitizerVisitor;
+ friend struct VarArgAMD64Helper;
+ friend struct VarArgMIPS64Helper;
+ friend struct VarArgAArch64Helper;
+ friend struct VarArgPowerPC64Helper;
+
void initializeCallbacks(Module &M);
  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;
  bool Recover;

  LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
+
/// \brief Thread-local shadow storage for function parameters.
GlobalVariable *ParamTLS;
+
/// \brief Thread-local origin storage for function parameters.
GlobalVariable *ParamOriginTLS;
+
/// \brief Thread-local shadow storage for function return value.
GlobalVariable *RetvalTLS;
+
/// \brief Thread-local origin storage for function return value.
GlobalVariable *RetvalOriginTLS;
+
/// \brief Thread-local shadow storage for in-register va_arg function
/// parameters (x86_64-specific).
GlobalVariable *VAArgTLS;
+
/// \brief Thread-local shadow storage for va_arg overflow area
/// (x86_64-specific).
GlobalVariable *VAArgOverflowSizeTLS;
+
/// \brief Thread-local space used to pass origin value to the UMR reporting
/// function.
GlobalVariable *OriginTLS;
/// \brief The run-time callback to print a warning.
- Value *WarningFn;
+ Value *WarningFn = nullptr;
+
// These arrays are indexed by log2(AccessSize).
Value *MaybeWarningFn[kNumberOfAccessSizes];
Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
/// \brief Run-time helper that generates a new origin value for a stack
/// allocation.
Value *MsanSetAllocaOrigin4Fn;
+
/// \brief Run-time helper that poisons stack on function entry.
Value *MsanPoisonStackFn;
+
/// \brief Run-time helper that records a store (or any event) of an
/// uninitialized value and returns an updated origin id encoding this info.
Value *MsanChainOriginFn;
+
/// \brief MSan runtime replacements for memmove, memcpy and memset.
Value *MemmoveFn, *MemcpyFn, *MemsetFn;
const MemoryMapParams *MapParams;
MDNode *ColdCallWeights;
+
/// \brief Branch weights for origin store.
MDNode *OriginStoreWeights;
+
/// \brief An empty volatile inline asm that prevents callback merge.
InlineAsm *EmptyAsm;
- Function *MsanCtorFunction;
- friend struct MemorySanitizerVisitor;
- friend struct VarArgAMD64Helper;
- friend struct VarArgMIPS64Helper;
- friend struct VarArgAArch64Helper;
- friend struct VarArgPowerPC64Helper;
+ Function *MsanCtorFunction;
};
-} // anonymous namespace
+
+} // end anonymous namespace
char MemorySanitizer::ID = 0;
+
INITIALIZE_PASS_BEGIN(
MemorySanitizer, "msan",
"MemorySanitizer: detects uninitialized reads.", false, false)
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
+ virtual ~VarArgHelper() = default;
+
/// \brief Visit a CallSite.
virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
/// This method is called after visiting all interesting (see above)
/// instructions in a function.
virtual void finalizeInstrumentation() = 0;
-
- virtual ~VarArgHelper() {}
};
struct MemorySanitizerVisitor;
-VarArgHelper*
-CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
- MemorySanitizerVisitor &Visitor);
+} // end anonymous namespace
+
+static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
+ MemorySanitizerVisitor &Visitor);
-unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
+static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
if (TypeSize <= 8) return 0;
return Log2_32_Ceil((TypeSize + 7) / 8);
}
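+
+// For example: TypeSizeToSizeIndex(8) == 0, TypeSizeToSizeIndex(16) == 1,
+// TypeSizeToSizeIndex(32) == 2 and TypeSizeToSizeIndex(128) == 4, i.e. the
+// log2 of the access size in bytes, rounded up (illustrative).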
+namespace {
+
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
Value *Shadow;
Value *Origin;
Instruction *OrigIns;
+
ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
- : Shadow(S), Origin(O), OrigIns(I) { }
+ : Shadow(S), Origin(O), OrigIns(I) {}
};
SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
SmallVector<StoreInst *, 16> StoreList;
for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
visit(*BB);
-
// Finalize PHI nodes.
for (PHINode *PN : ShadowPHINodes) {
PHINode *PNS = cast<PHINode>(getShadow(PN));
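+      // The elided body mirrors each incoming edge: for every (V, BB) pair of
+      // the original PHI, PNS receives (getShadow(V), BB) (sketch).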
/// arguments are initialized.
template <bool CombineShadow>
class Combiner {
- Value *Shadow;
- Value *Origin;
+ Value *Shadow = nullptr;
+ Value *Origin = nullptr;
IRBuilder<> &IRB;
MemorySanitizerVisitor *MSV;
public:
- Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
- Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}
+ Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
+ : IRB(IRB), MSV(MSV) {}
/// \brief Add a pair of shadow and origin values to the mix.
Combiner &Add(Value *OpShadow, Value *OpOrigin) {
}
};
- typedef Combiner<true> ShadowAndOriginCombiner;
- typedef Combiner<false> OriginCombiner;
+ using ShadowAndOriginCombiner = Combiner<true>;
+ using OriginCombiner = Combiner<false>;
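+
+  // Typical use (a sketch, assuming the Combiner's Done(Instruction *)
+  // finalizer, which is elided above):
+  //
+  //   ShadowAndOriginCombiner SC(this, IRB);
+  //   for (Use &Op : I.operands())
+  //     SC.Add(Op.get());
+  //   SC.Done(&I);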
/// \brief Propagate origin for arbitrary operation.
void setOriginForNaryOp(Instruction &I) {
// intrinsic.
Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
switch (id) {
- case llvm::Intrinsic::x86_sse2_packsswb_128:
- case llvm::Intrinsic::x86_sse2_packuswb_128:
- return llvm::Intrinsic::x86_sse2_packsswb_128;
+ case Intrinsic::x86_sse2_packsswb_128:
+ case Intrinsic::x86_sse2_packuswb_128:
+ return Intrinsic::x86_sse2_packsswb_128;
- case llvm::Intrinsic::x86_sse2_packssdw_128:
- case llvm::Intrinsic::x86_sse41_packusdw:
- return llvm::Intrinsic::x86_sse2_packssdw_128;
+ case Intrinsic::x86_sse2_packssdw_128:
+ case Intrinsic::x86_sse41_packusdw:
+ return Intrinsic::x86_sse2_packssdw_128;
- case llvm::Intrinsic::x86_avx2_packsswb:
- case llvm::Intrinsic::x86_avx2_packuswb:
- return llvm::Intrinsic::x86_avx2_packsswb;
+ case Intrinsic::x86_avx2_packsswb:
+ case Intrinsic::x86_avx2_packuswb:
+ return Intrinsic::x86_avx2_packsswb;
- case llvm::Intrinsic::x86_avx2_packssdw:
- case llvm::Intrinsic::x86_avx2_packusdw:
- return llvm::Intrinsic::x86_avx2_packssdw;
+ case Intrinsic::x86_avx2_packssdw:
+ case Intrinsic::x86_avx2_packusdw:
+ return Intrinsic::x86_avx2_packssdw;
- case llvm::Intrinsic::x86_mmx_packsswb:
- case llvm::Intrinsic::x86_mmx_packuswb:
- return llvm::Intrinsic::x86_mmx_packsswb;
+ case Intrinsic::x86_mmx_packsswb:
+ case Intrinsic::x86_mmx_packuswb:
+ return Intrinsic::x86_mmx_packsswb;
- case llvm::Intrinsic::x86_mmx_packssdw:
- return llvm::Intrinsic::x86_mmx_packssdw;
+ case Intrinsic::x86_mmx_packssdw:
+ return Intrinsic::x86_mmx_packssdw;
default:
llvm_unreachable("unexpected intrinsic id");
}
S2 = IRB.CreateBitCast(S2, T);
}
Value *S1_ext = IRB.CreateSExt(
- IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
+ IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
Value *S2_ext = IRB.CreateSExt(
- IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
+ IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
if (isX86_MMX) {
Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
- case llvm::Intrinsic::bswap:
+ case Intrinsic::bswap:
handleBswap(I);
break;
- case llvm::Intrinsic::x86_sse_stmxcsr:
+ case Intrinsic::x86_sse_stmxcsr:
handleStmxcsr(I);
break;
- case llvm::Intrinsic::x86_sse_ldmxcsr:
+ case Intrinsic::x86_sse_ldmxcsr:
handleLdmxcsr(I);
break;
- case llvm::Intrinsic::x86_avx512_vcvtsd2usi64:
- case llvm::Intrinsic::x86_avx512_vcvtsd2usi32:
- case llvm::Intrinsic::x86_avx512_vcvtss2usi64:
- case llvm::Intrinsic::x86_avx512_vcvtss2usi32:
- case llvm::Intrinsic::x86_avx512_cvttss2usi64:
- case llvm::Intrinsic::x86_avx512_cvttss2usi:
- case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
- case llvm::Intrinsic::x86_avx512_cvttsd2usi:
- case llvm::Intrinsic::x86_avx512_cvtusi2sd:
- case llvm::Intrinsic::x86_avx512_cvtusi2ss:
- case llvm::Intrinsic::x86_avx512_cvtusi642sd:
- case llvm::Intrinsic::x86_avx512_cvtusi642ss:
- case llvm::Intrinsic::x86_sse2_cvtsd2si64:
- case llvm::Intrinsic::x86_sse2_cvtsd2si:
- case llvm::Intrinsic::x86_sse2_cvtsd2ss:
- case llvm::Intrinsic::x86_sse2_cvtsi2sd:
- case llvm::Intrinsic::x86_sse2_cvtsi642sd:
- case llvm::Intrinsic::x86_sse2_cvtss2sd:
- case llvm::Intrinsic::x86_sse2_cvttsd2si64:
- case llvm::Intrinsic::x86_sse2_cvttsd2si:
- case llvm::Intrinsic::x86_sse_cvtsi2ss:
- case llvm::Intrinsic::x86_sse_cvtsi642ss:
- case llvm::Intrinsic::x86_sse_cvtss2si64:
- case llvm::Intrinsic::x86_sse_cvtss2si:
- case llvm::Intrinsic::x86_sse_cvttss2si64:
- case llvm::Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_avx512_vcvtsd2usi64:
+ case Intrinsic::x86_avx512_vcvtsd2usi32:
+ case Intrinsic::x86_avx512_vcvtss2usi64:
+ case Intrinsic::x86_avx512_vcvtss2usi32:
+ case Intrinsic::x86_avx512_cvttss2usi64:
+ case Intrinsic::x86_avx512_cvttss2usi:
+ case Intrinsic::x86_avx512_cvttsd2usi64:
+ case Intrinsic::x86_avx512_cvttsd2usi:
+ case Intrinsic::x86_avx512_cvtusi2sd:
+ case Intrinsic::x86_avx512_cvtusi2ss:
+ case Intrinsic::x86_avx512_cvtusi642sd:
+ case Intrinsic::x86_avx512_cvtusi642ss:
+ case Intrinsic::x86_sse2_cvtsd2si64:
+ case Intrinsic::x86_sse2_cvtsd2si:
+ case Intrinsic::x86_sse2_cvtsd2ss:
+ case Intrinsic::x86_sse2_cvtsi2sd:
+ case Intrinsic::x86_sse2_cvtsi642sd:
+ case Intrinsic::x86_sse2_cvtss2sd:
+ case Intrinsic::x86_sse2_cvttsd2si64:
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse_cvtsi2ss:
+ case Intrinsic::x86_sse_cvtsi642ss:
+ case Intrinsic::x86_sse_cvtss2si64:
+ case Intrinsic::x86_sse_cvtss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ case Intrinsic::x86_sse_cvttss2si:
handleVectorConvertIntrinsic(I, 1);
break;
- case llvm::Intrinsic::x86_sse_cvtps2pi:
- case llvm::Intrinsic::x86_sse_cvttps2pi:
+ case Intrinsic::x86_sse_cvtps2pi:
+ case Intrinsic::x86_sse_cvttps2pi:
handleVectorConvertIntrinsic(I, 2);
break;
- case llvm::Intrinsic::x86_avx512_psll_w_512:
- case llvm::Intrinsic::x86_avx512_psll_d_512:
- case llvm::Intrinsic::x86_avx512_psll_q_512:
- case llvm::Intrinsic::x86_avx512_pslli_w_512:
- case llvm::Intrinsic::x86_avx512_pslli_d_512:
- case llvm::Intrinsic::x86_avx512_pslli_q_512:
- case llvm::Intrinsic::x86_avx512_psrl_w_512:
- case llvm::Intrinsic::x86_avx512_psrl_d_512:
- case llvm::Intrinsic::x86_avx512_psrl_q_512:
- case llvm::Intrinsic::x86_avx512_psra_w_512:
- case llvm::Intrinsic::x86_avx512_psra_d_512:
- case llvm::Intrinsic::x86_avx512_psra_q_512:
- case llvm::Intrinsic::x86_avx512_psrli_w_512:
- case llvm::Intrinsic::x86_avx512_psrli_d_512:
- case llvm::Intrinsic::x86_avx512_psrli_q_512:
- case llvm::Intrinsic::x86_avx512_psrai_w_512:
- case llvm::Intrinsic::x86_avx512_psrai_d_512:
- case llvm::Intrinsic::x86_avx512_psrai_q_512:
- case llvm::Intrinsic::x86_avx512_psra_q_256:
- case llvm::Intrinsic::x86_avx512_psra_q_128:
- case llvm::Intrinsic::x86_avx512_psrai_q_256:
- case llvm::Intrinsic::x86_avx512_psrai_q_128:
- case llvm::Intrinsic::x86_avx2_psll_w:
- case llvm::Intrinsic::x86_avx2_psll_d:
- case llvm::Intrinsic::x86_avx2_psll_q:
- case llvm::Intrinsic::x86_avx2_pslli_w:
- case llvm::Intrinsic::x86_avx2_pslli_d:
- case llvm::Intrinsic::x86_avx2_pslli_q:
- case llvm::Intrinsic::x86_avx2_psrl_w:
- case llvm::Intrinsic::x86_avx2_psrl_d:
- case llvm::Intrinsic::x86_avx2_psrl_q:
- case llvm::Intrinsic::x86_avx2_psra_w:
- case llvm::Intrinsic::x86_avx2_psra_d:
- case llvm::Intrinsic::x86_avx2_psrli_w:
- case llvm::Intrinsic::x86_avx2_psrli_d:
- case llvm::Intrinsic::x86_avx2_psrli_q:
- case llvm::Intrinsic::x86_avx2_psrai_w:
- case llvm::Intrinsic::x86_avx2_psrai_d:
- case llvm::Intrinsic::x86_sse2_psll_w:
- case llvm::Intrinsic::x86_sse2_psll_d:
- case llvm::Intrinsic::x86_sse2_psll_q:
- case llvm::Intrinsic::x86_sse2_pslli_w:
- case llvm::Intrinsic::x86_sse2_pslli_d:
- case llvm::Intrinsic::x86_sse2_pslli_q:
- case llvm::Intrinsic::x86_sse2_psrl_w:
- case llvm::Intrinsic::x86_sse2_psrl_d:
- case llvm::Intrinsic::x86_sse2_psrl_q:
- case llvm::Intrinsic::x86_sse2_psra_w:
- case llvm::Intrinsic::x86_sse2_psra_d:
- case llvm::Intrinsic::x86_sse2_psrli_w:
- case llvm::Intrinsic::x86_sse2_psrli_d:
- case llvm::Intrinsic::x86_sse2_psrli_q:
- case llvm::Intrinsic::x86_sse2_psrai_w:
- case llvm::Intrinsic::x86_sse2_psrai_d:
- case llvm::Intrinsic::x86_mmx_psll_w:
- case llvm::Intrinsic::x86_mmx_psll_d:
- case llvm::Intrinsic::x86_mmx_psll_q:
- case llvm::Intrinsic::x86_mmx_pslli_w:
- case llvm::Intrinsic::x86_mmx_pslli_d:
- case llvm::Intrinsic::x86_mmx_pslli_q:
- case llvm::Intrinsic::x86_mmx_psrl_w:
- case llvm::Intrinsic::x86_mmx_psrl_d:
- case llvm::Intrinsic::x86_mmx_psrl_q:
- case llvm::Intrinsic::x86_mmx_psra_w:
- case llvm::Intrinsic::x86_mmx_psra_d:
- case llvm::Intrinsic::x86_mmx_psrli_w:
- case llvm::Intrinsic::x86_mmx_psrli_d:
- case llvm::Intrinsic::x86_mmx_psrli_q:
- case llvm::Intrinsic::x86_mmx_psrai_w:
- case llvm::Intrinsic::x86_mmx_psrai_d:
+ case Intrinsic::x86_avx512_psll_w_512:
+ case Intrinsic::x86_avx512_psll_d_512:
+ case Intrinsic::x86_avx512_psll_q_512:
+ case Intrinsic::x86_avx512_pslli_w_512:
+ case Intrinsic::x86_avx512_pslli_d_512:
+ case Intrinsic::x86_avx512_pslli_q_512:
+ case Intrinsic::x86_avx512_psrl_w_512:
+ case Intrinsic::x86_avx512_psrl_d_512:
+ case Intrinsic::x86_avx512_psrl_q_512:
+ case Intrinsic::x86_avx512_psra_w_512:
+ case Intrinsic::x86_avx512_psra_d_512:
+ case Intrinsic::x86_avx512_psra_q_512:
+ case Intrinsic::x86_avx512_psrli_w_512:
+ case Intrinsic::x86_avx512_psrli_d_512:
+ case Intrinsic::x86_avx512_psrli_q_512:
+ case Intrinsic::x86_avx512_psrai_w_512:
+ case Intrinsic::x86_avx512_psrai_d_512:
+ case Intrinsic::x86_avx512_psrai_q_512:
+ case Intrinsic::x86_avx512_psra_q_256:
+ case Intrinsic::x86_avx512_psra_q_128:
+ case Intrinsic::x86_avx512_psrai_q_256:
+ case Intrinsic::x86_avx512_psrai_q_128:
+ case Intrinsic::x86_avx2_psll_w:
+ case Intrinsic::x86_avx2_psll_d:
+ case Intrinsic::x86_avx2_psll_q:
+ case Intrinsic::x86_avx2_pslli_w:
+ case Intrinsic::x86_avx2_pslli_d:
+ case Intrinsic::x86_avx2_pslli_q:
+ case Intrinsic::x86_avx2_psrl_w:
+ case Intrinsic::x86_avx2_psrl_d:
+ case Intrinsic::x86_avx2_psrl_q:
+ case Intrinsic::x86_avx2_psra_w:
+ case Intrinsic::x86_avx2_psra_d:
+ case Intrinsic::x86_avx2_psrli_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ case Intrinsic::x86_avx2_psrai_w:
+ case Intrinsic::x86_avx2_psrai_d:
+ case Intrinsic::x86_sse2_psll_w:
+ case Intrinsic::x86_sse2_psll_d:
+ case Intrinsic::x86_sse2_psll_q:
+ case Intrinsic::x86_sse2_pslli_w:
+ case Intrinsic::x86_sse2_pslli_d:
+ case Intrinsic::x86_sse2_pslli_q:
+ case Intrinsic::x86_sse2_psrl_w:
+ case Intrinsic::x86_sse2_psrl_d:
+ case Intrinsic::x86_sse2_psrl_q:
+ case Intrinsic::x86_sse2_psra_w:
+ case Intrinsic::x86_sse2_psra_d:
+ case Intrinsic::x86_sse2_psrli_w:
+ case Intrinsic::x86_sse2_psrli_d:
+ case Intrinsic::x86_sse2_psrli_q:
+ case Intrinsic::x86_sse2_psrai_w:
+ case Intrinsic::x86_sse2_psrai_d:
+ case Intrinsic::x86_mmx_psll_w:
+ case Intrinsic::x86_mmx_psll_d:
+ case Intrinsic::x86_mmx_psll_q:
+ case Intrinsic::x86_mmx_pslli_w:
+ case Intrinsic::x86_mmx_pslli_d:
+ case Intrinsic::x86_mmx_pslli_q:
+ case Intrinsic::x86_mmx_psrl_w:
+ case Intrinsic::x86_mmx_psrl_d:
+ case Intrinsic::x86_mmx_psrl_q:
+ case Intrinsic::x86_mmx_psra_w:
+ case Intrinsic::x86_mmx_psra_d:
+ case Intrinsic::x86_mmx_psrli_w:
+ case Intrinsic::x86_mmx_psrli_d:
+ case Intrinsic::x86_mmx_psrli_q:
+ case Intrinsic::x86_mmx_psrai_w:
+ case Intrinsic::x86_mmx_psrai_d:
handleVectorShiftIntrinsic(I, /* Variable */ false);
break;
- case llvm::Intrinsic::x86_avx2_psllv_d:
- case llvm::Intrinsic::x86_avx2_psllv_d_256:
- case llvm::Intrinsic::x86_avx512_psllv_d_512:
- case llvm::Intrinsic::x86_avx2_psllv_q:
- case llvm::Intrinsic::x86_avx2_psllv_q_256:
- case llvm::Intrinsic::x86_avx512_psllv_q_512:
- case llvm::Intrinsic::x86_avx2_psrlv_d:
- case llvm::Intrinsic::x86_avx2_psrlv_d_256:
- case llvm::Intrinsic::x86_avx512_psrlv_d_512:
- case llvm::Intrinsic::x86_avx2_psrlv_q:
- case llvm::Intrinsic::x86_avx2_psrlv_q_256:
- case llvm::Intrinsic::x86_avx512_psrlv_q_512:
- case llvm::Intrinsic::x86_avx2_psrav_d:
- case llvm::Intrinsic::x86_avx2_psrav_d_256:
- case llvm::Intrinsic::x86_avx512_psrav_d_512:
- case llvm::Intrinsic::x86_avx512_psrav_q_128:
- case llvm::Intrinsic::x86_avx512_psrav_q_256:
- case llvm::Intrinsic::x86_avx512_psrav_q_512:
+ case Intrinsic::x86_avx2_psllv_d:
+ case Intrinsic::x86_avx2_psllv_d_256:
+ case Intrinsic::x86_avx512_psllv_d_512:
+ case Intrinsic::x86_avx2_psllv_q:
+ case Intrinsic::x86_avx2_psllv_q_256:
+ case Intrinsic::x86_avx512_psllv_q_512:
+ case Intrinsic::x86_avx2_psrlv_d:
+ case Intrinsic::x86_avx2_psrlv_d_256:
+ case Intrinsic::x86_avx512_psrlv_d_512:
+ case Intrinsic::x86_avx2_psrlv_q:
+ case Intrinsic::x86_avx2_psrlv_q_256:
+ case Intrinsic::x86_avx512_psrlv_q_512:
+ case Intrinsic::x86_avx2_psrav_d:
+ case Intrinsic::x86_avx2_psrav_d_256:
+ case Intrinsic::x86_avx512_psrav_d_512:
+ case Intrinsic::x86_avx512_psrav_q_128:
+ case Intrinsic::x86_avx512_psrav_q_256:
+ case Intrinsic::x86_avx512_psrav_q_512:
handleVectorShiftIntrinsic(I, /* Variable */ true);
break;
- case llvm::Intrinsic::x86_sse2_packsswb_128:
- case llvm::Intrinsic::x86_sse2_packssdw_128:
- case llvm::Intrinsic::x86_sse2_packuswb_128:
- case llvm::Intrinsic::x86_sse41_packusdw:
- case llvm::Intrinsic::x86_avx2_packsswb:
- case llvm::Intrinsic::x86_avx2_packssdw:
- case llvm::Intrinsic::x86_avx2_packuswb:
- case llvm::Intrinsic::x86_avx2_packusdw:
+ case Intrinsic::x86_sse2_packsswb_128:
+ case Intrinsic::x86_sse2_packssdw_128:
+ case Intrinsic::x86_sse2_packuswb_128:
+ case Intrinsic::x86_sse41_packusdw:
+ case Intrinsic::x86_avx2_packsswb:
+ case Intrinsic::x86_avx2_packssdw:
+ case Intrinsic::x86_avx2_packuswb:
+ case Intrinsic::x86_avx2_packusdw:
handleVectorPackIntrinsic(I);
break;
- case llvm::Intrinsic::x86_mmx_packsswb:
- case llvm::Intrinsic::x86_mmx_packuswb:
+ case Intrinsic::x86_mmx_packsswb:
+ case Intrinsic::x86_mmx_packuswb:
handleVectorPackIntrinsic(I, 16);
break;
- case llvm::Intrinsic::x86_mmx_packssdw:
+ case Intrinsic::x86_mmx_packssdw:
handleVectorPackIntrinsic(I, 32);
break;
- case llvm::Intrinsic::x86_mmx_psad_bw:
- case llvm::Intrinsic::x86_sse2_psad_bw:
- case llvm::Intrinsic::x86_avx2_psad_bw:
+ case Intrinsic::x86_mmx_psad_bw:
+ case Intrinsic::x86_sse2_psad_bw:
+ case Intrinsic::x86_avx2_psad_bw:
handleVectorSadIntrinsic(I);
break;
- case llvm::Intrinsic::x86_sse2_pmadd_wd:
- case llvm::Intrinsic::x86_avx2_pmadd_wd:
- case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
- case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
+ case Intrinsic::x86_sse2_pmadd_wd:
+ case Intrinsic::x86_avx2_pmadd_wd:
+ case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
+ case Intrinsic::x86_avx2_pmadd_ub_sw:
handleVectorPmaddIntrinsic(I);
break;
- case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
+ case Intrinsic::x86_ssse3_pmadd_ub_sw:
handleVectorPmaddIntrinsic(I, 8);
break;
- case llvm::Intrinsic::x86_mmx_pmadd_wd:
+ case Intrinsic::x86_mmx_pmadd_wd:
handleVectorPmaddIntrinsic(I, 16);
break;
- case llvm::Intrinsic::x86_sse_cmp_ss:
- case llvm::Intrinsic::x86_sse2_cmp_sd:
- case llvm::Intrinsic::x86_sse_comieq_ss:
- case llvm::Intrinsic::x86_sse_comilt_ss:
- case llvm::Intrinsic::x86_sse_comile_ss:
- case llvm::Intrinsic::x86_sse_comigt_ss:
- case llvm::Intrinsic::x86_sse_comige_ss:
- case llvm::Intrinsic::x86_sse_comineq_ss:
- case llvm::Intrinsic::x86_sse_ucomieq_ss:
- case llvm::Intrinsic::x86_sse_ucomilt_ss:
- case llvm::Intrinsic::x86_sse_ucomile_ss:
- case llvm::Intrinsic::x86_sse_ucomigt_ss:
- case llvm::Intrinsic::x86_sse_ucomige_ss:
- case llvm::Intrinsic::x86_sse_ucomineq_ss:
- case llvm::Intrinsic::x86_sse2_comieq_sd:
- case llvm::Intrinsic::x86_sse2_comilt_sd:
- case llvm::Intrinsic::x86_sse2_comile_sd:
- case llvm::Intrinsic::x86_sse2_comigt_sd:
- case llvm::Intrinsic::x86_sse2_comige_sd:
- case llvm::Intrinsic::x86_sse2_comineq_sd:
- case llvm::Intrinsic::x86_sse2_ucomieq_sd:
- case llvm::Intrinsic::x86_sse2_ucomilt_sd:
- case llvm::Intrinsic::x86_sse2_ucomile_sd:
- case llvm::Intrinsic::x86_sse2_ucomigt_sd:
- case llvm::Intrinsic::x86_sse2_ucomige_sd:
- case llvm::Intrinsic::x86_sse2_ucomineq_sd:
+ case Intrinsic::x86_sse_cmp_ss:
+ case Intrinsic::x86_sse2_cmp_sd:
+ case Intrinsic::x86_sse_comieq_ss:
+ case Intrinsic::x86_sse_comilt_ss:
+ case Intrinsic::x86_sse_comile_ss:
+ case Intrinsic::x86_sse_comigt_ss:
+ case Intrinsic::x86_sse_comige_ss:
+ case Intrinsic::x86_sse_comineq_ss:
+ case Intrinsic::x86_sse_ucomieq_ss:
+ case Intrinsic::x86_sse_ucomilt_ss:
+ case Intrinsic::x86_sse_ucomile_ss:
+ case Intrinsic::x86_sse_ucomigt_ss:
+ case Intrinsic::x86_sse_ucomige_ss:
+ case Intrinsic::x86_sse_ucomineq_ss:
+ case Intrinsic::x86_sse2_comieq_sd:
+ case Intrinsic::x86_sse2_comilt_sd:
+ case Intrinsic::x86_sse2_comile_sd:
+ case Intrinsic::x86_sse2_comigt_sd:
+ case Intrinsic::x86_sse2_comige_sd:
+ case Intrinsic::x86_sse2_comineq_sd:
+ case Intrinsic::x86_sse2_ucomieq_sd:
+ case Intrinsic::x86_sse2_ucomilt_sd:
+ case Intrinsic::x86_sse2_ucomile_sd:
+ case Intrinsic::x86_sse2_ucomigt_sd:
+ case Intrinsic::x86_sse2_ucomige_sd:
+ case Intrinsic::x86_sse2_ucomineq_sd:
handleVectorCompareScalarIntrinsic(I);
break;
- case llvm::Intrinsic::x86_sse_cmp_ps:
- case llvm::Intrinsic::x86_sse2_cmp_pd:
+ case Intrinsic::x86_sse_cmp_ps:
+ case Intrinsic::x86_sse2_cmp_pd:
// FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
    // generates reasonable-looking IR that fails in the backend with "Do not
// know how to split the result of this operator!".
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
- Value *VAArgTLSCopy;
- Value *VAArgOverflowSize;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgOverflowSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
- VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
- VAArgOverflowSize(nullptr) {}
-
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
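+
+  // e.g. (illustrative): an i64 argument classifies as AK_GeneralPurpose, a
+  // double as AK_FloatingPoint, and an aggregate passed in memory as
+  // AK_Memory.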
+ VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+
ArgKind classifyArgument(Value* arg) {
// A very rough approximation of X86_64 argument classification rules.
Type *T = arg->getType();
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
- Value *VAArgTLSCopy;
- Value *VAArgSize;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
- VAArgSize(nullptr) {}
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
CS.getFunctionType()->getNumParams(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
- llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
+ Triple TargetTriple(F.getParent()->getTargetTriple());
Value *A = *ArgIt;
Value *Base;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
- if (TargetTriple.getArch() == llvm::Triple::mips64) {
+ if (TargetTriple.getArch() == Triple::mips64) {
        // Adjust the shadow for arguments with size < 8 to match the
        // placement of bits in a big-endian system.
if (ArgSize < 8)
}
};
-
/// \brief AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelper {
static const unsigned kAArch64GrArgSize = 64;
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
- Value *VAArgTLSCopy;
- Value *VAArgOverflowSize;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgOverflowSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
- VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
- VAArgOverflowSize(nullptr) {}
-
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
+ VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+
ArgKind classifyArgument(Value* arg) {
Type *T = arg->getType();
if (T->isFPOrFPVectorTy())
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
- Value *VAArgTLSCopy;
- Value *VAArgSize;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
- VAArgSize(nullptr) {}
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
// For PowerPC, we need to deal with alignment of stack arguments -
// compute current offset from stack pointer (which is always properly
// aligned), and offset for the first vararg, then subtract them.
unsigned VAArgBase;
- llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
+ Triple TargetTriple(F.getParent()->getTargetTriple());
// Parameter save area starts at 48 bytes from frame pointer for ABIv1,
// and 32 bytes for ABIv2. This is usually determined by target
    // endianness, but in theory could be overridden by a function attribute.
// For simplicity, we ignore it here (it'd only matter for QPX vectors).
- if (TargetTriple.getArch() == llvm::Triple::ppc64)
+ if (TargetTriple.getArch() == Triple::ppc64)
VAArgBase = 48;
else
VAArgBase = 32;
void finalizeInstrumentation() override {}
};
-VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
- MemorySanitizerVisitor &Visitor) {
+} // end anonymous namespace
+
+static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
+ MemorySanitizerVisitor &Visitor) {
  // VarArg handling is implemented only on AMD64, MIPS64, AArch64 and
  // PowerPC64; on other platforms the no-op helper is used, so false
  // positives are possible there.
- llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
- if (TargetTriple.getArch() == llvm::Triple::x86_64)
+ Triple TargetTriple(Func.getParent()->getTargetTriple());
+ if (TargetTriple.getArch() == Triple::x86_64)
return new VarArgAMD64Helper(Func, Msan, Visitor);
- else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
- TargetTriple.getArch() == llvm::Triple::mips64el)
+ else if (TargetTriple.getArch() == Triple::mips64 ||
+ TargetTriple.getArch() == Triple::mips64el)
return new VarArgMIPS64Helper(Func, Msan, Visitor);
- else if (TargetTriple.getArch() == llvm::Triple::aarch64)
+ else if (TargetTriple.getArch() == Triple::aarch64)
return new VarArgAArch64Helper(Func, Msan, Visitor);
- else if (TargetTriple.getArch() == llvm::Triple::ppc64 ||
- TargetTriple.getArch() == llvm::Triple::ppc64le)
+ else if (TargetTriple.getArch() == Triple::ppc64 ||
+ TargetTriple.getArch() == Triple::ppc64le)
return new VarArgPowerPC64Helper(Func, Msan, Visitor);
else
return new VarArgNoOpHelper(Func, Msan, Visitor);
}
-} // anonymous namespace
-
bool MemorySanitizer::runOnFunction(Function &F) {
if (&F == MsanCtorFunction)
return false;