void initializeEarlyMachineLICMPass(PassRegistry&);
void initializeEarlyTailDuplicatePass(PassRegistry&);
void initializeEdgeBundlesPass(PassRegistry&);
-void initializeEfficiencySanitizerPass(PassRegistry&);
void initializeEliminateAvailableExternallyLegacyPassPass(PassRegistry&);
void initializeEntryExitInstrumenterPass(PassRegistry&);
void initializeExpandISelPseudosPass(PassRegistry&);
const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
-// Options for EfficiencySanitizer sub-tools.
-struct EfficiencySanitizerOptions {
- enum Type {
- ESAN_None = 0,
- ESAN_CacheFrag,
- ESAN_WorkingSet,
- } ToolType = ESAN_None;
-
- EfficiencySanitizerOptions() = default;
-};
-
-// Insert EfficiencySanitizer instrumentation.
-ModulePass *createEfficiencySanitizerPass(
- const EfficiencySanitizerOptions &Options = EfficiencySanitizerOptions());
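-
-// Illustrative only (hypothetical driver snippet, not part of this header):
-// a legacy-pass-manager client would schedule the pass roughly as
-//   legacy::PassManager PM;
-//   EfficiencySanitizerOptions Opts;
-//   Opts.ToolType = EfficiencySanitizerOptions::ESAN_WorkingSet;
-//   PM.add(createEfficiencySanitizerPass(Opts));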
-
// Options for sanitizer coverage instrumentation.
struct SanitizerCoverageOptions {
enum Type {
PGOMemOPSizeOpt.cpp
SanitizerCoverage.cpp
ThreadSanitizer.cpp
- EfficiencySanitizer.cpp
HWAddressSanitizer.cpp
ADDITIONAL_HEADER_DIRS
+++ /dev/null
-//===-- EfficiencySanitizer.cpp - performance tuner -----------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is part of EfficiencySanitizer, a family of performance tuners
-// that detect multiple performance issues via separate sub-tools.
-//
-// The instrumentation phase is straightforward:
-// - Take action on every memory access: either inlined instrumentation,
-//   or inserted calls to our run-time library.
-// - Optimizations may apply to avoid instrumenting some of the accesses.
-// - Turn mem{set,cpy,move} intrinsics into library calls.
-// The rest is handled by the run-time library.
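-//
-// Illustrative sketch (not from the original sources): a 4-byte aligned
-// load such as
-//   %v = load i32, i32* %p
-// is conceptually instrumented with a slowpath callout of the form
-//   call void @__esan_aligned_load4(i8* %p.i8)
-// where %p.i8 is %p cast to i8*; the fastpath variants instead inline
-// shadow updates (see instrumentFastpathWorkingSet below).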
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/Type.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/ModuleUtils.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "esan"
-
-// The tool type must be exactly one of these ClTool* options; the tools
-// cannot be combined due to shadow memory constraints.
-static cl::opt<bool>
- ClToolCacheFrag("esan-cache-frag", cl::init(false),
- cl::desc("Detect data cache fragmentation"), cl::Hidden);
-static cl::opt<bool>
- ClToolWorkingSet("esan-working-set", cl::init(false),
- cl::desc("Measure the working set size"), cl::Hidden);
-// Each new tool will get its own opt flag here.
-// These are converted to EfficiencySanitizerOptions for use
-// in the code.
-
-static cl::opt<bool> ClInstrumentLoadsAndStores(
- "esan-instrument-loads-and-stores", cl::init(true),
- cl::desc("Instrument loads and stores"), cl::Hidden);
-static cl::opt<bool> ClInstrumentMemIntrinsics(
- "esan-instrument-memintrinsics", cl::init(true),
- cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
-static cl::opt<bool> ClInstrumentFastpath(
- "esan-instrument-fastpath", cl::init(true),
- cl::desc("Instrument fastpath"), cl::Hidden);
-static cl::opt<bool> ClAuxFieldInfo(
- "esan-aux-field-info", cl::init(true),
- cl::desc("Generate binary with auxiliary struct field information"),
- cl::Hidden);
-
-// Experiments show that the performance difference can be 2x or more,
-// and accuracy loss is typically negligible, so we turn this on by default.
-static cl::opt<bool> ClAssumeIntraCacheLine(
- "esan-assume-intra-cache-line", cl::init(true),
- cl::desc("Assume each memory access touches just one cache line, for "
- "better performance but with a potential loss of accuracy."),
- cl::Hidden);
-
-STATISTIC(NumInstrumentedLoads, "Number of instrumented loads");
-STATISTIC(NumInstrumentedStores, "Number of instrumented stores");
-STATISTIC(NumFastpaths, "Number of instrumented fastpaths");
-STATISTIC(NumAccessesWithIrregularSize,
- "Number of accesses with a size outside our targeted callout sizes");
-STATISTIC(NumIgnoredStructs, "Number of ignored structs");
-STATISTIC(NumIgnoredGEPs, "Number of ignored GEP instructions");
-STATISTIC(NumInstrumentedGEPs, "Number of instrumented GEP instructions");
-STATISTIC(NumAssumedIntraCacheLine,
- "Number of accesses assumed to be intra-cache-line");
-
-static const uint64_t EsanCtorAndDtorPriority = 0;
-static const char *const EsanModuleCtorName = "esan.module_ctor";
-static const char *const EsanModuleDtorName = "esan.module_dtor";
-static const char *const EsanInitName = "__esan_init";
-static const char *const EsanExitName = "__esan_exit";
-
-// In some cases we need to specify the tool to the runtime before the
-// ctor is called, so we set a global variable.
-static const char *const EsanWhichToolName = "__esan_which_tool";
-
-// We must keep these Shadow* constants consistent with the esan runtime.
-// FIXME: Try to place these shadow constants, the names of the __esan_*
-// interface functions, and the ToolType enum into a header shared between
-// llvm and compiler-rt.
-struct ShadowMemoryParams {
- uint64_t ShadowMask;
- uint64_t ShadowOffs[3];
-};
-
-static const ShadowMemoryParams ShadowParams47 = {
- 0x00000fffffffffffull,
- {
- 0x0000130000000000ull, 0x0000220000000000ull, 0x0000440000000000ull,
- }};
-
-static const ShadowMemoryParams ShadowParams40 = {
- 0x0fffffffffull,
- {
- 0x1300000000ull, 0x2200000000ull, 0x4400000000ull,
- }};
-
-// This array is indexed by the ToolType enum.
-static const int ShadowScale[] = {
- 0, // ESAN_None.
- 2, // ESAN_CacheFrag: 4B:1B, so 4 to 1 == >>2.
- 6, // ESAN_WorkingSet: 64B:1B, so 64 to 1 == >>6.
-};
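-
-// Worked example (illustrative arithmetic only): with ShadowParams47 and
-// the working-set tool, Scale is 6, so appToShadow() computes
-//   Shadow = ((App & 0x00000fffffffffff) + (0x0000130000000000 << 6)) >> 6
-// i.e., each 64-byte application cache line maps to one shadow byte.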
-
-// MaxStructCounterNameSize is a soft size limit to avoid excessively long
-// counter names for extremely large structs.
-static const unsigned MaxStructCounterNameSize = 512;
-
-namespace {
-
-static EfficiencySanitizerOptions
-OverrideOptionsFromCL(EfficiencySanitizerOptions Options) {
- if (ClToolCacheFrag)
- Options.ToolType = EfficiencySanitizerOptions::ESAN_CacheFrag;
- else if (ClToolWorkingSet)
- Options.ToolType = EfficiencySanitizerOptions::ESAN_WorkingSet;
-
- // Direct opt invocation with no params will have the default ESAN_None.
- // We run the default tool in that case.
- if (Options.ToolType == EfficiencySanitizerOptions::ESAN_None)
- Options.ToolType = EfficiencySanitizerOptions::ESAN_CacheFrag;
-
- return Options;
-}
-
-/// EfficiencySanitizer: instrument each module to find performance issues.
-class EfficiencySanitizer : public ModulePass {
-public:
- EfficiencySanitizer(
- const EfficiencySanitizerOptions &Opts = EfficiencySanitizerOptions())
- : ModulePass(ID), Options(OverrideOptionsFromCL(Opts)) {}
- StringRef getPassName() const override;
- void getAnalysisUsage(AnalysisUsage &AU) const override;
- bool runOnModule(Module &M) override;
- static char ID;
-
-private:
- bool initOnModule(Module &M);
- void initializeCallbacks(Module &M);
- bool shouldIgnoreStructType(StructType *StructTy);
- void createStructCounterName(
- StructType *StructTy, SmallString<MaxStructCounterNameSize> &NameStr);
- void createCacheFragAuxGV(
- Module &M, const DataLayout &DL, StructType *StructTy,
- GlobalVariable *&TypeName, GlobalVariable *&Offset, GlobalVariable *&Size);
- GlobalVariable *createCacheFragInfoGV(Module &M, const DataLayout &DL,
- Constant *UnitName);
- Constant *createEsanInitToolInfoArg(Module &M, const DataLayout &DL);
- void createDestructor(Module &M, Constant *ToolInfoArg);
- bool runOnFunction(Function &F, Module &M);
- bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
- bool instrumentMemIntrinsic(MemIntrinsic *MI);
- bool instrumentGetElementPtr(Instruction *I, Module &M);
- bool insertCounterUpdate(Instruction *I, StructType *StructTy,
- unsigned CounterIdx);
- unsigned getFieldCounterIdx(StructType *StructTy) {
- return 0;
- }
- unsigned getArrayCounterIdx(StructType *StructTy) {
- return StructTy->getNumElements();
- }
- unsigned getStructCounterSize(StructType *StructTy) {
- // The struct counter array includes:
- // - one counter for each struct field,
- // - one counter for the struct access within an array.
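- // For example, a struct with 3 fields gets a 4-slot counter array:
- // slots 0-2 count per-field accesses (getFieldCounterIdx) and slot 3
- // counts accesses to the struct as an array element (getArrayCounterIdx).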
- return (StructTy->getNumElements()/*field*/ + 1/*array*/);
- }
- bool shouldIgnoreMemoryAccess(Instruction *I);
- int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
- Value *appToShadow(Value *Shadow, IRBuilder<> &IRB);
- bool instrumentFastpath(Instruction *I, const DataLayout &DL, bool IsStore,
- Value *Addr, unsigned Alignment);
- // Each tool has its own fastpath routine:
- bool instrumentFastpathCacheFrag(Instruction *I, const DataLayout &DL,
- Value *Addr, unsigned Alignment);
- bool instrumentFastpathWorkingSet(Instruction *I, const DataLayout &DL,
- Value *Addr, unsigned Alignment);
-
- EfficiencySanitizerOptions Options;
- LLVMContext *Ctx;
- Type *IntptrTy;
- // Our slowpath involves callouts to the runtime library.
- // Access sizes are powers of two: 1, 2, 4, 8, 16.
- static const size_t NumberOfAccessSizes = 5;
- FunctionCallee EsanAlignedLoad[NumberOfAccessSizes];
- FunctionCallee EsanAlignedStore[NumberOfAccessSizes];
- FunctionCallee EsanUnalignedLoad[NumberOfAccessSizes];
- FunctionCallee EsanUnalignedStore[NumberOfAccessSizes];
- // For irregular sizes of any alignment:
- FunctionCallee EsanUnalignedLoadN, EsanUnalignedStoreN;
- FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
- Function *EsanCtorFunction;
- Function *EsanDtorFunction;
- // Remember the counter variable for each struct type to avoid
- // recomputing the variable name later during instrumentation.
- std::map<Type *, GlobalVariable *> StructTyMap;
- ShadowMemoryParams ShadowParams;
-};
-} // namespace
-
-char EfficiencySanitizer::ID = 0;
-INITIALIZE_PASS_BEGIN(
- EfficiencySanitizer, "esan",
- "EfficiencySanitizer: finds performance issues.", false, false)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_PASS_END(
- EfficiencySanitizer, "esan",
- "EfficiencySanitizer: finds performance issues.", false, false)
-
-StringRef EfficiencySanitizer::getPassName() const {
- return "EfficiencySanitizer";
-}
-
-void EfficiencySanitizer::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<TargetLibraryInfoWrapperPass>();
-}
-
-ModulePass *
-llvm::createEfficiencySanitizerPass(const EfficiencySanitizerOptions &Options) {
- return new EfficiencySanitizer(Options);
-}
-
-void EfficiencySanitizer::initializeCallbacks(Module &M) {
- IRBuilder<> IRB(M.getContext());
- // Initialize the callbacks.
- for (size_t Idx = 0; Idx < NumberOfAccessSizes; ++Idx) {
- const unsigned ByteSize = 1U << Idx;
- std::string ByteSizeStr = utostr(ByteSize);
- // We inline the load and store instrumentation for the most common
- // cases (i.e., aligned accesses of frequent sizes); these callouts
- // are for the slowpath.
- SmallString<32> AlignedLoadName("__esan_aligned_load" + ByteSizeStr);
- EsanAlignedLoad[Idx] = M.getOrInsertFunction(
- AlignedLoadName, IRB.getVoidTy(), IRB.getInt8PtrTy());
- SmallString<32> AlignedStoreName("__esan_aligned_store" + ByteSizeStr);
- EsanAlignedStore[Idx] = M.getOrInsertFunction(
- AlignedStoreName, IRB.getVoidTy(), IRB.getInt8PtrTy());
- SmallString<32> UnalignedLoadName("__esan_unaligned_load" + ByteSizeStr);
- EsanUnalignedLoad[Idx] = M.getOrInsertFunction(
- UnalignedLoadName, IRB.getVoidTy(), IRB.getInt8PtrTy());
- SmallString<32> UnalignedStoreName("__esan_unaligned_store" + ByteSizeStr);
- EsanUnalignedStore[Idx] = M.getOrInsertFunction(
- UnalignedStoreName, IRB.getVoidTy(), IRB.getInt8PtrTy());
- }
- EsanUnalignedLoadN = M.getOrInsertFunction(
- "__esan_unaligned_loadN", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
- EsanUnalignedStoreN = M.getOrInsertFunction(
- "__esan_unaligned_storeN", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
- MemmoveFn =
- M.getOrInsertFunction("memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy);
- MemcpyFn =
- M.getOrInsertFunction("memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy);
- MemsetFn =
- M.getOrInsertFunction("memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt32Ty(), IntptrTy);
-}
-
-bool EfficiencySanitizer::shouldIgnoreStructType(StructType *StructTy) {
- if (StructTy == nullptr || StructTy->isOpaque() /* no struct body */)
- return true;
- return false;
-}
-
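-// Build the counter name for StructTy. For example (per the accompanying
-// tests), "%struct.A = type { i32, i32 }" yields "struct.A$2$11$11": the
-// struct name, then $NumFields, then one $-prefixed type id per field.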
-void EfficiencySanitizer::createStructCounterName(
- StructType *StructTy, SmallString<MaxStructCounterNameSize> &NameStr) {
- // Append NumFields and the field type ids to distinguish structs that
- // have the same name but different fields.
- if (StructTy->hasName())
- NameStr += StructTy->getName();
- else
- NameStr += "struct.anon";
- // We allow the actual StructCounterName to exceed
- // MaxStructCounterNameSize; we always append $NumFields and at least
- // one field type id.
- // Append $NumFields.
- NameStr += "$";
- Twine(StructTy->getNumElements()).toVector(NameStr);
- // Append the struct field type ids in reverse order.
- for (int i = StructTy->getNumElements() - 1; i >= 0; --i) {
- NameStr += "$";
- Twine(StructTy->getElementType(i)->getTypeID()).toVector(NameStr);
- if (NameStr.size() >= MaxStructCounterNameSize)
- break;
- }
- if (StructTy->isLiteral()) {
- // End with $ for literal struct.
- NameStr += "$";
- }
-}
-
-// Create global variables with auxiliary information (e.g., struct field
-// size, offset, and type name) for better user reports.
-void EfficiencySanitizer::createCacheFragAuxGV(
- Module &M, const DataLayout &DL, StructType *StructTy,
- GlobalVariable *&TypeName, GlobalVariable *&Offset,
- GlobalVariable *&Size) {
- auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
- auto *Int32Ty = Type::getInt32Ty(*Ctx);
- // FieldTypeName.
- auto *TypeNameArrayTy = ArrayType::get(Int8PtrTy, StructTy->getNumElements());
- TypeName = new GlobalVariable(M, TypeNameArrayTy, true,
- GlobalVariable::InternalLinkage, nullptr);
- SmallVector<Constant *, 16> TypeNameVec;
- // FieldOffset.
- auto *OffsetArrayTy = ArrayType::get(Int32Ty, StructTy->getNumElements());
- Offset = new GlobalVariable(M, OffsetArrayTy, true,
- GlobalVariable::InternalLinkage, nullptr);
- SmallVector<Constant *, 16> OffsetVec;
- // FieldSize
- auto *SizeArrayTy = ArrayType::get(Int32Ty, StructTy->getNumElements());
- Size = new GlobalVariable(M, SizeArrayTy, true,
- GlobalVariable::InternalLinkage, nullptr);
- SmallVector<Constant *, 16> SizeVec;
- for (unsigned i = 0; i < StructTy->getNumElements(); ++i) {
- Type *Ty = StructTy->getElementType(i);
- std::string Str;
- raw_string_ostream StrOS(Str);
- Ty->print(StrOS);
- TypeNameVec.push_back(
- ConstantExpr::getPointerCast(
- createPrivateGlobalForString(M, StrOS.str(), true),
- Int8PtrTy));
- OffsetVec.push_back(
- ConstantInt::get(Int32Ty,
- DL.getStructLayout(StructTy)->getElementOffset(i)));
- SizeVec.push_back(ConstantInt::get(Int32Ty,
- DL.getTypeAllocSize(Ty)));
- }
- TypeName->setInitializer(ConstantArray::get(TypeNameArrayTy, TypeNameVec));
- Offset->setInitializer(ConstantArray::get(OffsetArrayTy, OffsetVec));
- Size->setInitializer(ConstantArray::get(SizeArrayTy, SizeVec));
-}
-
-// Create the global variable for the cache-fragmentation tool.
-GlobalVariable *EfficiencySanitizer::createCacheFragInfoGV(
- Module &M, const DataLayout &DL, Constant *UnitName) {
- assert(Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag);
-
- auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
- auto *Int8PtrPtrTy = Int8PtrTy->getPointerTo();
- auto *Int32Ty = Type::getInt32Ty(*Ctx);
- auto *Int32PtrTy = Type::getInt32PtrTy(*Ctx);
- auto *Int64Ty = Type::getInt64Ty(*Ctx);
- auto *Int64PtrTy = Type::getInt64PtrTy(*Ctx);
- // This structure should be kept consistent with the StructInfo struct
- // in the runtime library.
- // struct StructInfo {
- // const char *StructName;
- // u32 Size;
- // u32 NumFields;
- // u32 *FieldOffset; // auxiliary struct field info.
- // u32 *FieldSize; // auxiliary struct field info.
- // const char **FieldTypeName; // auxiliary struct field info.
- // u64 *FieldCounters;
- // u64 *ArrayCounter;
- // };
- auto *StructInfoTy =
- StructType::get(Int8PtrTy, Int32Ty, Int32Ty, Int32PtrTy, Int32PtrTy,
- Int8PtrPtrTy, Int64PtrTy, Int64PtrTy);
- auto *StructInfoPtrTy = StructInfoTy->getPointerTo();
- // This structure should be kept consistent with the CacheFragInfo struct
- // in the runtime library.
- // struct CacheFragInfo {
- // const char *UnitName;
- // u32 NumStructs;
- // StructInfo *Structs;
- // };
- auto *CacheFragInfoTy = StructType::get(Int8PtrTy, Int32Ty, StructInfoPtrTy);
-
- std::vector<StructType *> Vec = M.getIdentifiedStructTypes();
- unsigned NumStructs = 0;
- SmallVector<Constant *, 16> Initializers;
-
- for (auto &StructTy : Vec) {
- if (shouldIgnoreStructType(StructTy)) {
- ++NumIgnoredStructs;
- continue;
- }
- ++NumStructs;
-
- // StructName.
- SmallString<MaxStructCounterNameSize> CounterNameStr;
- createStructCounterName(StructTy, CounterNameStr);
- GlobalVariable *StructCounterName = createPrivateGlobalForString(
- M, CounterNameStr, /*AllowMerging*/true);
-
- // Counters.
- // We create the counter array with StructCounterName and weak linkage
- // so that the structs with the same name and layout from different
- // compilation units will be merged into one.
- auto *CounterArrayTy = ArrayType::get(Int64Ty,
- getStructCounterSize(StructTy));
- GlobalVariable *Counters =
- new GlobalVariable(M, CounterArrayTy, false,
- GlobalVariable::WeakAnyLinkage,
- ConstantAggregateZero::get(CounterArrayTy),
- CounterNameStr);
-
- // Remember the counter variable for each struct type.
- StructTyMap.insert(std::pair<Type *, GlobalVariable *>(StructTy, Counters));
-
- // We pass the field type name array, offset array, and size array to
- // the runtime for better reporting.
- GlobalVariable *TypeName = nullptr, *Offset = nullptr, *Size = nullptr;
- if (ClAuxFieldInfo)
- createCacheFragAuxGV(M, DL, StructTy, TypeName, Offset, Size);
-
- Constant *FieldCounterIdx[2];
- FieldCounterIdx[0] = ConstantInt::get(Int32Ty, 0);
- FieldCounterIdx[1] = ConstantInt::get(Int32Ty,
- getFieldCounterIdx(StructTy));
- Constant *ArrayCounterIdx[2];
- ArrayCounterIdx[0] = ConstantInt::get(Int32Ty, 0);
- ArrayCounterIdx[1] = ConstantInt::get(Int32Ty,
- getArrayCounterIdx(StructTy));
- Initializers.push_back(ConstantStruct::get(
- StructInfoTy,
- ConstantExpr::getPointerCast(StructCounterName, Int8PtrTy),
- ConstantInt::get(Int32Ty,
- DL.getStructLayout(StructTy)->getSizeInBytes()),
- ConstantInt::get(Int32Ty, StructTy->getNumElements()),
- Offset == nullptr ? ConstantPointerNull::get(Int32PtrTy)
- : ConstantExpr::getPointerCast(Offset, Int32PtrTy),
- Size == nullptr ? ConstantPointerNull::get(Int32PtrTy)
- : ConstantExpr::getPointerCast(Size, Int32PtrTy),
- TypeName == nullptr
- ? ConstantPointerNull::get(Int8PtrPtrTy)
- : ConstantExpr::getPointerCast(TypeName, Int8PtrPtrTy),
- ConstantExpr::getGetElementPtr(CounterArrayTy, Counters,
- FieldCounterIdx),
- ConstantExpr::getGetElementPtr(CounterArrayTy, Counters,
- ArrayCounterIdx)));
- }
- // Structs.
- Constant *StructInfo;
- if (NumStructs == 0) {
- StructInfo = ConstantPointerNull::get(StructInfoPtrTy);
- } else {
- auto *StructInfoArrayTy = ArrayType::get(StructInfoTy, NumStructs);
- StructInfo = ConstantExpr::getPointerCast(
- new GlobalVariable(M, StructInfoArrayTy, false,
- GlobalVariable::InternalLinkage,
- ConstantArray::get(StructInfoArrayTy, Initializers)),
- StructInfoPtrTy);
- }
-
- auto *CacheFragInfoGV = new GlobalVariable(
- M, CacheFragInfoTy, true, GlobalVariable::InternalLinkage,
- ConstantStruct::get(CacheFragInfoTy, UnitName,
- ConstantInt::get(Int32Ty, NumStructs), StructInfo));
- return CacheFragInfoGV;
-}
-
-// Create the tool-specific argument passed to EsanInit and EsanExit.
-Constant *EfficiencySanitizer::createEsanInitToolInfoArg(Module &M,
- const DataLayout &DL) {
- // This structure contains tool-specific information about each compilation
- // unit (module) and is passed to the runtime library.
- GlobalVariable *ToolInfoGV = nullptr;
-
- auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
- // Compilation unit name.
- auto *UnitName = ConstantExpr::getPointerCast(
- createPrivateGlobalForString(M, M.getModuleIdentifier(), true),
- Int8PtrTy);
-
- // Create the tool-specific variable.
- if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag)
- ToolInfoGV = createCacheFragInfoGV(M, DL, UnitName);
-
- if (ToolInfoGV != nullptr)
- return ConstantExpr::getPointerCast(ToolInfoGV, Int8PtrTy);
-
- // Create a null pointer if no tool-specific variable was created.
- return ConstantPointerNull::get(Int8PtrTy);
-}
-
-void EfficiencySanitizer::createDestructor(Module &M, Constant *ToolInfoArg) {
- PointerType *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
- EsanDtorFunction = Function::Create(FunctionType::get(Type::getVoidTy(*Ctx),
- false),
- GlobalValue::InternalLinkage,
- EsanModuleDtorName, &M);
- ReturnInst::Create(*Ctx, BasicBlock::Create(*Ctx, "", EsanDtorFunction));
- IRBuilder<> IRB_Dtor(EsanDtorFunction->getEntryBlock().getTerminator());
- FunctionCallee EsanExit =
- M.getOrInsertFunction(EsanExitName, IRB_Dtor.getVoidTy(), Int8PtrTy);
- IRB_Dtor.CreateCall(EsanExit, {ToolInfoArg});
- appendToGlobalDtors(M, EsanDtorFunction, EsanCtorAndDtorPriority);
-}
-
-bool EfficiencySanitizer::initOnModule(Module &M) {
-
- Triple TargetTriple(M.getTargetTriple());
- if (TargetTriple.isMIPS64())
- ShadowParams = ShadowParams40;
- else
- ShadowParams = ShadowParams47;
-
- Ctx = &M.getContext();
- const DataLayout &DL = M.getDataLayout();
- IRBuilder<> IRB(M.getContext());
- IntegerType *OrdTy = IRB.getInt32Ty();
- PointerType *Int8PtrTy = Type::getInt8PtrTy(*Ctx);
- IntptrTy = DL.getIntPtrType(M.getContext());
- // Create the variable passed to EsanInit and EsanExit.
- Constant *ToolInfoArg = createEsanInitToolInfoArg(M, DL);
- // Constructor
- // We specify the tool type both in the EsanWhichToolName global
- // and as an arg to the init routine as a sanity check.
- std::tie(EsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
- M, EsanModuleCtorName, EsanInitName, /*InitArgTypes=*/{OrdTy, Int8PtrTy},
- /*InitArgs=*/{
- ConstantInt::get(OrdTy, static_cast<int>(Options.ToolType)),
- ToolInfoArg});
- appendToGlobalCtors(M, EsanCtorFunction, EsanCtorAndDtorPriority);
-
- createDestructor(M, ToolInfoArg);
-
- new GlobalVariable(M, OrdTy, true,
- GlobalValue::WeakAnyLinkage,
- ConstantInt::get(OrdTy,
- static_cast<int>(Options.ToolType)),
- EsanWhichToolName);
-
- return true;
-}
-
-Value *EfficiencySanitizer::appToShadow(Value *Shadow, IRBuilder<> &IRB) {
- // Shadow = ((App & Mask) + Offs) >> Scale
- Shadow = IRB.CreateAnd(Shadow, ConstantInt::get(IntptrTy, ShadowParams.ShadowMask));
- uint64_t Offs;
- int Scale = ShadowScale[Options.ToolType];
- if (Scale <= 2)
- Offs = ShadowParams.ShadowOffs[Scale];
- else
- Offs = ShadowParams.ShadowOffs[0] << Scale;
- Shadow = IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Offs));
- if (Scale > 0)
- Shadow = IRB.CreateLShr(Shadow, Scale);
- return Shadow;
-}
-
-bool EfficiencySanitizer::shouldIgnoreMemoryAccess(Instruction *I) {
- if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) {
- // We'd like to know about cache fragmentation in vtable accesses and
- // constant data references, so we do not currently ignore anything.
- return false;
- } else if (Options.ToolType == EfficiencySanitizerOptions::ESAN_WorkingSet) {
- // TODO: the instrumentation disturbs the data layout on the stack, so we
- // may want to add an option to ignore stack references (if we can
- // distinguish them) to reduce overhead.
- }
- // TODO(bruening): future tools will return true in some cases.
- return false;
-}
-
-bool EfficiencySanitizer::runOnModule(Module &M) {
- bool Res = initOnModule(M);
- initializeCallbacks(M);
- for (auto &F : M) {
- Res |= runOnFunction(F, M);
- }
- return Res;
-}
-
-bool EfficiencySanitizer::runOnFunction(Function &F, Module &M) {
- // This is required to prevent instrumenting the call to __esan_init from
- // within the module constructor.
- if (&F == EsanCtorFunction)
- return false;
- SmallVector<Instruction *, 8> LoadsAndStores;
- SmallVector<Instruction *, 8> MemIntrinCalls;
- SmallVector<Instruction *, 8> GetElementPtrs;
- bool Res = false;
- const DataLayout &DL = M.getDataLayout();
- const TargetLibraryInfo *TLI =
- &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
-
- for (auto &BB : F) {
- for (auto &Inst : BB) {
- if ((isa<LoadInst>(Inst) || isa<StoreInst>(Inst) ||
- isa<AtomicRMWInst>(Inst) || isa<AtomicCmpXchgInst>(Inst)) &&
- !shouldIgnoreMemoryAccess(&Inst))
- LoadsAndStores.push_back(&Inst);
- else if (isa<MemIntrinsic>(Inst))
- MemIntrinCalls.push_back(&Inst);
- else if (isa<GetElementPtrInst>(Inst))
- GetElementPtrs.push_back(&Inst);
- else if (CallInst *CI = dyn_cast<CallInst>(&Inst))
- maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
- }
- }
-
- if (ClInstrumentLoadsAndStores) {
- for (auto Inst : LoadsAndStores) {
- Res |= instrumentLoadOrStore(Inst, DL);
- }
- }
-
- if (ClInstrumentMemIntrinsics) {
- for (auto Inst : MemIntrinCalls) {
- Res |= instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
- }
- }
-
- if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) {
- for (auto Inst : GetElementPtrs) {
- Res |= instrumentGetElementPtr(Inst, M);
- }
- }
-
- return Res;
-}
-
-bool EfficiencySanitizer::instrumentLoadOrStore(Instruction *I,
- const DataLayout &DL) {
- IRBuilder<> IRB(I);
- bool IsStore;
- Value *Addr;
- unsigned Alignment;
- if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
- IsStore = false;
- Alignment = Load->getAlignment();
- Addr = Load->getPointerOperand();
- } else if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
- IsStore = true;
- Alignment = Store->getAlignment();
- Addr = Store->getPointerOperand();
- } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
- IsStore = true;
- Alignment = 0;
- Addr = RMW->getPointerOperand();
- } else if (AtomicCmpXchgInst *Xchg = dyn_cast<AtomicCmpXchgInst>(I)) {
- IsStore = true;
- Alignment = 0;
- Addr = Xchg->getPointerOperand();
- } else
- llvm_unreachable("Unsupported mem access type");
-
- Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
- const uint32_t TypeSizeBytes = DL.getTypeStoreSizeInBits(OrigTy) / 8;
- FunctionCallee OnAccessFunc = nullptr;
-
- // Convert 0 to the default alignment.
- if (Alignment == 0)
- Alignment = DL.getPrefTypeAlignment(OrigTy);
-
- if (IsStore)
- NumInstrumentedStores++;
- else
- NumInstrumentedLoads++;
- int Idx = getMemoryAccessFuncIndex(Addr, DL);
- if (Idx < 0) {
- OnAccessFunc = IsStore ? EsanUnalignedStoreN : EsanUnalignedLoadN;
- IRB.CreateCall(OnAccessFunc,
- {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
- ConstantInt::get(IntptrTy, TypeSizeBytes)});
- } else {
- if (ClInstrumentFastpath &&
- instrumentFastpath(I, DL, IsStore, Addr, Alignment)) {
- NumFastpaths++;
- return true;
- }
- if (Alignment == 0 || (Alignment % TypeSizeBytes) == 0)
- OnAccessFunc = IsStore ? EsanAlignedStore[Idx] : EsanAlignedLoad[Idx];
- else
- OnAccessFunc = IsStore ? EsanUnalignedStore[Idx] : EsanUnalignedLoad[Idx];
- IRB.CreateCall(OnAccessFunc,
- IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
- }
- return true;
-}
-
-// It's simplest to replace the memset/memmove/memcpy intrinsics with
-// calls that the runtime library intercepts.
-// Our pass is late enough that calls should not turn back into intrinsics.
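-// For example (illustrative IR), "call void @llvm.memset.p0i8.i64(i8* %p,
-// i8 0, i64 %n, i1 false)" becomes "call i8* @memset(i8* %p, i32 0, i64 %n)",
-// which the runtime library can intercept.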
-bool EfficiencySanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
- IRBuilder<> IRB(MI);
- bool Res = false;
- if (isa<MemSetInst>(MI)) {
- IRB.CreateCall(
- MemsetFn,
- {IRB.CreatePointerCast(MI->getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getArgOperand(1), IRB.getInt32Ty(), false),
- IRB.CreateIntCast(MI->getArgOperand(2), IntptrTy, false)});
- MI->eraseFromParent();
- Res = true;
- } else if (isa<MemTransferInst>(MI)) {
- IRB.CreateCall(
- isa<MemCpyInst>(MI) ? MemcpyFn : MemmoveFn,
- {IRB.CreatePointerCast(MI->getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(MI->getArgOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getArgOperand(2), IntptrTy, false)});
- MI->eraseFromParent();
- Res = true;
- } else
- llvm_unreachable("Unsupported mem intrinsic type");
- return Res;
-}
-
-bool EfficiencySanitizer::instrumentGetElementPtr(Instruction *I, Module &M) {
- GetElementPtrInst *GepInst = dyn_cast<GetElementPtrInst>(I);
- bool Res = false;
- if (GepInst == nullptr || GepInst->getNumIndices() == 1) {
- ++NumIgnoredGEPs;
- return false;
- }
- Type *SourceTy = GepInst->getSourceElementType();
- StructType *StructTy = nullptr;
- ConstantInt *Idx;
- // Check if GEP calculates address from a struct array.
- if (isa<StructType>(SourceTy)) {
- StructTy = cast<StructType>(SourceTy);
- Idx = dyn_cast<ConstantInt>(GepInst->getOperand(1));
- if ((Idx == nullptr || Idx->getSExtValue() != 0) &&
- !shouldIgnoreStructType(StructTy) && StructTyMap.count(StructTy) != 0)
- Res |= insertCounterUpdate(I, StructTy, getArrayCounterIdx(StructTy));
- }
- // Iterate over all indices (except the first and the last) within the GEP
- // instruction to handle possible nested struct field address calculations.
- for (unsigned i = 1; i < GepInst->getNumIndices(); ++i) {
- SmallVector<Value *, 8> IdxVec(GepInst->idx_begin(),
- GepInst->idx_begin() + i);
- Type *Ty = GetElementPtrInst::getIndexedType(SourceTy, IdxVec);
- unsigned CounterIdx = 0;
- if (isa<ArrayType>(Ty)) {
- ArrayType *ArrayTy = cast<ArrayType>(Ty);
- StructTy = dyn_cast<StructType>(ArrayTy->getElementType());
- if (shouldIgnoreStructType(StructTy) || StructTyMap.count(StructTy) == 0)
- continue;
- // The last counter for struct array access.
- CounterIdx = getArrayCounterIdx(StructTy);
- } else if (isa<StructType>(Ty)) {
- StructTy = cast<StructType>(Ty);
- if (shouldIgnoreStructType(StructTy) || StructTyMap.count(StructTy) == 0)
- continue;
- // Get the StructTy's subfield index.
- Idx = cast<ConstantInt>(GepInst->getOperand(i+1));
- assert(Idx->getSExtValue() >= 0 &&
- Idx->getSExtValue() < StructTy->getNumElements());
- CounterIdx = getFieldCounterIdx(StructTy) + Idx->getSExtValue();
- }
- Res |= insertCounterUpdate(I, StructTy, CounterIdx);
- }
- if (Res)
- ++NumInstrumentedGEPs;
- else
- ++NumIgnoredGEPs;
- return Res;
-}
-
-bool EfficiencySanitizer::insertCounterUpdate(Instruction *I,
- StructType *StructTy,
- unsigned CounterIdx) {
- GlobalVariable *CounterArray = StructTyMap[StructTy];
- if (CounterArray == nullptr)
- return false;
- IRBuilder<> IRB(I);
- Constant *Indices[2];
- // Xref http://llvm.org/docs/LangRef.html#i-getelementptr and
- // http://llvm.org/docs/GetElementPtr.html.
- // The first index of the GEP instruction steps through the first operand,
- // i.e., the array itself.
- Indices[0] = ConstantInt::get(IRB.getInt32Ty(), 0);
- // The second index is the index within the array.
- Indices[1] = ConstantInt::get(IRB.getInt32Ty(), CounterIdx);
- Constant *Counter =
- ConstantExpr::getGetElementPtr(
- ArrayType::get(IRB.getInt64Ty(), getStructCounterSize(StructTy)),
- CounterArray, Indices);
- Value *Load = IRB.CreateLoad(IRB.getInt64Ty(), Counter);
- IRB.CreateStore(IRB.CreateAdd(Load, ConstantInt::get(IRB.getInt64Ty(), 1)),
- Counter);
- return true;
-}
-
-int EfficiencySanitizer::getMemoryAccessFuncIndex(Value *Addr,
- const DataLayout &DL) {
- Type *OrigPtrTy = Addr->getType();
- Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
- assert(OrigTy->isSized());
- // The store size in bits is always a multiple of 8, so the division is exact.
- uint32_t TypeSizeBytes = DL.getTypeStoreSizeInBits(OrigTy) / 8;
- if (TypeSizeBytes != 1 && TypeSizeBytes != 2 && TypeSizeBytes != 4 &&
- TypeSizeBytes != 8 && TypeSizeBytes != 16) {
- // Irregular sizes do not have per-size call targets.
- NumAccessesWithIrregularSize++;
- return -1;
- }
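- // Regular sizes map to their log2 as the index: 1->0, 2->1, 4->2, 8->3,
- // 16->4, matching NumberOfAccessSizes.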
- size_t Idx = countTrailingZeros(TypeSizeBytes);
- assert(Idx < NumberOfAccessSizes);
- return Idx;
-}
-
-bool EfficiencySanitizer::instrumentFastpath(Instruction *I,
- const DataLayout &DL, bool IsStore,
- Value *Addr, unsigned Alignment) {
- if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) {
- return instrumentFastpathCacheFrag(I, DL, Addr, Alignment);
- } else if (Options.ToolType == EfficiencySanitizerOptions::ESAN_WorkingSet) {
- return instrumentFastpathWorkingSet(I, DL, Addr, Alignment);
- }
- return false;
-}
-
-bool EfficiencySanitizer::instrumentFastpathCacheFrag(Instruction *I,
- const DataLayout &DL,
- Value *Addr,
- unsigned Alignment) {
- // Do nothing.
- return true; // Return true to avoid slowpath instrumentation.
-}
-
-bool EfficiencySanitizer::instrumentFastpathWorkingSet(
- Instruction *I, const DataLayout &DL, Value *Addr, unsigned Alignment) {
- assert(ShadowScale[Options.ToolType] == 6); // The code below assumes this
- IRBuilder<> IRB(I);
- Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
- const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
- // Bail to the slowpath if the access might touch multiple cache lines.
- // An access aligned to its size is guaranteed to be intra-cache-line.
- // getMemoryAccessFuncIndex has already ruled out sizes larger than 16
- // bytes, and thus larger than a cache line, for the platforms this tool
- // targets (our shadow memory setup assumes 64-byte cache lines).
- assert(TypeSize <= 128);
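- // If the access may span two cache lines, either optimistically treat it
- // as intra-line (counting it in NumAssumedIntraCacheLine) or bail to the
- // slowpath, depending on ClAssumeIntraCacheLine.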
- if (!(TypeSize == 8 ||
- (Alignment % (TypeSize / 8)) == 0)) {
- if (ClAssumeIntraCacheLine)
- ++NumAssumedIntraCacheLine;
- else
- return false;
- }
-
- // We inline instrumentation to set the corresponding shadow bits for
- // each cache line touched by the application. Here we handle a single
- // load or store where we've already ruled out the possibility that it
- // might touch more than one cache line, so we simply update the
- // shadow memory for a single cache line.
- // Our shadow memory model is fine with races when manipulating shadow values.
- // We generate the following code:
- //
- // const char BitMask = 0x81;
- // char *ShadowAddr = appToShadow(AppAddr);
- // if ((*ShadowAddr & BitMask) != BitMask)
- // *ShadowAddr |= BitMask;
- //
- Value *AddrPtr = IRB.CreatePointerCast(Addr, IntptrTy);
- Value *ShadowPtr = appToShadow(AddrPtr, IRB);
- Type *ShadowTy = IntegerType::get(*Ctx, 8U);
- Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
- // The bottom bit is used for the current sampling period's working set.
- // The top bit is used for the total working set. We set both on each
- // memory access, if they are not already set.
- Value *ValueMask = ConstantInt::get(ShadowTy, 0x81); // 10000001B
-
- Value *OldValue =
- IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
- // The AND and CMP will be turned into a TEST instruction by the compiler.
- Value *Cmp = IRB.CreateICmpNE(IRB.CreateAnd(OldValue, ValueMask), ValueMask);
- Instruction *CmpTerm = SplitBlockAndInsertIfThen(Cmp, I, false);
- // FIXME: do I need to call SetCurrentDebugLocation?
- IRB.SetInsertPoint(CmpTerm);
- // We use OR to set the shadow bits to avoid corrupting the middle 6 bits,
- // which are used by the runtime library.
- Value *NewVal = IRB.CreateOr(OldValue, ValueMask);
- IRB.CreateStore(NewVal, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
- IRB.SetInsertPoint(I);
-
- return true;
-}
initializeThreadSanitizerLegacyPassPass(Registry);
initializeSanitizerCoverageModulePass(Registry);
initializeDataFlowSanitizerPass(Registry);
- initializeEfficiencySanitizerPass(Registry);
}
/// LLVMInitializeInstrumentation - C binding for
+++ /dev/null
-; Test marking string functions as nobuiltin in EfficiencySanitizer.
-;
-; RUN: opt < %s -esan -S | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-unknown-linux-gnu"
-
-declare i8* @memchr(i8* %a, i32 %b, i64 %c)
-declare i32 @memcmp(i8* %a, i8* %b, i64 %c)
-declare i32 @strcmp(i8* %a, i8* %b)
-declare i8* @strcpy(i8* %a, i8* %b)
-declare i8* @stpcpy(i8* %a, i8* %b)
-declare i64 @strlen(i8* %a)
-declare i64 @strnlen(i8* %a, i64 %b)
-
-; CHECK: call{{.*}}@memchr{{.*}} #[[ATTR:[0-9]+]]
-; CHECK: call{{.*}}@memcmp{{.*}} #[[ATTR]]
-; CHECK: call{{.*}}@strcmp{{.*}} #[[ATTR]]
-; CHECK: call{{.*}}@strcpy{{.*}} #[[ATTR]]
-; CHECK: call{{.*}}@stpcpy{{.*}} #[[ATTR]]
-; CHECK: call{{.*}}@strlen{{.*}} #[[ATTR]]
-; CHECK: call{{.*}}@strnlen{{.*}} #[[ATTR]]
-; attributes #[[ATTR]] = { nobuiltin }
-
-define void @f1(i8* %a, i8* %b) nounwind uwtable {
- tail call i8* @memchr(i8* %a, i32 1, i64 12)
- tail call i32 @memcmp(i8* %a, i8* %b, i64 12)
- tail call i32 @strcmp(i8* %a, i8* %b)
- tail call i8* @strcpy(i8* %a, i8* %b)
- tail call i8* @stpcpy(i8* %a, i8* %b)
- tail call i64 @strlen(i8* %a)
- tail call i64 @strnlen(i8* %a, i64 12)
- ret void
-}
+++ /dev/null
-; Test basic EfficiencySanitizer struct field count instrumentation.
-;
-; RUN: opt < %s -esan -esan-cache-frag -S | FileCheck %s
-
-%struct.A = type { i32, i32 }
-%union.U = type { double }
-%struct.C = type { %struct.anon, %union.anon, [10 x i8] }
-%struct.anon = type { i32, i32 }
-%union.anon = type { double }
-
-; CHECK: @0 = private unnamed_addr constant [8 x i8] c"<stdin>\00", align 1
-; CHECK-NEXT: @1 = private unnamed_addr constant [17 x i8] c"struct.A$2$11$11\00", align 1
-; CHECK-NEXT: @"struct.A$2$11$11" = weak global [3 x i64] zeroinitializer
-; CHECK-NEXT: @2 = internal constant [2 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @5, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @6, i32 0, i32 0)]
-; CHECK-NEXT: @3 = internal constant [2 x i32] [i32 0, i32 4]
-; CHECK-NEXT: @4 = internal constant [2 x i32] [i32 4, i32 4]
-; CHECK-NEXT: @5 = private unnamed_addr constant [4 x i8] c"i32\00", align 1
-; CHECK-NEXT: @6 = private unnamed_addr constant [4 x i8] c"i32\00", align 1
-; CHECK-NEXT: @7 = private unnamed_addr constant [12 x i8] c"union.U$1$3\00", align 1
-; CHECK-NEXT: @"union.U$1$3" = weak global [2 x i64] zeroinitializer
-; CHECK-NEXT: @8 = internal constant [1 x i8*] [i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0)]
-; CHECK-NEXT: @9 = internal constant [1 x i32] zeroinitializer
-; CHECK-NEXT: @10 = internal constant [1 x i32] [i32 8]
-; CHECK-NEXT: @11 = private unnamed_addr constant [7 x i8] c"double\00", align 1
-; CHECK-NEXT: @12 = private unnamed_addr constant [20 x i8] c"struct.C$3$14$13$13\00", align 1
-; CHECK-NEXT: @"struct.C$3$14$13$13" = weak global [4 x i64] zeroinitializer
-; CHECK-NEXT: @13 = internal constant [3 x i8*] [i8* getelementptr inbounds ([33 x i8], [33 x i8]* @16, i32 0, i32 0), i8* getelementptr inbounds ([30 x i8], [30 x i8]* @17, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @18, i32 0, i32 0)]
-; CHECK-NEXT: @14 = internal constant [3 x i32] [i32 0, i32 8, i32 16]
-; CHECK-NEXT: @15 = internal constant [3 x i32] [i32 8, i32 8, i32 10]
-; CHECK-NEXT: @16 = private unnamed_addr constant [33 x i8] c"%struct.anon = type { i32, i32 }\00", align 1
-; CHECK-NEXT: @17 = private unnamed_addr constant [30 x i8] c"%union.anon = type { double }\00", align 1
-; CHECK-NEXT: @18 = private unnamed_addr constant [10 x i8] c"[10 x i8]\00", align 1
-; CHECK-NEXT: @19 = private unnamed_addr constant [20 x i8] c"struct.anon$2$11$11\00", align 1
-; CHECK-NEXT: @"struct.anon$2$11$11" = weak global [3 x i64] zeroinitializer
-; CHECK-NEXT: @20 = internal constant [2 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @23, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @24, i32 0, i32 0)]
-; CHECK-NEXT: @21 = internal constant [2 x i32] [i32 0, i32 4]
-; CHECK-NEXT: @22 = internal constant [2 x i32] [i32 4, i32 4]
-; CHECK-NEXT: @23 = private unnamed_addr constant [4 x i8] c"i32\00", align 1
-; CHECK-NEXT: @24 = private unnamed_addr constant [4 x i8] c"i32\00", align 1
-; CHECK-NEXT: @25 = private unnamed_addr constant [15 x i8] c"union.anon$1$3\00", align 1
-; CHECK-NEXT: @"union.anon$1$3" = weak global [2 x i64] zeroinitializer
-; CHECK-NEXT: @26 = internal constant [1 x i8*] [i8* getelementptr inbounds ([7 x i8], [7 x i8]* @29, i32 0, i32 0)]
-; CHECK-NEXT: @27 = internal constant [1 x i32] zeroinitializer
-; CHECK-NEXT: @28 = internal constant [1 x i32] [i32 8]
-; CHECK-NEXT: @29 = private unnamed_addr constant [7 x i8] c"double\00", align 1
-; CHECK-NEXT: @30 = internal global [5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }] [{ i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([17 x i8], [17 x i8]* @1, i32 0, i32 0), i32 8, i32 2, i32* getelementptr inbounds ([2 x i32], [2 x i32]* @3, i32 0, i32 0), i32* getelementptr inbounds ([2 x i32], [2 x i32]* @4, i32 0, i32 0), i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 2) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([12 x i8], [12 x i8]* @7, i32 0, i32 0), i32 8, i32 1, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @9, i32 0, i32 0), i32* getelementptr inbounds ([1 x i32], [1 x i32]* @10, i32 0, i32 0), i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @8, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.U$1$3", i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.U$1$3", i32 0, i32 1) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([20 x i8], [20 x i8]* @12, i32 0, i32 0), i32 32, i32 3, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @14, i32 0, i32 0), i32* getelementptr inbounds ([3 x i32], [3 x i32]* @15, i32 0, i32 0), i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @13, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([20 x i8], [20 x i8]* @19, i32 0, i32 0), i32 8, i32 2, i32* getelementptr inbounds ([2 x i32], [2 x i32]* @21, i32 0, i32 0), i32* getelementptr inbounds ([2 x i32], [2 x i32]* @22, i32 0, i32 0), i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @20, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 2) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([15 x i8], [15 x i8]* @25, i32 0, i32 0), i32 8, i32 1, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @27, i32 0, i32 0), i32* getelementptr inbounds ([1 x i32], [1 x i32]* @28, i32 0, i32 0), i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @26, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.anon$1$3", i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.anon$1$3", i32 0, i32 1) }]
-; CHECK-NEXT: @31 = internal constant { i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* } { i8* getelementptr inbounds ([8 x i8], [8 x i8]* @0, i32 0, i32 0), i32 5, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* getelementptr inbounds ([5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }], [5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }]* @30, i32 0, i32 0) }
-
-define i32 @main() {
-entry:
- %a = alloca %struct.A, align 4
- %u = alloca %union.U, align 8
- %c = alloca [2 x %struct.C], align 16
- %k = alloca %struct.A*, align 8
- %x = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 0
- %y = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1
- %f = bitcast %union.U* %u to float*
- %d = bitcast %union.U* %u to double*
- %arrayidx = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
- %cs = getelementptr inbounds %struct.C, %struct.C* %arrayidx, i32 0, i32 0
- %x1 = getelementptr inbounds %struct.anon, %struct.anon* %cs, i32 0, i32 0
- %arrayidx2 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
- %cs3 = getelementptr inbounds %struct.C, %struct.C* %arrayidx2, i32 0, i32 0
- %y4 = getelementptr inbounds %struct.anon, %struct.anon* %cs3, i32 0, i32 1
- %arrayidx5 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
- %cu = getelementptr inbounds %struct.C, %struct.C* %arrayidx5, i32 0, i32 1
- %f6 = bitcast %union.anon* %cu to float*
- %arrayidx7 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
- %cu8 = getelementptr inbounds %struct.C, %struct.C* %arrayidx7, i32 0, i32 1
- %d9 = bitcast %union.anon* %cu8 to double*
- %arrayidx10 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
- %c11 = getelementptr inbounds %struct.C, %struct.C* %arrayidx10, i32 0, i32 2
- %arrayidx12 = getelementptr inbounds [10 x i8], [10 x i8]* %c11, i64 0, i64 2
- %k1 = load %struct.A*, %struct.A** %k, align 8
- %arrayidx13 = getelementptr inbounds %struct.A, %struct.A* %k1, i64 0
- ret i32 0
-}
-
-; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
-; CHECK: @llvm.global_dtors = {{.*}}@esan.module_dtor
-
-; CHECK: %a = alloca %struct.A, align 4
-; CHECK-NEXT: %u = alloca %union.U, align 8
-; CHECK-NEXT: %c = alloca [2 x %struct.C], align 16
-; CHECK-NEXT: %k = alloca %struct.A*, align 8
-; CHECK-NEXT: %0 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0)
-; CHECK-NEXT: %1 = add i64 %0, 1
-; CHECK-NEXT: store i64 %1, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0)
-; CHECK-NEXT: %x = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 0
-; CHECK-NEXT: %2 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 1)
-; CHECK-NEXT: %3 = add i64 %2, 1
-; CHECK-NEXT: store i64 %3, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 1)
-; CHECK-NEXT: %y = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1
-; CHECK-NEXT: %f = bitcast %union.U* %u to float*
-; CHECK-NEXT: %d = bitcast %union.U* %u to double*
-; CHECK-NEXT: %4 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %5 = add i64 %4, 1
-; CHECK-NEXT: store i64 %5, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
-; CHECK-NEXT: %6 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
-; CHECK-NEXT: %7 = add i64 %6, 1
-; CHECK-NEXT: store i64 %7, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
-; CHECK-NEXT: %cs = getelementptr inbounds %struct.C, %struct.C* %arrayidx, i32 0, i32 0
-; CHECK-NEXT: %8 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0)
-; CHECK-NEXT: %9 = add i64 %8, 1
-; CHECK-NEXT: store i64 %9, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0)
-; CHECK-NEXT: %x1 = getelementptr inbounds %struct.anon, %struct.anon* %cs, i32 0, i32 0
-; CHECK-NEXT: %10 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %11 = add i64 %10, 1
-; CHECK-NEXT: store i64 %11, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx2 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
-; CHECK-NEXT: %12 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
-; CHECK-NEXT: %13 = add i64 %12, 1
-; CHECK-NEXT: store i64 %13, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
-; CHECK-NEXT: %cs3 = getelementptr inbounds %struct.C, %struct.C* %arrayidx2, i32 0, i32 0
-; CHECK-NEXT: %14 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 1)
-; CHECK-NEXT: %15 = add i64 %14, 1
-; CHECK-NEXT: store i64 %15, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 1)
-; CHECK-NEXT: %y4 = getelementptr inbounds %struct.anon, %struct.anon* %cs3, i32 0, i32 1
-; CHECK-NEXT: %16 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %17 = add i64 %16, 1
-; CHECK-NEXT: store i64 %17, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx5 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
-; CHECK-NEXT: %18 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
-; CHECK-NEXT: %19 = add i64 %18, 1
-; CHECK-NEXT: store i64 %19, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
-; CHECK-NEXT: %cu = getelementptr inbounds %struct.C, %struct.C* %arrayidx5, i32 0, i32 1
-; CHECK-NEXT: %f6 = bitcast %union.anon* %cu to float*
-; CHECK-NEXT: %20 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %21 = add i64 %20, 1
-; CHECK-NEXT: store i64 %21, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx7 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
-; CHECK-NEXT: %22 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
-; CHECK-NEXT: %23 = add i64 %22, 1
-; CHECK-NEXT: store i64 %23, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
-; CHECK-NEXT: %cu8 = getelementptr inbounds %struct.C, %struct.C* %arrayidx7, i32 0, i32 1
-; CHECK-NEXT: %d9 = bitcast %union.anon* %cu8 to double*
-; CHECK-NEXT: %24 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %25 = add i64 %24, 1
-; CHECK-NEXT: store i64 %25, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx10 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
-; CHECK-NEXT: %26 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 2)
-; CHECK-NEXT: %27 = add i64 %26, 1
-; CHECK-NEXT: store i64 %27, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 2)
-; CHECK-NEXT: %c11 = getelementptr inbounds %struct.C, %struct.C* %arrayidx10, i32 0, i32 2
-; CHECK-NEXT: %arrayidx12 = getelementptr inbounds [10 x i8], [10 x i8]* %c11, i64 0, i64 2
-; CHECK-NEXT: %k1 = load %struct.A*, %struct.A** %k, align 8
-; CHECK-NEXT: %arrayidx13 = getelementptr inbounds %struct.A, %struct.A* %k1, i64 0
-; CHECK-NEXT: ret i32 0
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Top-level:
-
-; CHECK: define internal void @esan.module_ctor()
-; CHECK: call void @__esan_init(i32 1, i8* bitcast ({ i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* }* @31 to i8*))
-; CHECK: define internal void @esan.module_dtor()
-; CHECK: call void @__esan_exit(i8* bitcast ({ i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* }* @31 to i8*))
+++ /dev/null
-; Test the complex GetElementPtr instruction handling in the EfficiencySanitizer
-; cache fragmentation tool.
-;
-; RUN: opt < %s -esan -esan-cache-frag -S | FileCheck %s
-
-; Code from http://llvm.org/docs/LangRef.html#getelementptr-instruction
-; struct RT {
-; char A;
-; int B[10][20];
-; char C;
-; };
-; struct ST {
-; int X;
-; double Y;
-; struct RT Z;
-; };
-;
-; int *foo(struct ST *s) {
-; return &s[1].Z.B[5][13];
-; }
-
-%struct.RT = type { i8, [10 x [20 x i32]], i8 }
-%struct.ST = type { i32, double, %struct.RT }
-
-define i32* @foo(%struct.ST* %s) nounwind uwtable readnone optsize ssp {
-entry:
- %arrayidx = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 1, i32 2, i32 1, i64 5, i64 13
- ret i32* %arrayidx
-}
-
-; CHECK: %0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.ST$3$13$3$11", i32 0, i32 3)
-; CHECK-NEXT: %1 = add i64 %0, 1
-; CHECK-NEXT: store i64 %1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.ST$3$13$3$11", i32 0, i32 3)
-; CHECK-NEXT: %2 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.ST$3$13$3$11", i32 0, i32 2)
-; CHECK-NEXT: %3 = add i64 %2, 1
-; CHECK-NEXT: store i64 %3, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.ST$3$13$3$11", i32 0, i32 2)
-; CHECK-NEXT: %4 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.RT$3$11$14$11", i32 0, i32 1)
-; CHECK-NEXT: %5 = add i64 %4, 1
-; CHECK-NEXT: store i64 %5, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.RT$3$11$14$11", i32 0, i32 1)
-; CHECK-NEXT: %arrayidx = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 1, i32 2, i32 1, i64 5, i64 13
-; CHECK-NEXT: ret i32* %arrayidx
+++ /dev/null
-; Test basic EfficiencySanitizer struct field count instrumentation with -esan-aux-field-info=false
-;
-; RUN: opt < %s -esan -esan-cache-frag -esan-aux-field-info=false -S | FileCheck %s
-
-%struct.A = type { i32, i32 }
-%union.U = type { double }
-%struct.C = type { %struct.anon, %union.anon, [10 x i8] }
-%struct.anon = type { i32, i32 }
-%union.anon = type { double }
-
-; CHECK: @0 = private unnamed_addr constant [8 x i8] c"<stdin>\00", align 1
-; CHECK-NEXT: @1 = private unnamed_addr constant [17 x i8] c"struct.A$2$11$11\00", align 1
-; CHECK-NEXT: @"struct.A$2$11$11" = weak global [3 x i64] zeroinitializer
-; CHECK-NEXT: @2 = private unnamed_addr constant [12 x i8] c"union.U$1$3\00", align 1
-; CHECK-NEXT: @"union.U$1$3" = weak global [2 x i64] zeroinitializer
-; CHECK-NEXT: @3 = private unnamed_addr constant [20 x i8] c"struct.C$3$14$13$13\00", align 1
-; CHECK-NEXT: @"struct.C$3$14$13$13" = weak global [4 x i64] zeroinitializer
-; CHECK-NEXT: @4 = private unnamed_addr constant [20 x i8] c"struct.anon$2$11$11\00", align 1
-; CHECK-NEXT: @"struct.anon$2$11$11" = weak global [3 x i64] zeroinitializer
-; CHECK-NEXT: @5 = private unnamed_addr constant [15 x i8] c"union.anon$1$3\00", align 1
-; CHECK-NEXT: @"union.anon$1$3" = weak global [2 x i64] zeroinitializer
-; CHECK-NEXT: @6 = internal global [5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }] [{ i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([17 x i8], [17 x i8]* @1, i32 0, i32 0), i32 8, i32 2, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 2) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([12 x i8], [12 x i8]* @2, i32 0, i32 0), i32 8, i32 1, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.U$1$3", i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.U$1$3", i32 0, i32 1) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([20 x i8], [20 x i8]* @3, i32 0, i32 0), i32 32, i32 3, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([20 x i8], [20 x i8]* @4, i32 0, i32 0), i32 8, i32 2, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 2) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([15 x i8], [15 x i8]* @5, i32 0, i32 0), i32 8, i32 1, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.anon$1$3", i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.anon$1$3", i32 0, i32 1) }]
-; CHECK-NEXT: @7 = internal constant { i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* } { i8* getelementptr inbounds ([8 x i8], [8 x i8]* @0, i32 0, i32 0), i32 5, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* getelementptr inbounds ([5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }], [5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }]* @6, i32 0, i32 0) }
-
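-; A note on the expected output above (an inference from the CHECK lines, not
-; documented behavior): each struct type gets a weak-global counter array with
-; one i64 slot per field plus one trailing slot, and the checks below show the
-; trailing slot being bumped for GEPs that index an array of the struct. The
-; $-separated suffixes in the array names appear to encode the field count
-; followed by per-field type tags. With -esan-aux-field-info=false, the three
-; auxiliary arrays in each @6 descriptor (presumably field offsets, sizes, and
-; type names) are left null; @6 holds one descriptor per struct (name, byte
-; size, field count, and pointers to its field counters and to the trailing
-; array counter), and @7 pairs that list with the module name for registration
-; with the runtime.
-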
-define i32 @main() {
-entry:
- %a = alloca %struct.A, align 4
- %u = alloca %union.U, align 8
- %c = alloca [2 x %struct.C], align 16
- %k = alloca %struct.A*, align 8
- %x = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 0
- %y = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1
- %f = bitcast %union.U* %u to float*
- %d = bitcast %union.U* %u to double*
- %arrayidx = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
- %cs = getelementptr inbounds %struct.C, %struct.C* %arrayidx, i32 0, i32 0
- %x1 = getelementptr inbounds %struct.anon, %struct.anon* %cs, i32 0, i32 0
- %arrayidx2 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
- %cs3 = getelementptr inbounds %struct.C, %struct.C* %arrayidx2, i32 0, i32 0
- %y4 = getelementptr inbounds %struct.anon, %struct.anon* %cs3, i32 0, i32 1
- %arrayidx5 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
- %cu = getelementptr inbounds %struct.C, %struct.C* %arrayidx5, i32 0, i32 1
- %f6 = bitcast %union.anon* %cu to float*
- %arrayidx7 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
- %cu8 = getelementptr inbounds %struct.C, %struct.C* %arrayidx7, i32 0, i32 1
- %d9 = bitcast %union.anon* %cu8 to double*
- %arrayidx10 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
- %c11 = getelementptr inbounds %struct.C, %struct.C* %arrayidx10, i32 0, i32 2
- %arrayidx12 = getelementptr inbounds [10 x i8], [10 x i8]* %c11, i64 0, i64 2
- %k1 = load %struct.A*, %struct.A** %k, align 8
- %arrayidx13 = getelementptr inbounds %struct.A, %struct.A* %k1, i64 0
- ret i32 0
-}
-
-; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
-; CHECK: @llvm.global_dtors = {{.*}}@esan.module_dtor
-
-; CHECK: %a = alloca %struct.A, align 4
-; CHECK-NEXT: %u = alloca %union.U, align 8
-; CHECK-NEXT: %c = alloca [2 x %struct.C], align 16
-; CHECK-NEXT: %k = alloca %struct.A*, align 8
-; CHECK-NEXT: %0 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0)
-; CHECK-NEXT: %1 = add i64 %0, 1
-; CHECK-NEXT: store i64 %1, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0)
-; CHECK-NEXT: %x = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 0
-; CHECK-NEXT: %2 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 1)
-; CHECK-NEXT: %3 = add i64 %2, 1
-; CHECK-NEXT: store i64 %3, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 1)
-; CHECK-NEXT: %y = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1
-; CHECK-NEXT: %f = bitcast %union.U* %u to float*
-; CHECK-NEXT: %d = bitcast %union.U* %u to double*
-; CHECK-NEXT: %4 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %5 = add i64 %4, 1
-; CHECK-NEXT: store i64 %5, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
-; CHECK-NEXT: %6 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
-; CHECK-NEXT: %7 = add i64 %6, 1
-; CHECK-NEXT: store i64 %7, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
-; CHECK-NEXT: %cs = getelementptr inbounds %struct.C, %struct.C* %arrayidx, i32 0, i32 0
-; CHECK-NEXT: %8 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0)
-; CHECK-NEXT: %9 = add i64 %8, 1
-; CHECK-NEXT: store i64 %9, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0)
-; CHECK-NEXT: %x1 = getelementptr inbounds %struct.anon, %struct.anon* %cs, i32 0, i32 0
-; CHECK-NEXT: %10 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %11 = add i64 %10, 1
-; CHECK-NEXT: store i64 %11, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx2 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
-; CHECK-NEXT: %12 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
-; CHECK-NEXT: %13 = add i64 %12, 1
-; CHECK-NEXT: store i64 %13, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
-; CHECK-NEXT: %cs3 = getelementptr inbounds %struct.C, %struct.C* %arrayidx2, i32 0, i32 0
-; CHECK-NEXT: %14 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 1)
-; CHECK-NEXT: %15 = add i64 %14, 1
-; CHECK-NEXT: store i64 %15, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 1)
-; CHECK-NEXT: %y4 = getelementptr inbounds %struct.anon, %struct.anon* %cs3, i32 0, i32 1
-; CHECK-NEXT: %16 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %17 = add i64 %16, 1
-; CHECK-NEXT: store i64 %17, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx5 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
-; CHECK-NEXT: %18 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
-; CHECK-NEXT: %19 = add i64 %18, 1
-; CHECK-NEXT: store i64 %19, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
-; CHECK-NEXT: %cu = getelementptr inbounds %struct.C, %struct.C* %arrayidx5, i32 0, i32 1
-; CHECK-NEXT: %f6 = bitcast %union.anon* %cu to float*
-; CHECK-NEXT: %20 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %21 = add i64 %20, 1
-; CHECK-NEXT: store i64 %21, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx7 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
-; CHECK-NEXT: %22 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
-; CHECK-NEXT: %23 = add i64 %22, 1
-; CHECK-NEXT: store i64 %23, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
-; CHECK-NEXT: %cu8 = getelementptr inbounds %struct.C, %struct.C* %arrayidx7, i32 0, i32 1
-; CHECK-NEXT: %d9 = bitcast %union.anon* %cu8 to double*
-; CHECK-NEXT: %24 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %25 = add i64 %24, 1
-; CHECK-NEXT: store i64 %25, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
-; CHECK-NEXT: %arrayidx10 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
-; CHECK-NEXT: %26 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 2)
-; CHECK-NEXT: %27 = add i64 %26, 1
-; CHECK-NEXT: store i64 %27, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 2)
-; CHECK-NEXT: %c11 = getelementptr inbounds %struct.C, %struct.C* %arrayidx10, i32 0, i32 2
-; CHECK-NEXT: %arrayidx12 = getelementptr inbounds [10 x i8], [10 x i8]* %c11, i64 0, i64 2
-; CHECK-NEXT: %k1 = load %struct.A*, %struct.A** %k, align 8
-; CHECK-NEXT: %arrayidx13 = getelementptr inbounds %struct.A, %struct.A* %k1, i64 0
-; CHECK-NEXT: ret i32 0
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Top-level:
-
-; CHECK: define internal void @esan.module_ctor()
-; CHECK: call void @__esan_init(i32 1, i8* bitcast ({ i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* }* @7 to i8*))
-; CHECK: define internal void @esan.module_dtor()
-; CHECK: call void @__esan_exit(i8* bitcast ({ i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* }* @7 to i8*))
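-; The ctor here passes i32 1 (cache-frag) and the @7 struct info; the
-; working-set tests below pass i32 2 and a null pointer, so the first
-; argument is presumably a tool identifier.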
+++ /dev/null
-; Test basic EfficiencySanitizer working set instrumentation.
-;
-; RUN: opt < %s -esan -esan-working-set -S | FileCheck %s
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Intra-cache-line
-
-define i8 @aligned1(i8* %a) {
-entry:
- %tmp1 = load i8, i8* %a, align 1
- ret i8 %tmp1
-; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
-; CHECK: %0 = ptrtoint i8* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i8, i8* %a, align 1
-; CHECK-NEXT: ret i8 %tmp1
-}
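-
-; The inline fastpath above computes the working-set shadow address:
-; mask the pointer to 44 bits (17592186044415 == 2^44 - 1), add the shadow
-; offset (1337006139375616 == 0x4C00000000000), and shift right by 6, so one
-; shadow byte covers a 64-byte cache line. The load/and/icmp/or/store
-; sequence then sets the 0x81 bits (-127) in that shadow byte, skipping the
-; store when they are already set (presumably to avoid dirtying the shadow
-; line on repeated accesses). The same pattern repeats for each size below.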
-
-define i16 @aligned2(i16* %a) {
-entry:
- %tmp1 = load i16, i16* %a, align 2
- ret i16 %tmp1
-; CHECK: %0 = ptrtoint i16* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i16, i16* %a, align 2
-; CHECK-NEXT: ret i16 %tmp1
-}
-
-define i32 @aligned4(i32* %a) {
-entry:
- %tmp1 = load i32, i32* %a, align 4
- ret i32 %tmp1
-; CHECK: %0 = ptrtoint i32* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i32, i32* %a, align 4
-; CHECK-NEXT: ret i32 %tmp1
-}
-
-define i64 @aligned8(i64* %a) {
-entry:
- %tmp1 = load i64, i64* %a, align 8
- ret i64 %tmp1
-; CHECK: %0 = ptrtoint i64* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i64, i64* %a, align 8
-; CHECK-NEXT: ret i64 %tmp1
-}
-
-define i128 @aligned16(i128* %a) {
-entry:
- %tmp1 = load i128, i128* %a, align 16
- ret i128 %tmp1
-; CHECK: %0 = ptrtoint i128* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i128, i128* %a, align 16
-; CHECK-NEXT: ret i128 %tmp1
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Not guaranteed to be intra-cache-line, but the default
-; (-esan-assume-intra-cache-line) assumes they are:
-
-define i16 @unaligned2(i16* %a) {
-entry:
- %tmp1 = load i16, i16* %a, align 1
- ret i16 %tmp1
-; CHECK: %0 = ptrtoint i16* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i16, i16* %a, align 1
-; CHECK-NEXT: ret i16 %tmp1
-}
-
-define i32 @unaligned4(i32* %a) {
-entry:
- %tmp1 = load i32, i32* %a, align 2
- ret i32 %tmp1
-; CHECK: %0 = ptrtoint i32* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i32, i32* %a, align 2
-; CHECK-NEXT: ret i32 %tmp1
-}
-
-define i64 @unaligned8(i64* %a) {
-entry:
- %tmp1 = load i64, i64* %a, align 4
- ret i64 %tmp1
-; CHECK: %0 = ptrtoint i64* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i64, i64* %a, align 4
-; CHECK-NEXT: ret i64 %tmp1
-}
-
-define i128 @unaligned16(i128* %a) {
-entry:
- %tmp1 = load i128, i128* %a, align 8
- ret i128 %tmp1
-; CHECK: %0 = ptrtoint i128* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i128, i128* %a, align 8
-; CHECK-NEXT: ret i128 %tmp1
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Ensure that esan converts mem{cpy,move,set} intrinsics to calls:
-
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-
-define void @memCpyTest(i8* nocapture %x, i8* nocapture %y) {
-entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
- ret void
-; CHECK: define void @memCpyTest
-; CHECK: call i8* @memcpy
-; CHECK: ret void
-}
-
-define void @memMoveTest(i8* nocapture %x, i8* nocapture %y) {
-entry:
- tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
- ret void
-; CHECK: define void @memMoveTest
-; CHECK: call i8* @memmove
-; CHECK: ret void
-}
-
-define void @memSetTest(i8* nocapture %x) {
-entry:
- tail call void @llvm.memset.p0i8.i64(i8* align 4 %x, i8 77, i64 16, i1 false)
- ret void
-; CHECK: define void @memSetTest
-; CHECK: call i8* @memset
-; CHECK: ret void
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Ensure that esan doesn't convert element atomic memory intrinsics to
-; calls.
-
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
-declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
-
-define void @elementAtomic_memCpyTest(i8* nocapture %x, i8* nocapture %y) {
- ; CHECK-LABEL: elementAtomic_memCpyTest
- ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
- ; CHECK-NEXT: ret void
- tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
- ret void
-}
-
-define void @elementAtomic_memMoveTest(i8* nocapture %x, i8* nocapture %y) {
- ; CHECK-LABEL: elementAtomic_memMoveTest
- ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
- ; CHECK-NEXT: ret void
- tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
- ret void
-}
-
-define void @elementAtomic_memSetTest(i8* nocapture %x) {
- ; CHECK-LABEL: elementAtomic_memSetTest
- ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
- ; CHECK-NEXT: ret void
- tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
- ret void
-}
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Top-level:
-
-; CHECK: define internal void @esan.module_ctor()
-; CHECK: call void @__esan_init(i32 2, i8* null)
-; CHECK: define internal void @esan.module_dtor()
-; CHECK: call void @__esan_exit(i8* null)
+++ /dev/null
-; Test basic EfficiencySanitizer slowpath instrumentation.
-;
-; RUN: opt < %s -esan -esan-working-set -esan-instrument-fastpath=false -S | FileCheck %s
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Aligned loads:
-
-define i8 @loadAligned1(i8* %a) {
-entry:
- %tmp1 = load i8, i8* %a, align 1
- ret i8 %tmp1
-; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
-; CHECK: call void @__esan_aligned_load1(i8* %a)
-; CHECK-NEXT: %tmp1 = load i8, i8* %a, align 1
-; CHECK-NEXT: ret i8 %tmp1
-}
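-
-; With -esan-instrument-fastpath=false, every access goes through the runtime
-; instead of the inline shadow update: __esan_aligned_{load,store}{1,2,4,8,16}
-; for sufficiently aligned power-of-two sizes, __esan_unaligned_* variants for
-; under-aligned accesses, and __esan_unaligned_{load,store}N with an explicit
-; byte size for everything else (see the x86_fp80 cases below).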
-
-define i16 @loadAligned2(i16* %a) {
-entry:
- %tmp1 = load i16, i16* %a, align 2
- ret i16 %tmp1
-; CHECK: %0 = bitcast i16* %a to i8*
-; CHECK-NEXT: call void @__esan_aligned_load2(i8* %0)
-; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 2
-; CHECK-NEXT: ret i16 %tmp1
-}
-
-define i32 @loadAligned4(i32* %a) {
-entry:
- %tmp1 = load i32, i32* %a, align 4
- ret i32 %tmp1
-; CHECK: %0 = bitcast i32* %a to i8*
-; CHECK-NEXT: call void @__esan_aligned_load4(i8* %0)
-; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
-; CHECK-NEXT: ret i32 %tmp1
-}
-
-define i64 @loadAligned8(i64* %a) {
-entry:
- %tmp1 = load i64, i64* %a, align 8
- ret i64 %tmp1
-; CHECK: %0 = bitcast i64* %a to i8*
-; CHECK-NEXT: call void @__esan_aligned_load8(i8* %0)
-; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 8
-; CHECK-NEXT: ret i64 %tmp1
-}
-
-define i128 @loadAligned16(i128* %a) {
-entry:
- %tmp1 = load i128, i128* %a, align 16
- ret i128 %tmp1
-; CHECK: %0 = bitcast i128* %a to i8*
-; CHECK-NEXT: call void @__esan_aligned_load16(i8* %0)
-; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 16
-; CHECK-NEXT: ret i128 %tmp1
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Aligned stores:
-
-define void @storeAligned1(i8* %a) {
-entry:
- store i8 1, i8* %a, align 1
- ret void
-; CHECK: call void @__esan_aligned_store1(i8* %a)
-; CHECK-NEXT: store i8 1, i8* %a, align 1
-; CHECK-NEXT: ret void
-}
-
-define void @storeAligned2(i16* %a) {
-entry:
- store i16 1, i16* %a, align 2
- ret void
-; CHECK: %0 = bitcast i16* %a to i8*
-; CHECK-NEXT: call void @__esan_aligned_store2(i8* %0)
-; CHECK-NEXT: store i16 1, i16* %a, align 2
-; CHECK-NEXT: ret void
-}
-
-define void @storeAligned4(i32* %a) {
-entry:
- store i32 1, i32* %a, align 4
- ret void
-; CHECK: %0 = bitcast i32* %a to i8*
-; CHECK-NEXT: call void @__esan_aligned_store4(i8* %0)
-; CHECK-NEXT: store i32 1, i32* %a, align 4
-; CHECK-NEXT: ret void
-}
-
-define void @storeAligned8(i64* %a) {
-entry:
- store i64 1, i64* %a, align 8
- ret void
-; CHECK: %0 = bitcast i64* %a to i8*
-; CHECK-NEXT: call void @__esan_aligned_store8(i8* %0)
-; CHECK-NEXT: store i64 1, i64* %a, align 8
-; CHECK-NEXT: ret void
-}
-
-define void @storeAligned16(i128* %a) {
-entry:
- store i128 1, i128* %a, align 16
- ret void
-; CHECK: %0 = bitcast i128* %a to i8*
-; CHECK-NEXT: call void @__esan_aligned_store16(i8* %0)
-; CHECK-NEXT: store i128 1, i128* %a, align 16
-; CHECK-NEXT: ret void
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Unaligned loads:
-
-define i16 @loadUnaligned2(i16* %a) {
-entry:
- %tmp1 = load i16, i16* %a, align 1
- ret i16 %tmp1
-; CHECK: %0 = bitcast i16* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_load2(i8* %0)
-; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 1
-; CHECK-NEXT: ret i16 %tmp1
-}
-
-define i32 @loadUnaligned4(i32* %a) {
-entry:
- %tmp1 = load i32, i32* %a, align 1
- ret i32 %tmp1
-; CHECK: %0 = bitcast i32* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_load4(i8* %0)
-; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 1
-; CHECK-NEXT: ret i32 %tmp1
-}
-
-define i64 @loadUnaligned8(i64* %a) {
-entry:
- %tmp1 = load i64, i64* %a, align 1
- ret i64 %tmp1
-; CHECK: %0 = bitcast i64* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_load8(i8* %0)
-; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 1
-; CHECK-NEXT: ret i64 %tmp1
-}
-
-define i128 @loadUnaligned16(i128* %a) {
-entry:
- %tmp1 = load i128, i128* %a, align 1
- ret i128 %tmp1
-; CHECK: %0 = bitcast i128* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
-; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 1
-; CHECK-NEXT: ret i128 %tmp1
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Unaligned stores:
-
-define void @storeUnaligned2(i16* %a) {
-entry:
- store i16 1, i16* %a, align 1
- ret void
-; CHECK: %0 = bitcast i16* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_store2(i8* %0)
-; CHECK-NEXT: store i16 1, i16* %a, align 1
-; CHECK-NEXT: ret void
-}
-
-define void @storeUnaligned4(i32* %a) {
-entry:
- store i32 1, i32* %a, align 1
- ret void
-; CHECK: %0 = bitcast i32* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_store4(i8* %0)
-; CHECK-NEXT: store i32 1, i32* %a, align 1
-; CHECK-NEXT: ret void
-}
-
-define void @storeUnaligned8(i64* %a) {
-entry:
- store i64 1, i64* %a, align 1
- ret void
-; CHECK: %0 = bitcast i64* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_store8(i8* %0)
-; CHECK-NEXT: store i64 1, i64* %a, align 1
-; CHECK-NEXT: ret void
-}
-
-define void @storeUnaligned16(i128* %a) {
-entry:
- store i128 1, i128* %a, align 1
- ret void
-; CHECK: %0 = bitcast i128* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_store16(i8* %0)
-; CHECK-NEXT: store i128 1, i128* %a, align 1
-; CHECK-NEXT: ret void
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Unusual loads and stores:
-
-define x86_fp80 @loadUnalignedFP(x86_fp80* %a) {
-entry:
- %tmp1 = load x86_fp80, x86_fp80* %a, align 1
- ret x86_fp80 %tmp1
-; CHECK: %0 = bitcast x86_fp80* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_loadN(i8* %0, i64 10)
-; CHECK-NEXT: %tmp1 = load x86_fp80, x86_fp80* %a, align 1
-; CHECK-NEXT: ret x86_fp80 %tmp1
-}
-
-define void @storeUnalignedFP(x86_fp80* %a) {
-entry:
- store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
- ret void
-; CHECK: %0 = bitcast x86_fp80* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_storeN(i8* %0, i64 10)
-; CHECK-NEXT: store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
-; CHECK-NEXT: ret void
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Ensure that esan converts mem{cpy,move,set} intrinsics to calls:
-
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-
-define void @memCpyTest(i8* nocapture %x, i8* nocapture %y) {
-entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
- ret void
-; CHECK: define void @memCpyTest
-; CHECK: call i8* @memcpy
-; CHECK: ret void
-}
-
-define void @memMoveTest(i8* nocapture %x, i8* nocapture %y) {
-entry:
- tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
- ret void
-; CHECK: define void @memMoveTest
-; CHECK: call i8* @memmove
-; CHECK: ret void
-}
-
-define void @memSetTest(i8* nocapture %x) {
-entry:
- tail call void @llvm.memset.p0i8.i64(i8* align 4 %x, i8 77, i64 16, i1 false)
- ret void
-; CHECK: define void @memSetTest
-; CHECK: call i8* @memset
-; CHECK: ret void
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Ensure that esan doesn't convert element atomic memory intrinsics to
-; calls.
-
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
-declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
-
-define void @elementAtomic_memCpyTest(i8* nocapture %x, i8* nocapture %y) {
- ; CHECK-LABEL: elementAtomic_memCpyTest
- ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
- ; CHECK-NEXT: ret void
- tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
- ret void
-}
-
-define void @elementAtomic_memMoveTest(i8* nocapture %x, i8* nocapture %y) {
- ; CHECK-LABEL: elementAtomic_memMoveTest
- ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
- ; CHECK-NEXT: ret void
- tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
- ret void
-}
-
-define void @elementAtomic_memSetTest(i8* nocapture %x) {
- ; CHECK-LABEL: elementAtomic_memSetTest
- ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
- ; CHECK-NEXT: ret void
- tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
- ret void
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Top-level:
-
-; CHECK: define internal void @esan.module_ctor()
-; CHECK: call void @__esan_init(i32 2, i8* null)
-; CHECK: define internal void @esan.module_dtor()
-; CHECK: call void @__esan_exit(i8* null)
+++ /dev/null
-; Test EfficiencySanitizer working set instrumentation with the
-; intra-cache-line assumption disabled (-esan-assume-intra-cache-line=0).
-;
-; RUN: opt < %s -esan -esan-working-set -esan-assume-intra-cache-line=0 -S | FileCheck %s
-
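-; With -esan-assume-intra-cache-line=0, only accesses whose alignment
-; guarantees they stay within one cache line keep the inline fastpath;
-; the possibly line-straddling accesses further below fall back to the
-; __esan_unaligned_load* runtime calls.
-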
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Intra-cache-line
-
-define i8 @aligned1(i8* %a) {
-entry:
- %tmp1 = load i8, i8* %a, align 1
- ret i8 %tmp1
-; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
-; CHECK: %0 = ptrtoint i8* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i8, i8* %a, align 1
-; CHECK-NEXT: ret i8 %tmp1
-}
-
-define i16 @aligned2(i16* %a) {
-entry:
- %tmp1 = load i16, i16* %a, align 2
- ret i16 %tmp1
-; CHECK: %0 = ptrtoint i16* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i16, i16* %a, align 2
-; CHECK-NEXT: ret i16 %tmp1
-}
-
-define i32 @aligned4(i32* %a) {
-entry:
- %tmp1 = load i32, i32* %a, align 4
- ret i32 %tmp1
-; CHECK: %0 = ptrtoint i32* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i32, i32* %a, align 4
-; CHECK-NEXT: ret i32 %tmp1
-}
-
-define i64 @aligned8(i64* %a) {
-entry:
- %tmp1 = load i64, i64* %a, align 8
- ret i64 %tmp1
-; CHECK: %0 = ptrtoint i64* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i64, i64* %a, align 8
-; CHECK-NEXT: ret i64 %tmp1
-}
-
-define i128 @aligned16(i128* %a) {
-entry:
- %tmp1 = load i128, i128* %a, align 16
- ret i128 %tmp1
-; CHECK: %0 = ptrtoint i128* %a to i64
-; CHECK-NEXT: %1 = and i64 %0, 17592186044415
-; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
-; CHECK-NEXT: %3 = lshr i64 %2, 6
-; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: %5 = load i8, i8* %4
-; CHECK-NEXT: %6 = and i8 %5, -127
-; CHECK-NEXT: %7 = icmp ne i8 %6, -127
-; CHECK-NEXT: br i1 %7, label %8, label %11
-; CHECK: %9 = or i8 %5, -127
-; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
-; CHECK-NEXT: br label %11
-; CHECK: %tmp1 = load i128, i128* %a, align 16
-; CHECK-NEXT: ret i128 %tmp1
-}
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Not guaranteed to be intra-cache-line
-
-define i16 @unaligned2(i16* %a) {
-entry:
- %tmp1 = load i16, i16* %a, align 1
- ret i16 %tmp1
-; CHECK: %0 = bitcast i16* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_load2(i8* %0)
-; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 1
-; CHECK-NEXT: ret i16 %tmp1
-}
-
-define i32 @unaligned4(i32* %a) {
-entry:
- %tmp1 = load i32, i32* %a, align 2
- ret i32 %tmp1
-; CHECK: %0 = bitcast i32* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_load4(i8* %0)
-; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 2
-; CHECK-NEXT: ret i32 %tmp1
-}
-
-define i64 @unaligned8(i64* %a) {
-entry:
- %tmp1 = load i64, i64* %a, align 4
- ret i64 %tmp1
-; CHECK: %0 = bitcast i64* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_load8(i8* %0)
-; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 4
-; CHECK-NEXT: ret i64 %tmp1
-}
-
-define i128 @unaligned16(i128* %a) {
-entry:
- %tmp1 = load i128, i128* %a, align 8
- ret i128 %tmp1
-; CHECK: %0 = bitcast i128* %a to i8*
-; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
-; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 8
-; CHECK-NEXT: ret i128 %tmp1
-}
"sanitizer/common_interface_defs.h",
"sanitizer/coverage_interface.h",
"sanitizer/dfsan_interface.h",
- "sanitizer/esan_interface.h",
"sanitizer/hwasan_interface.h",
"sanitizer/linux_syscall_hooks.h",
"sanitizer/lsan_interface.h",
"sanitizer_win_dll_thunk.h",
"sanitizer_win_weak_interception.h",
]
- if (current_cpu == "x64") {
- sources += [ "sanitizer_linux_x86_64.S" ]
- }
}
"CGProfile.cpp",
"ControlHeightReduction.cpp",
"DataFlowSanitizer.cpp",
- "EfficiencySanitizer.cpp",
"GCOVProfiling.cpp",
"HWAddressSanitizer.cpp",
"IndirectCallPromotion.cpp",