virtual bool hasFeature(StringRef Feature) const;
virtual void HandleTargetFeatures(std::vector<std::string> &Features);
virtual const char* getABI() const {
- if (PointerWidth == 64 && SSELevel >= AVX)
+ if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX)
return "avx";
- else if (PointerWidth == 32 && MMX3DNowLevel == NoMMX3DNow)
+ else if (getTriple().getArch() == llvm::Triple::x86 &&
+ MMX3DNowLevel == NoMMX3DNow)
return "no-mmx";
return "";
}
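
Not part of the patch, but the reason for replacing the PointerWidth tests: under x32 the pointers are 32 bits wide while the architecture is still x86-64, so a PointerWidth check would misclassify the target. A minimal sketch of the distinction (the x32 triple spelling here is an assumption):

    #include "llvm/ADT/Triple.h"
    #include <cassert>

    int main() {
      // Hypothetical x32 triple: 64-bit instruction set, 32-bit pointers.
      llvm::Triple T("x86_64-unknown-linux-gnux32");
      assert(T.getArch() == llvm::Triple::x86_64); // arch check still sees x86-64
      // A PointerWidth == 64 test would be false here and take the wrong branch.
      return 0;
    }
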
case CK_AthlonMP:
case CK_Geode:
// Only accept certain architectures when compiling in 32-bit mode.
- if (PointerWidth != 32)
+ if (getTriple().getArch() != llvm::Triple::x86)
return false;
// Fallthrough
// FIXME: This *really* should not be here.
// X86_64 always has SSE2.
- if (PointerWidth == 64)
+ if (getTriple().getArch() == llvm::Triple::x86_64)
Features["sse2"] = Features["sse"] = Features["mmx"] = true;
switch (CPU) {
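
Since SSE2 is part of the x86-64 baseline, the defaults above mean SSE2 intrinsics compile on any x86-64 target without extra -m flags (defaults only; a later -mno-sse2 can still override them). A tiny illustration:

    #include <emmintrin.h>

    // Builds out of the box when targeting x86-64, including x32.
    __m128i make_zero(void) { return _mm_setzero_si128(); }
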
void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
- if (PointerWidth == 64) {
+ if (getTriple().getArch() == llvm::Triple::x86_64) {
Builder.defineMacro("__amd64__");
Builder.defineMacro("__amd64");
Builder.defineMacro("__x86_64");
Builder.defineMacro("__x86_64__");
}
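
With these macros, user code can tell the three x86 data models apart; __ILP32__ and __LP64__ are the usual companion macros (defined elsewhere, not in this hunk):

    #if defined(__x86_64__) && defined(__ILP32__)
      /* x32: x86-64 instruction set, 32-bit pointers */
    #elif defined(__x86_64__)
      /* LP64: the classic 64-bit ABI */
    #elif defined(__i386__)
      /* ia32: 32-bit x86 */
    #endif
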
- if (Opts.MicrosoftExt && PointerWidth == 32) {
+ if (Opts.MicrosoftExt && getTriple().getArch() == llvm::Triple::x86) {
switch (SSELevel) {
case AVX2:
case AVX:
.Case("sse42", SSELevel >= SSE42)
.Case("sse4a", HasSSE4a)
.Case("x86", true)
- .Case("x86_32", PointerWidth == 32)
- .Case("x86_64", PointerWidth == 64)
+ .Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
+ .Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
.Case("xop", HasXOP)
.Case("f16c", HasF16C)
.Default(false);
}
bool HasAVX;
+ // Some ABIs (e.g. the X32 ABI and Native Client OS) use 32-bit pointers on
+ // 64-bit hardware.
+ bool Has64BitPointers;
public:
X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
- ABIInfo(CGT), HasAVX(hasavx) {}
+ ABIInfo(CGT), HasAVX(hasavx),
+ Has64BitPointers(CGT.getDataLayout().getPointerSize() == 8) {
+ }
bool isPassedUsingAVXType(QualType type) const {
unsigned neededInt, neededSSE;
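
Has64BitPointers comes from the module's data layout rather than from a target flag, so any layout with 32-bit pointers (x32, Native Client) picks up the new classification automatically. A standalone sketch (layout strings abridged; the DataLayout name and header follow newer LLVM):

    #include "llvm/IR/DataLayout.h"
    #include <cassert>

    int main() {
      llvm::DataLayout LP64("e-p:64:64"); // classic x86-64: 8-byte pointers
      llvm::DataLayout X32("e-p:32:32");  // x32-style layout: 4-byte pointers
      assert(LP64.getPointerSize() == 8);
      assert(X32.getPointerSize() == 4);
      return 0;
    }
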
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
: TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
const X86_64ABIInfo &getABIInfo() const {
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
}
if (Ty->isMemberPointerType()) {
- if (Ty->isMemberFunctionPointerType())
+ if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
Lo = Hi = Integer;
else
Current = Integer;
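
The Has64BitPointers guard reflects the Itanium C++ ABI, where a pointer-to-member-function is a (function pointer, this-adjustment) pair: with 8-byte pointers the pair is 16 bytes and spans two eightbytes, with 4-byte pointers it fits in one. A quick check (sizes assume the Itanium ABI):

    #include <cstdio>

    struct T { void f(); };

    int main() {
      // Prints 16 under LP64 (hence Lo = Hi = Integer) and 8 under an
      // ILP32 ABI such as x32 (hence a single Integer eightbyte).
      std::printf("%zu\n", sizeof(void (T::*)()));
      return 0;
    }
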
// returning an 8-byte unit starting with it. See if we can safely use it.
if (IROffset == 0) {
// Pointers and int64's always fill the 8-byte unit.
- if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+ if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
+ IRType->isIntegerTy(64))
return IRType;
// If we have a 1/2/4-byte integer, we can use it only if the rest of the
// goodness in the source type is just tail padding. This is allowed to
// kick in for struct {double,int} on the int, but not on
// struct{double,int,int} because we wouldn't return the second int. We
// have to do this analysis on the source type because we can't depend on
// unions being lowered a specific way etc.
if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
- IRType->isIntegerTy(32)) {
- unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+ IRType->isIntegerTy(32) ||
+ (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
+ unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
+ cast<llvm::IntegerType>(IRType)->getBitWidth();
if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
SourceOffset*8+64, getContext()))
return IRType;
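
The net effect on lowering: with 32-bit pointers a pointer fills only half an eightbyte, so a pointer-plus-int aggregate collapses into a single integer register instead of two. An illustration of the size difference (values assume x32 vs. LP64):

    #include <cstdio>

    struct S { void *p; int n; };

    int main() {
      // 8 on x32 (one Integer eightbyte, one register); 16 on LP64, where
      // the pointer alone fills the first eightbyte.
      std::printf("sizeof(S) = %zu\n", sizeof(S));
      return 0;
    }
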