#include "llvm/Support/raw_ostream.h"
using namespace llvm;
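+// Map a fixup kind to its size in bytes. This used to return log2 sizes,
+// which cannot express the zero-byte FK_NONE kind; returning plain byte
+// sizes lets FK_NONE map to 0.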
-static unsigned getFixupKindLog2Size(unsigned Kind) {
+static unsigned getFixupKindSize(unsigned Kind) {
switch (Kind) {
default:
llvm_unreachable("invalid fixup kind!");
+ case FK_NONE:
+ return 0;
case FK_PCRel_1:
case FK_SecRel_1:
case FK_Data_1:
- return 0;
+ return 1;
case FK_PCRel_2:
case FK_SecRel_2:
case FK_Data_2:
- return 1;
+ return 2;
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_relax:
case X86::reloc_branch_4byte_pcrel:
case FK_SecRel_4:
case FK_Data_4:
- return 2;
+ return 4;
case FK_PCRel_8:
case FK_SecRel_8:
case FK_Data_8:
case X86::reloc_global_offset_table8:
- return 3;
+ return 8;
}
}
unsigned getNumFixupKinds() const override {
return X86::NumTargetFixupKinds;
}
+ Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
+
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
{"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
return Infos[Kind - FirstTargetFixupKind];
}
+ bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target) override;
+
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
uint64_t Value, bool IsResolved,
const MCSubtargetInfo *STI) const override {
- unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
+ unsigned Size = getFixupKindSize(Fixup.getKind());
assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
// Specifically ignore overflow/underflow as long as the leakage is
// limited to the lower bits. This is to remain compatible with
// other assemblers.
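+ // FK_NONE has Size == 0: there is no value to range-check, and the write
+ // loop below is a no-op.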
- assert(isIntN(Size * 8 + 1, Value) &&
+ assert((Size == 0 || isIntN(Size * 8 + 1, Value)) &&
"Value does not fit in the Fixup field");
for (unsigned i = 0; i != Size; ++i)
Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}
return getRelaxedOpcodeBranch(Inst, is16BitMode);
}
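+// Map a relocation name from a .reloc directive to a fixup kind. Only the
+// ELF "none" relocations are recognized here; everything else is deferred
+// to the generic MCAsmBackend handler.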
+Optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
+ if (STI.getTargetTriple().isOSBinFormatELF()) {
+ if (STI.getTargetTriple().getArch() == Triple::x86_64) {
+ if (Name == "R_X86_64_NONE")
+ return FK_NONE;
+ } else {
+ if (Name == "R_386_NONE")
+ return FK_NONE;
+ }
+ }
+ return MCAsmBackend::getFixupKind(Name);
+}
+
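+// FK_NONE fixups patch no bytes, so they must always be emitted as
+// relocations instead of being resolved away by the assembler.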
+bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
+ const MCFixup &Fixup,
+ const MCValue &) {
+ return Fixup.getKind() == FK_NONE;
+}
+
bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst,
const MCSubtargetInfo &STI) const {
// Branches can always be relaxed in either mode.
(EMachine != ELF::EM_386) &&
(EMachine != ELF::EM_IAMCU)) {}
-enum X86_64RelType { RT64_64, RT64_32, RT64_32S, RT64_16, RT64_8 };
+enum X86_64RelType { RT64_NONE, RT64_64, RT64_32, RT64_32S, RT64_16, RT64_8 };
static X86_64RelType getType64(unsigned Kind,
MCSymbolRefExpr::VariantKind &Modifier,
bool &IsPCRel) {
switch (Kind) {
default:
llvm_unreachable("Unimplemented");
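+ // A none-kind fixup carries no data; lower it to the none relocation type.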
+ case FK_NONE:
+ return RT64_NONE;
case X86::reloc_global_offset_table8:
Modifier = MCSymbolRefExpr::VK_GOT;
IsPCRel = true;
return RT64_64;
case MCSymbolRefExpr::VK_None:
case MCSymbolRefExpr::VK_X86_ABS8:
switch (Type) {
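+ // The "none" relocation is only meaningful without a modifier.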
+ case RT64_NONE:
+ if (Modifier == MCSymbolRefExpr::VK_None)
+ return ELF::R_X86_64_NONE;
+ llvm_unreachable("Unimplemented");
case RT64_64:
return IsPCRel ? ELF::R_X86_64_PC64 : ELF::R_X86_64_64;
case RT64_32:
case RT64_32S:
case RT64_16:
case RT64_8:
+ case RT64_NONE:
llvm_unreachable("Unimplemented");
}
llvm_unreachable("unexpected relocation type!");
case RT64_32S:
case RT64_16:
case RT64_8:
+ case RT64_NONE:
llvm_unreachable("Unimplemented");
}
llvm_unreachable("unexpected relocation type!");
case RT64_32S:
case RT64_16:
case RT64_8:
+ case RT64_NONE:
llvm_unreachable("Unimplemented");
}
llvm_unreachable("unexpected relocation type!");
case RT64_32S:
case RT64_16:
case RT64_8:
+ case RT64_NONE:
llvm_unreachable("Unimplemented");
}
llvm_unreachable("unexpected relocation type!");
}
}
-enum X86_32RelType { RT32_32, RT32_16, RT32_8 };
+enum X86_32RelType { RT32_NONE, RT32_32, RT32_16, RT32_8 };
static X86_32RelType getType32(X86_64RelType T) {
switch (T) {
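+ // Propagate the none type unchanged to the 32-bit lowering.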
+ case RT64_NONE:
+ return RT32_NONE;
case RT64_64:
llvm_unreachable("Unimplemented");
case RT64_32:
case MCSymbolRefExpr::VK_None:
case MCSymbolRefExpr::VK_X86_ABS8:
switch (Type) {
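+ // As on x86-64, the none relocation accepts no modifier.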
+ case RT32_NONE:
+ if (Modifier == MCSymbolRefExpr::VK_None)
+ return ELF::R_386_NONE;
+ llvm_unreachable("Unimplemented");
case RT32_32:
return IsPCRel ? ELF::R_386_PC32 : ELF::R_386_32;
case RT32_16:
--- /dev/null
+# RUN: llvm-mc -triple=i386-pc-linux-musl %s | FileCheck --check-prefix=PRINT %s
+
+# RUN: llvm-mc -filetype=obj -triple=i386-pc-linux-musl %s -o %t
+# RUN: llvm-readobj -r %t | FileCheck %s
+# RUN: llvm-readelf -x .data %t | FileCheck --check-prefix=HEX %s
+
+# PRINT: .reloc 2, R_386_NONE, .data
+# PRINT-NEXT: .reloc 1, R_386_NONE, foo+4
+# PRINT-NEXT: .reloc 0, R_386_NONE, 8
+
+# i386 relocations use the Elf32_Rel format, which has no addend field. For
+# R_386_NONE the addend is neither stored in the relocation entry nor
+# applied to the referenced location.
+# CHECK: 0x2 R_386_NONE .data 0x0
+# CHECK-NEXT: 0x1 R_386_NONE foo 0x0
+# CHECK-NEXT: 0x0 R_386_NONE - 0x0
+
+# HEX: 0x00000000 00000000 00000000
+
+.text
+ ret
+ nop
+ nop
+ .reloc 2, R_386_NONE, .data
+ .reloc 1, R_386_NONE, foo+4
+ .reloc 0, R_386_NONE, 8
+
+.data
+.globl foo
+foo:
+ .long 0
+ .long 0
--- /dev/null
+# RUN: llvm-mc -triple=x86_64-pc-linux-musl %s | FileCheck --check-prefix=PRINT %s
+
+# RUN: llvm-mc -filetype=obj -triple=x86_64-pc-linux-musl %s -o %t
+# RUN: llvm-readobj -r %t | FileCheck %s
+
+# PRINT: .reloc 2, R_X86_64_NONE, .data
+# PRINT-NEXT: .reloc 1, R_X86_64_NONE, foo+4
+# PRINT-NEXT: .reloc 0, R_X86_64_NONE, 8
+
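+# x86-64 uses the Elf64_Rela format, so the addend is stored in the
+# r_addend field of the relocation entry even for R_X86_64_NONE.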
+# CHECK: 0x2 R_X86_64_NONE .data 0x0
+# CHECK-NEXT: 0x1 R_X86_64_NONE foo 0x4
+# CHECK-NEXT: 0x0 R_X86_64_NONE - 0x8
+
+.text
+ ret
+ nop
+ nop
+ .reloc 2, R_X86_64_NONE, .data
+ .reloc 1, R_X86_64_NONE, foo+4
+ .reloc 0, R_X86_64_NONE, 8
+
+.data
+.globl foo
+foo:
+ .word 0
+ .word 0