before finalizing instructions.
* x86arch.h (yasm_x86__bc_apply_prefixes): Don't take num_segregs and
prefixes parameters.
* x86bc.c (yasm_x86__bc_apply_prefixes): Move segreg code to...
* x86id.re (yasm_x86__finalize_insn): Here (only user of this code).
(x86_finalize_jmp): Don't pass num_segregs and prefixes.
* x86arch.h (x86_common): New; refactored common bytecode parameters.
(x86_opcode): New; refactored opcode parameters.
(x86_insn): Refactor with x86_common and x86_opcode.
(x86_jmp): Likewise.
* x86id.re (x86_finalize_common, x86_finalize_opcode): New.
(yasm_x86__finalize_insn, x86_finalize_jmp): Use and update substruct refs.
* x86bc.c (x86_common_print, x86_opcode_print): New.
(x86_bc_insn_print, x86_bc_jmp_print): Use and update substruct refs.
(x86_common_resolve): New.
(x86_bc_insn_resolve, x86_bc_jmp_resolve): Use and update substruct refs.
(x86_common_tobytes, x86_opcode_tobytes): New.
(x86_bc_insn_tobytes, x86_bc_jmp_tobytes): Use and update substruct refs.
(yasm_x86__bc_apply_prefixes): Utilize refactor to simplify.
* bytecode.c (bc_insn_finalize): Simplify (or at least level) immediates
and memory operands before passing them off to yasm_arch_finalize_insn().
This may result in some minor performance improvement on complex static
expressions.
* x86arch.h (x86_parse_targetmod): Add X86_FAR_SEGOFF; this is only
generated due to a SEG:OFF immediate being detected during finalize.
(x86_jmp_opcode_sel): Remove JMP_FAR; this is now a separate bytecode.
(x86_jmp): Remove far opcode.
(x86_jmpfar): New bytecode for far jumps.
(yasm_x86__bc_transform_jmpfar): New.
* x86bc.c (x86_bc_callback_jmpfar): New.
(yasm_x86__bc_transform_jmpfar, x86_bc_jmpfar_destroy): New.
(x86_bc_jmp_print): Move far jump code to...
(x86_bc_jmpfar_print): Here (new).
(x86_bc_jmp_resolve, x86_bc_jmpfar_resolve): Likewise (latter new).
(x86_bc_jmp_tobytes, x86_bc_jmpfar_tobytes): Likewise (latter new).
* x86id.re (OPAP_JmpFar): Remove (detected immediately now).
(jmp_insn, call_insn): Update.
(x86_finalize_jmpfar): New.
(x86_finalize_jmp): Remove far jump-related code.
(yasm_x86__finalize_insn): Add check for SEG:OFF immediate operand, and
apply X86_FAR_SEGOFF if necessary. Match OPTM_Far against both X86_FAR
and X86_FAR_SEGOFF. Add shortcut to x86_finalize_jmpfar().
svn path=/trunk/yasm/; revision=1182
bc_insn_finalize(yasm_bytecode *bc, yasm_bytecode *prev_bc)
{
bytecode_insn *insn = (bytecode_insn *)bc->contents;
+ int i;
+ yasm_insn_operand *op;
+
+ /* Simplify the operands' expressions first. */
+ for (i = 0, op = yasm_ops_first(&insn->operands);
+ op && i<insn->num_operands; op = yasm_operand_next(op), i++) {
+ /* Check operand type */
+ switch (op->type) {
+ case YASM_INSN__OPERAND_MEMORY:
+ /* Don't get over-ambitious here; some archs' memory expr
+ * parsers are sensitive to the presence of *1, etc, so don't
+ * simplify identities.
+ */
+ if (op->data.ea)
+ op->data.ea->disp =
+ yasm_expr__level_tree(op->data.ea->disp, 1, 0, NULL,
+ NULL, NULL, NULL);
+ break;
+ case YASM_INSN__OPERAND_IMM:
+ op->data.val = yasm_expr_simplify(op->data.val, NULL);
+ break;
+ default:
+ break;
+ }
+ }
yasm_arch_finalize_insn(insn->arch, bc, prev_bc, insn->insn_data,
insn->num_operands, &insn->operands,
X86_NEAR = 1,
X86_SHORT,
X86_FAR,
- X86_TO
+ X86_TO,
+ X86_FAR_SEGOFF /* FAR due to SEG:OFF immediate */
} x86_parse_targetmod;
typedef enum {
JMP_SHORT,
JMP_NEAR,
JMP_SHORT_FORCED,
- JMP_NEAR_FORCED,
- JMP_FAR /* not really relative, but fits here */
+ JMP_NEAR_FORCED
} x86_jmp_opcode_sel;
typedef enum {
unsigned long line);
/* Bytecode types */
+typedef struct x86_common {
+ unsigned char addrsize; /* 0 or =mode_bits => no override */
+ unsigned char opersize; /* 0 or =mode_bits => no override */
+ unsigned char lockrep_pre; /* 0 indicates no prefix */
+
+ unsigned char mode_bits;
+} x86_common;
+
+typedef struct x86_opcode {
+ unsigned char opcode[3]; /* opcode */
+ unsigned char len;
+} x86_opcode;
typedef struct x86_insn {
+ x86_common common; /* common x86 information */
+ x86_opcode opcode;
+
/*@null@*/ yasm_effaddr *ea; /* effective address */
/*@null@*/ yasm_immval *imm; /* immediate or relative value */
- unsigned char opcode[3]; /* opcode */
- unsigned char opcode_len;
-
- unsigned char addrsize; /* 0 or =mode_bits => no override */
- unsigned char opersize; /* 0 or =mode_bits => no override */
- unsigned char lockrep_pre; /* 0 indicates no prefix */
-
unsigned char def_opersize_64; /* default operand size in 64-bit mode */
unsigned char special_prefix; /* "special" prefix (0=none) */
* registers) and the target register is al/ax/eax/rax.
*/
unsigned char shortmov_op;
-
- unsigned char mode_bits;
} x86_insn;
typedef struct x86_jmp {
+ x86_common common; /* common x86 information */
+ x86_opcode shortop, nearop;
+
yasm_expr *target; /* target location */
/*@dependent@*/ yasm_symrec *origin; /* jump origin */
- struct {
- unsigned char opcode[3];
- unsigned char opcode_len; /* 0 = no opc for this version */
- } shortop, nearop, farop;
-
/* which opcode are we using? */
/* The *FORCED forms are specified in the source as such */
x86_jmp_opcode_sel op_sel;
+} x86_jmp;
- unsigned char addrsize; /* 0 or =mode_bits => no override */
- unsigned char opersize; /* 0 indicates no override */
- unsigned char lockrep_pre; /* 0 indicates no prefix */
+/* Direct (immediate) FAR jumps ONLY; indirect FAR jumps get turned into
+ * x86_insn bytecodes; relative jumps turn into x86_jmp bytecodes.
+ * This bytecode is not legal in 64-bit mode.
+ */
+typedef struct x86_jmpfar {
+ x86_common common; /* common x86 information */
+ x86_opcode opcode;
- unsigned char mode_bits;
-} x86_jmp;
+ yasm_expr *segment; /* target segment */
+ yasm_expr *offset; /* target offset */
+} x86_jmpfar;
-void yasm_x86__bc_transform_jmp(yasm_bytecode *bc, x86_jmp *jmp);
void yasm_x86__bc_transform_insn(yasm_bytecode *bc, x86_insn *insn);
+void yasm_x86__bc_transform_jmp(yasm_bytecode *bc, x86_jmp *jmp);
+void yasm_x86__bc_transform_jmpfar(yasm_bytecode *bc, x86_jmpfar *jmpfar);
void yasm_x86__bc_apply_prefixes
- (yasm_bytecode *bc, int num_prefixes, unsigned long **prefixes,
- int num_segregs, const unsigned long *segregs);
+ (yasm_bytecode *bc, int num_prefixes, unsigned long **prefixes);
void yasm_x86__ea_init(yasm_effaddr *ea, unsigned int spare,
/*@null@*/ yasm_symrec *origin);
void *d, yasm_output_expr_func output_expr,
/*@null@*/ yasm_output_reloc_func output_reloc);
+static void x86_bc_jmpfar_destroy(void *contents);
+static void x86_bc_jmpfar_print(const void *contents, FILE *f,
+ int indent_level);
+static yasm_bc_resolve_flags x86_bc_jmpfar_resolve
+ (yasm_bytecode *bc, int save, yasm_calc_bc_dist_func calc_bc_dist);
+static int x86_bc_jmpfar_tobytes
+ (yasm_bytecode *bc, unsigned char **bufp, void *d,
+ yasm_output_expr_func output_expr,
+ /*@null@*/ yasm_output_reloc_func output_reloc);
+
/* Effective address callback structures */
static const yasm_effaddr_callback x86_ea_callback = {
x86_bc_jmp_tobytes
};
+static const yasm_bytecode_callback x86_bc_callback_jmpfar = {
+ x86_bc_jmpfar_destroy,
+ x86_bc_jmpfar_print,
+ yasm_bc_finalize_common,
+ x86_bc_jmpfar_resolve,
+ x86_bc_jmpfar_tobytes
+};
+
int
yasm_x86__set_rex_from_reg(unsigned char *rex, unsigned char *low3,
unsigned long reg, unsigned int bits,
return 0;
}
+void
+yasm_x86__bc_transform_insn(yasm_bytecode *bc, x86_insn *insn)
+{
+ yasm_bc_transform(bc, &x86_bc_callback_insn, insn);
+}
+
void
yasm_x86__bc_transform_jmp(yasm_bytecode *bc, x86_jmp *jmp)
{
}
void
-yasm_x86__bc_transform_insn(yasm_bytecode *bc, x86_insn *insn)
+yasm_x86__bc_transform_jmpfar(yasm_bytecode *bc, x86_jmpfar *jmpfar)
{
- yasm_bc_transform(bc, &x86_bc_callback_insn, insn);
+ yasm_bc_transform(bc, &x86_bc_callback_jmpfar, jmpfar);
}
void
void
yasm_x86__bc_apply_prefixes(yasm_bytecode *bc, int num_prefixes,
- unsigned long **prefixes, int num_segregs,
- const unsigned long *segregs)
+ unsigned long **prefixes)
{
- x86_insn *insn = (x86_insn *)bc->contents;
- x86_jmp *jmp = (x86_jmp *)bc->contents;
+ x86_common *common = (x86_common *)bc->contents;
int i;
- unsigned char *opersize, *addrsize, *lockrep_pre;
-
- /* Set pointers appropriately for bytecode type */
- if (bc->callback == &x86_bc_callback_insn) {
- opersize = &insn->opersize;
- addrsize = &insn->addrsize;
- lockrep_pre = &insn->lockrep_pre;
- } else if (bc->callback == &x86_bc_callback_jmp) {
- opersize = &jmp->opersize;
- addrsize = &jmp->addrsize;
- lockrep_pre = &jmp->lockrep_pre;
- } else
- yasm_internal_error(N_("Prefixes applied to non-instruction"));
for (i=0; i<num_prefixes; i++) {
switch ((x86_parse_insn_prefix)prefixes[i][0]) {
case X86_LOCKREP:
- if (*lockrep_pre != 0)
+ if (common->lockrep_pre != 0)
yasm__warning(YASM_WARN_GENERAL, bc->line,
N_("multiple LOCK or REP prefixes, using leftmost"));
- *lockrep_pre = (unsigned char)prefixes[i][1];
+ common->lockrep_pre = (unsigned char)prefixes[i][1];
break;
case X86_ADDRSIZE:
- *addrsize = (unsigned char)prefixes[i][1];
+ common->addrsize = (unsigned char)prefixes[i][1];
break;
case X86_OPERSIZE:
- *opersize = (unsigned char)prefixes[i][1];
+ common->opersize = (unsigned char)prefixes[i][1];
break;
}
}
-
- if (bc->callback == &x86_bc_callback_insn)
- for (i=0; i<num_segregs; i++)
- yasm_ea_set_segreg(insn->ea, segregs[i], bc->line);
}
static void
yasm_xfree(contents);
}
+static void
+x86_bc_jmpfar_destroy(void *contents)
+{
+ x86_jmpfar *jmpfar = (x86_jmpfar *)contents;
+ yasm_expr_destroy(jmpfar->segment);
+ yasm_expr_destroy(jmpfar->offset);
+ yasm_xfree(contents);
+}
+
static void
x86_ea_destroy(yasm_effaddr *ea)
{
(unsigned int)x86_ea->need_sib);
}
+static void
+x86_common_print(const x86_common *common, FILE *f, int indent_level)
+{
+ fprintf(f, "%*sAddrSize=%u OperSize=%u LockRepPre=%02x BITS=%u\n",
+ indent_level, "",
+ (unsigned int)common->addrsize,
+ (unsigned int)common->opersize,
+ (unsigned int)common->lockrep_pre,
+ (unsigned int)common->mode_bits);
+}
+
+static void
+x86_opcode_print(const x86_opcode *opcode, FILE *f, int indent_level)
+{
+ fprintf(f, "%*sOpcode: %02x %02x %02x OpLen=%u\n", indent_level, "",
+ (unsigned int)opcode->opcode[0],
+ (unsigned int)opcode->opcode[1],
+ (unsigned int)opcode->opcode[2],
+ (unsigned int)opcode->len);
+}
+
static void
x86_bc_insn_print(const void *contents, FILE *f, int indent_level)
{
(unsigned int)insn->imm->sign);
indent_level--;
}
- fprintf(f, "%*sOpcode: %02x %02x %02x OpLen=%u\n", indent_level,
- "", (unsigned int)insn->opcode[0],
- (unsigned int)insn->opcode[1],
- (unsigned int)insn->opcode[2],
- (unsigned int)insn->opcode_len);
- fprintf(f,
- "%*sAddrSize=%u OperSize=%u LockRepPre=%02x SpPre=%02x REX=%03o\n",
- indent_level, "",
- (unsigned int)insn->addrsize,
- (unsigned int)insn->opersize,
- (unsigned int)insn->lockrep_pre,
+ x86_opcode_print(&insn->opcode, f, indent_level);
+ x86_common_print(&insn->common, f, indent_level);
+ fprintf(f, "%*sSpPre=%02x REX=%03o ShiftOp=%u\n", indent_level, "",
(unsigned int)insn->special_prefix,
- (unsigned int)insn->rex);
- fprintf(f, "%*sShiftOp=%u BITS=%u\n", indent_level, "",
- (unsigned int)insn->shift_op,
- (unsigned int)insn->mode_bits);
+ (unsigned int)insn->rex,
+ (unsigned int)insn->shift_op);
}
static void
fprintf(f, "%*sOrigin=\n", indent_level, "");
yasm_symrec_print(jmp->origin, f, indent_level+1);
fprintf(f, "\n%*sShort Form:\n", indent_level, "");
- if (jmp->shortop.opcode_len == 0)
+ if (jmp->shortop.len == 0)
fprintf(f, "%*sNone\n", indent_level+1, "");
else
- fprintf(f, "%*sOpcode: %02x %02x %02x OpLen=%u\n",
- indent_level+1, "",
- (unsigned int)jmp->shortop.opcode[0],
- (unsigned int)jmp->shortop.opcode[1],
- (unsigned int)jmp->shortop.opcode[2],
- (unsigned int)jmp->shortop.opcode_len);
+ x86_opcode_print(&jmp->shortop, f, indent_level+1);
fprintf(f, "%*sNear Form:\n", indent_level, "");
- if (jmp->nearop.opcode_len == 0)
- fprintf(f, "%*sNone\n", indent_level+1, "");
- else
- fprintf(f, "%*sOpcode: %02x %02x %02x OpLen=%u\n",
- indent_level+1, "",
- (unsigned int)jmp->nearop.opcode[0],
- (unsigned int)jmp->nearop.opcode[1],
- (unsigned int)jmp->nearop.opcode[2],
- (unsigned int)jmp->nearop.opcode_len);
- fprintf(f, "%*sFar Form:\n", indent_level, "");
- if (jmp->farop.opcode_len == 0)
+ if (jmp->nearop.len == 0)
fprintf(f, "%*sNone\n", indent_level+1, "");
else
- fprintf(f, "%*sOpcode: %02x %02x %02x OpLen=%u\n",
- indent_level+1, "",
- (unsigned int)jmp->farop.opcode[0],
- (unsigned int)jmp->farop.opcode[1],
- (unsigned int)jmp->farop.opcode[2],
- (unsigned int)jmp->farop.opcode_len);
+ x86_opcode_print(&jmp->nearop, f, indent_level+1);
fprintf(f, "%*sOpSel=", indent_level, "");
switch (jmp->op_sel) {
case JMP_NONE:
case JMP_NEAR_FORCED:
fprintf(f, "Forced Near");
break;
- case JMP_FAR:
- fprintf(f, "Far");
- break;
default:
fprintf(f, "UNKNOWN!!");
break;
}
- fprintf(f, "\n%*sAddrSize=%u OperSize=%u LockRepPre=%02x\n",
- indent_level, "",
- (unsigned int)jmp->addrsize,
- (unsigned int)jmp->opersize,
- (unsigned int)jmp->lockrep_pre);
- fprintf(f, "%*sBITS=%u\n", indent_level, "",
- (unsigned int)jmp->mode_bits);
+ x86_common_print(&jmp->common, f, indent_level);
+}
+
+static void
+x86_bc_jmpfar_print(const void *contents, FILE *f, int indent_level)
+{
+ const x86_jmpfar *jmpfar = (const x86_jmpfar *)contents;
+
+ fprintf(f, "%*s_Far_Jump_\n", indent_level, "");
+ fprintf(f, "%*sSegment=", indent_level, "");
+ yasm_expr_print(jmpfar->segment, f);
+ fprintf(f, "\n%*sOffset=", indent_level, "");
+ yasm_expr_print(jmpfar->offset, f);
+ x86_opcode_print(&jmpfar->opcode, f, indent_level);
+ x86_common_print(&jmpfar->common, f, indent_level);
+}
+
+static unsigned int
+x86_common_resolve(const x86_common *common)
+{
+ unsigned int len = 0;
+
+ if (common->addrsize != 0 && common->addrsize != common->mode_bits)
+ len++;
+ if (common->opersize != 0 &&
+ ((common->mode_bits != 64 && common->opersize != common->mode_bits) ||
+ (common->mode_bits == 64 && common->opersize == 16)))
+ len++;
+ if (common->lockrep_pre != 0)
+ len++;
+
+ return len;
}
static yasm_bc_resolve_flags
assert(temp != NULL);
/* Handle shortmov special-casing */
- if (insn->shortmov_op && insn->mode_bits == 64 &&
- insn->addrsize == 32 &&
+ if (insn->shortmov_op && insn->common.mode_bits == 64 &&
+ insn->common.addrsize == 32 &&
!yasm_expr__contains(temp, YASM_EXPR_REG)) {
yasm_x86__ea_set_disponly((yasm_effaddr *)&eat);
if (save) {
/* Make the short form permanent. */
- insn->opcode[0] = insn->opcode[1];
+ insn->opcode.opcode[0] = insn->opcode.opcode[1];
}
}
* of the Mod/RM byte until we know more about the
* displacement.
*/
- switch (yasm_x86__expr_checkea(&temp, &insn->addrsize,
- insn->mode_bits, ea->nosplit, &displen, &eat.modrm,
+ switch (yasm_x86__expr_checkea(&temp, &insn->common.addrsize,
+ insn->common.mode_bits, ea->nosplit, &displen, &eat.modrm,
&eat.valid_modrm, &eat.need_modrm, &eat.sib,
&eat.valid_sib, &eat.need_sib, &eat.pcrel, &insn->rex,
calc_bc_dist)) {
/* Handle unknown case, make displen word-sized */
if (displen == 0xff)
- displen = (insn->addrsize == 16) ? 2U : 4U;
+ displen = (insn->common.addrsize == 16) ? 2U : 4U;
}
/* If we had forced ea->len but had to override, save it now */
if (save) {
/* Make the ,1 form permanent. */
- insn->opcode[0] = insn->opcode[1];
+ insn->opcode.opcode[0] = insn->opcode.opcode[1];
/* Delete imm, as it's not needed. */
yasm_expr_destroy(imm->val);
yasm_xfree(imm);
bc->len += immlen;
}
- bc->len += insn->opcode_len;
- bc->len += (insn->addrsize != 0 && insn->addrsize != insn->mode_bits) ? 1:0;
- if (insn->opersize != 0 &&
- ((insn->mode_bits != 64 && insn->opersize != insn->mode_bits) ||
- (insn->mode_bits == 64 && insn->opersize == 16)))
- bc->len++;
+ bc->len += insn->opcode.len;
+ bc->len += x86_common_resolve(&insn->common);
bc->len += (insn->special_prefix != 0) ? 1:0;
- bc->len += (insn->lockrep_pre != 0) ? 1:0;
if (insn->rex != 0xff &&
(insn->rex != 0 ||
- (insn->mode_bits == 64 && insn->opersize == 64 &&
+ (insn->common.mode_bits == 64 && insn->common.opersize == 64 &&
insn->def_opersize_64 != 64)))
bc->len++;
x86_jmp_opcode_sel jrtype = JMP_NONE;
/* As opersize may be 0, figure out its "real" value. */
- opersize = (jmp->opersize == 0) ? jmp->mode_bits : jmp->opersize;
+ opersize = (jmp->common.opersize == 0) ?
+ jmp->common.mode_bits : jmp->common.opersize;
/* We only check to see if forced forms are actually legal if we're in
* save mode. Otherwise we assume that they are legal.
return YASM_BC_RESOLVE_ERROR | YASM_BC_RESOLVE_UNKNOWN_LEN;
} else {
rel = yasm_intnum_get_int(num);
- rel -= jmp->shortop.opcode_len+1;
+ rel -= jmp->shortop.len+1;
yasm_expr_destroy(temp);
/* does a short form exist? */
- if (jmp->shortop.opcode_len == 0) {
+ if (jmp->shortop.len == 0) {
yasm__error(bc->line, N_("short jump does not exist"));
return YASM_BC_RESOLVE_ERROR |
YASM_BC_RESOLVE_UNKNOWN_LEN;
/* 2/4 byte relative displacement (depending on operand size) */
jrtype = JMP_NEAR;
if (save) {
- if (jmp->nearop.opcode_len == 0) {
+ if (jmp->nearop.len == 0) {
yasm__error(bc->line, N_("near jump does not exist"));
return YASM_BC_RESOLVE_ERROR | YASM_BC_RESOLVE_UNKNOWN_LEN;
}
break;
default:
temp = yasm_expr_copy(jmp->target);
- temp = yasm_expr_simplify(temp, NULL);
-
- /* Check for far displacement (seg:off). */
- if (yasm_expr_is_op(temp, YASM_EXPR_SEGOFF)) {
- jrtype = JMP_FAR;
- break; /* length handled below */
- } else if (jmp->op_sel == JMP_FAR) {
- yasm__error(bc->line,
- N_("far jump does not have a far displacement"));
- return YASM_BC_RESOLVE_ERROR | YASM_BC_RESOLVE_UNKNOWN_LEN;
- }
/* Try to find shortest displacement based on difference between
* target expr value and our (this bytecode's) offset. Note this
num = yasm_expr_get_intnum(&temp, calc_bc_dist);
if (num) {
rel = yasm_intnum_get_int(num);
- rel -= jmp->shortop.opcode_len+1;
+ rel -= jmp->shortop.len+1;
/* short displacement must fit within -128 <= rel <= +127 */
- if (jmp->shortop.opcode_len != 0 && rel >= -128 &&
- rel <= 127) {
+ if (jmp->shortop.len != 0 && rel >= -128 && rel <= 127) {
/* It fits into a short displacement. */
jrtype = JMP_SHORT;
- } else if (jmp->nearop.opcode_len != 0) {
+ } else if (jmp->nearop.len != 0) {
/* Near for now, but could get shorter in the future if
* there's a short form available.
*/
jrtype = JMP_NEAR;
- if (jmp->shortop.opcode_len != 0)
+ if (jmp->shortop.len != 0)
retval = YASM_BC_RESOLVE_NONE;
} else {
/* Doesn't fit into short, and there's no near opcode.
* opcode is not available, use a short opcode instead.
* If we're saving, error if a near opcode is not available.
*/
- if (jmp->nearop.opcode_len != 0) {
- if (jmp->shortop.opcode_len != 0)
+ if (jmp->nearop.len != 0) {
+ if (jmp->shortop.len != 0)
retval = YASM_BC_RESOLVE_NONE;
jrtype = JMP_NEAR;
} else {
case JMP_SHORT:
if (save)
jmp->op_sel = JMP_SHORT;
- if (jmp->shortop.opcode_len == 0)
+ if (jmp->shortop.len == 0)
return YASM_BC_RESOLVE_UNKNOWN_LEN; /* size not available */
- bc->len += jmp->shortop.opcode_len + 1;
+ bc->len += jmp->shortop.len + 1;
break;
case JMP_NEAR:
if (save)
jmp->op_sel = JMP_NEAR;
- if (jmp->nearop.opcode_len == 0)
+ if (jmp->nearop.len == 0)
return YASM_BC_RESOLVE_UNKNOWN_LEN; /* size not available */
- bc->len += jmp->nearop.opcode_len;
- bc->len += (opersize == 16) ? 2 : 4;
- break;
- case JMP_FAR:
- if (save)
- jmp->op_sel = JMP_FAR;
- if (jmp->farop.opcode_len == 0)
- return YASM_BC_RESOLVE_UNKNOWN_LEN; /* size not available */
-
- bc->len += jmp->farop.opcode_len;
- bc->len += 2; /* segment */
+ bc->len += jmp->nearop.len;
bc->len += (opersize == 16) ? 2 : 4;
break;
default:
yasm_internal_error(N_("unknown jump type"));
}
- bc->len += (jmp->addrsize != 0 && jmp->addrsize != jmp->mode_bits) ? 1:0;
- bc->len += (jmp->opersize != 0 && jmp->opersize != jmp->mode_bits) ? 1:0;
- bc->len += (jmp->lockrep_pre != 0) ? 1:0;
+ bc->len += x86_common_resolve(&jmp->common);
return retval;
}
+static yasm_bc_resolve_flags
+x86_bc_jmpfar_resolve(yasm_bytecode *bc, int save,
+ yasm_calc_bc_dist_func calc_bc_dist)
+{
+ x86_jmpfar *jmpfar = (x86_jmpfar *)bc->contents;
+ unsigned char opersize;
+
+ opersize = (jmpfar->common.opersize == 0) ?
+ jmpfar->common.mode_bits : jmpfar->common.opersize;
+
+ bc->len += jmpfar->opcode.len;
+ bc->len += 2; /* segment */
+ bc->len += (opersize == 16) ? 2 : 4;
+ bc->len += x86_common_resolve(&jmpfar->common);
+
+ return YASM_BC_RESOLVE_MIN_LEN;
+}
+
+static void
+x86_common_tobytes(const x86_common *common, unsigned char **bufp,
+ unsigned int segreg)
+{
+ if (common->lockrep_pre != 0)
+ YASM_WRITE_8(*bufp, common->lockrep_pre);
+ if (segreg != 0)
+ YASM_WRITE_8(*bufp, (unsigned char)segreg);
+ if (common->opersize != 0 &&
+ ((common->mode_bits != 64 && common->opersize != common->mode_bits) ||
+ (common->mode_bits == 64 && common->opersize == 16)))
+ YASM_WRITE_8(*bufp, 0x66);
+ if (common->addrsize != 0 && common->addrsize != common->mode_bits)
+ YASM_WRITE_8(*bufp, 0x67);
+}
+
+static void
+x86_opcode_tobytes(const x86_opcode *opcode, unsigned char **bufp)
+{
+ unsigned int i;
+ for (i=0; i<opcode->len; i++)
+ YASM_WRITE_8(*bufp, opcode->opcode[i]);
+}
+
static int
x86_bc_insn_tobytes(yasm_bytecode *bc, unsigned char **bufp, void *d,
yasm_output_expr_func output_expr,
/* Prefixes */
if (insn->special_prefix != 0)
YASM_WRITE_8(*bufp, insn->special_prefix);
- if (insn->lockrep_pre != 0)
- YASM_WRITE_8(*bufp, insn->lockrep_pre);
- if (x86_ea && ea->segreg != 0)
- YASM_WRITE_8(*bufp, (unsigned char)(ea->segreg>>8));
- if (insn->opersize != 0 &&
- ((insn->mode_bits != 64 && insn->opersize != insn->mode_bits) ||
- (insn->mode_bits == 64 && insn->opersize == 16)))
- YASM_WRITE_8(*bufp, 0x66);
- if (insn->addrsize != 0 && insn->addrsize != insn->mode_bits)
- YASM_WRITE_8(*bufp, 0x67);
+ x86_common_tobytes(&insn->common, bufp, ea ? (ea->segreg>>8) : 0);
if (insn->rex != 0xff) {
- if (insn->mode_bits == 64 && insn->opersize == 64 &&
+ if (insn->common.mode_bits == 64 && insn->common.opersize == 64 &&
insn->def_opersize_64 != 64)
insn->rex |= 0x48;
if (insn->rex != 0) {
- if (insn->mode_bits != 64)
+ if (insn->common.mode_bits != 64)
yasm_internal_error(
N_("x86: got a REX prefix in non-64-bit mode"));
YASM_WRITE_8(*bufp, insn->rex);
}
/* Opcode */
- for (i=0; i<insn->opcode_len; i++)
- YASM_WRITE_8(*bufp, insn->opcode[i]);
+ x86_opcode_tobytes(&insn->opcode, bufp);
/* Effective address: ModR/M (if required), SIB (if required), and
* displacement (if required).
if (ea->disp) {
x86_effaddr eat = *x86_ea; /* structure copy */
unsigned char displen = ea->len;
- unsigned char addrsize = insn->addrsize;
+ unsigned char addrsize = insn->common.addrsize;
eat.valid_modrm = 0; /* force checkea to actually run */
* displacement. Throw away all of the return values except for
* the modified expr.
*/
- if (yasm_x86__expr_checkea(&ea->disp, &addrsize, insn->mode_bits,
- ea->nosplit, &displen, &eat.modrm,
- &eat.valid_modrm, &eat.need_modrm,
- &eat.sib, &eat.valid_sib,
- &eat.need_sib, &eat.pcrel, &insn->rex,
+ if (yasm_x86__expr_checkea(&ea->disp, &addrsize,
+ insn->common.mode_bits, ea->nosplit,
+ &displen, &eat.modrm, &eat.valid_modrm,
+ &eat.need_modrm, &eat.sib,
+ &eat.valid_sib, &eat.need_sib,
+ &eat.pcrel, &insn->rex,
yasm_common_calc_bc_dist))
yasm_internal_error(N_("checkea failed"));
unsigned char opersize;
unsigned int i;
unsigned char *bufp_orig = *bufp;
- /*@null@*/ yasm_expr *targetseg;
/*@null@*/ yasm_expr *wrt;
- yasm_expr *dup;
/* Prefixes */
- if (jmp->lockrep_pre != 0)
- YASM_WRITE_8(*bufp, jmp->lockrep_pre);
- /* FIXME: branch hints! */
- if (jmp->opersize != 0 && jmp->opersize != jmp->mode_bits)
- YASM_WRITE_8(*bufp, 0x66);
- if (jmp->addrsize != 0 && jmp->addrsize != jmp->mode_bits)
- YASM_WRITE_8(*bufp, 0x67);
+ x86_common_tobytes(&jmp->common, bufp, 0);
/* As opersize may be 0, figure out its "real" value. */
- opersize = (jmp->opersize == 0) ? jmp->mode_bits : jmp->opersize;
+ opersize = (jmp->common.opersize == 0) ?
+ jmp->common.mode_bits : jmp->common.opersize;
/* Check here to see if forced forms are actually legal. */
switch (jmp->op_sel) {
case JMP_SHORT_FORCED:
case JMP_SHORT:
/* 1 byte relative displacement */
- if (jmp->shortop.opcode_len == 0)
+ if (jmp->shortop.len == 0)
yasm_internal_error(N_("short jump does not exist"));
/* Opcode */
- for (i=0; i<jmp->shortop.opcode_len; i++)
- YASM_WRITE_8(*bufp, jmp->shortop.opcode[i]);
+ x86_opcode_tobytes(&jmp->shortop, bufp);
/* Relative displacement */
wrt = yasm_expr_extract_wrt(&jmp->target);
case JMP_NEAR_FORCED:
case JMP_NEAR:
/* 2/4 byte relative displacement (depending on operand size) */
- if (jmp->nearop.opcode_len == 0) {
+ if (jmp->nearop.len == 0) {
yasm__error(bc->line, N_("near jump does not exist"));
return 1;
}
/* Opcode */
- for (i=0; i<jmp->nearop.opcode_len; i++)
- YASM_WRITE_8(*bufp, jmp->nearop.opcode[i]);
+ x86_opcode_tobytes(&jmp->nearop, bufp);
/* Relative displacement */
wrt = yasm_expr_extract_wrt(&jmp->target);
(unsigned long)(*bufp-bufp_orig), bc, 1, 1, d))
return 1;
*bufp += i;
- break;
- case JMP_FAR:
- /* far absolute (4/6 byte depending on operand size) */
- if (jmp->farop.opcode_len == 0) {
- yasm__error(bc->line, N_("far jump does not exist"));
- return 1;
- }
-
- /* Opcode */
- for (i=0; i<jmp->farop.opcode_len; i++)
- YASM_WRITE_8(*bufp, jmp->farop.opcode[i]);
-
- /* Absolute displacement: segment and offset */
- jmp->target = yasm_expr_simplify(jmp->target, NULL);
- dup = yasm_expr_copy(jmp->target);
- targetseg = yasm_expr_extract_segoff(&dup);
- if (!targetseg)
- yasm_internal_error(N_("could not extract segment for far jump"));
- i = (opersize == 16) ? 2 : 4;
- if (output_expr(&dup, *bufp, i, i*8, 0,
- (unsigned long)(*bufp-bufp_orig), bc, 0, 1, d))
- return 1;
- *bufp += i;
- if (output_expr(&targetseg, *bufp, 2, 2*8, 0,
- (unsigned long)(*bufp-bufp_orig), bc, 0, 1, d))
- return 1;
- *bufp += 2;
-
- yasm_expr_destroy(dup);
- yasm_expr_destroy(targetseg);
-
break;
default:
yasm_internal_error(N_("unrecognized relative jump op_sel"));
return 0;
}
+static int
+x86_bc_jmpfar_tobytes(yasm_bytecode *bc, unsigned char **bufp, void *d,
+ yasm_output_expr_func output_expr,
+ /*@unused@*/ yasm_output_reloc_func output_reloc)
+{
+ x86_jmpfar *jmpfar = (x86_jmpfar *)bc->contents;
+ unsigned int i;
+ unsigned char *bufp_orig = *bufp;
+ unsigned char opersize;
+
+ x86_common_tobytes(&jmpfar->common, bufp, 0);
+ x86_opcode_tobytes(&jmpfar->opcode, bufp);
+
+ /* As opersize may be 0, figure out its "real" value. */
+ opersize = (jmpfar->common.opersize == 0) ?
+ jmpfar->common.mode_bits : jmpfar->common.opersize;
+
+ /* Absolute displacement: segment and offset */
+ i = (opersize == 16) ? 2 : 4;
+ if (output_expr(&jmpfar->offset, *bufp, i, i*8, 0,
+ (unsigned long)(*bufp-bufp_orig), bc, 0, 1, d))
+ return 1;
+ *bufp += i;
+ if (output_expr(&jmpfar->segment, *bufp, 2, 2*8, 0,
+ (unsigned long)(*bufp-bufp_orig), bc, 0, 1, d))
+ return 1;
+ *bufp += 2;
+
+ return 0;
+}
+
int
yasm_x86__intnum_fixup_rel(yasm_arch *arch, yasm_intnum *intn, size_t valsize,
const yasm_bytecode *bc, unsigned long line)
* 0 = no target mod acceptable
* 1 = NEAR
* 2 = SHORT
- * 3 = FAR
+ * 3 = FAR (or SEG:OFF immediate)
* 4 = TO
* - 1 bit = effective address size
* 0 = any address size allowed except for 64-bit
* [special case for imul opcode]
* 8 = relative jump (outputs a jmp instead of normal insn)
* 9 = operand size goes into address size (jmp only)
+ * A = far jump (outputs a farjmp instead of normal insn)
* The below describes postponed actions: actions which can't be completed at
* parse-time due to things like EQU and complex expressions. For these, some
* additional data (stored in the second byte of the opcode with a one-byte
* 0 = none
* 1 = shift operation with a ,1 short form (instead of imm8).
* 2 = large imm16/32 that can become a sign-extended imm8.
- * 3 = can be far jump
- * 4 = could become a short opcode mov with bits=64 and a32 prefix
+ * 3 = could become a short opcode mov with bits=64 and a32 prefix
*/
#define OPT_Imm 0x0
#define OPT_Reg 0x1
#define OPA_SpareEA (7UL<<13)
#define OPA_JmpRel (8UL<<13)
#define OPA_AdSizeR (9UL<<13)
+#define OPA_JmpFar (0xAUL<<13)
#define OPA_MASK (0xFUL<<13)
#define OPAP_None (0UL<<17)
#define OPAP_ShiftOp (1UL<<17)
#define OPAP_SImm8Avail (2UL<<17)
-#define OPAP_JmpFar (3UL<<17)
-#define OPAP_ShortMov (4UL<<17)
+#define OPAP_ShortMov (3UL<<17)
#define OPAP_MASK (7UL<<17)
typedef struct x86_insn_info {
{ CPU_Hammer|CPU_64, 0, 64, 0, 0, 0, {0, 0, 0}, 0, 1,
{OPT_Imm|OPS_32|OPA_JmpRel, 0, 0} },
- { CPU_Any, 0, 16, 64, 0, 1, {0xE8, 0x9A, 0}, 0, 1,
- {OPT_Imm|OPS_16|OPTM_Near|OPA_JmpRel|OPAP_JmpFar, 0, 0} },
- { CPU_386|CPU_Not64, 0, 32, 0, 0, 1, {0xE8, 0x9A, 0}, 0, 1,
- {OPT_Imm|OPS_32|OPTM_Near|OPA_JmpRel|OPAP_JmpFar, 0, 0} },
+ { CPU_Any, 0, 16, 64, 0, 1, {0xE8, 0, 0}, 0, 1,
+ {OPT_Imm|OPS_16|OPTM_Near|OPA_JmpRel, 0, 0} },
+ { CPU_386|CPU_Not64, 0, 32, 0, 0, 1, {0xE8, 0, 0}, 0, 1,
+ {OPT_Imm|OPS_32|OPTM_Near|OPA_JmpRel, 0, 0} },
{ CPU_Hammer|CPU_64, 0, 64, 64, 0, 1, {0xE8, 0, 0}, 0, 1,
{OPT_Imm|OPS_32|OPTM_Near|OPA_JmpRel, 0, 0} },
- { CPU_Any, 0, 0, 64, 0, 1, {0xE8, 0x9A, 0}, 0, 1,
- {OPT_Imm|OPS_Any|OPTM_Near|OPA_JmpRel|OPAP_JmpFar, 0, 0} },
+ { CPU_Any, 0, 0, 64, 0, 1, {0xE8, 0, 0}, 0, 1,
+ {OPT_Imm|OPS_Any|OPTM_Near|OPA_JmpRel, 0, 0} },
{ CPU_Any, 0, 16, 0, 0, 1, {0xFF, 0, 0}, 2, 1,
{OPT_RM|OPS_16|OPA_EA, 0, 0} },
{OPT_Mem|OPS_Any|OPTM_Near|OPA_EA, 0, 0} },
{ CPU_Not64, 0, 16, 0, 0, 1, {0x9A, 0, 0}, 3, 1,
- {OPT_Imm|OPS_16|OPTM_Far|OPA_JmpRel, 0, 0} },
+ {OPT_Imm|OPS_16|OPTM_Far|OPA_JmpFar, 0, 0} },
{ CPU_386|CPU_Not64, 0, 32, 0, 0, 1, {0x9A, 0, 0}, 3, 1,
- {OPT_Imm|OPS_32|OPTM_Far|OPA_JmpRel, 0, 0} },
+ {OPT_Imm|OPS_32|OPTM_Far|OPA_JmpFar, 0, 0} },
{ CPU_Not64, 0, 0, 0, 0, 1, {0x9A, 0, 0}, 3, 1,
- {OPT_Imm|OPS_Any|OPTM_Far|OPA_JmpRel, 0, 0} },
+ {OPT_Imm|OPS_Any|OPTM_Far|OPA_JmpFar, 0, 0} },
{ CPU_Any, 0, 16, 0, 0, 1, {0xFF, 0, 0}, 3, 1,
{OPT_Mem|OPS_16|OPTM_Far|OPA_EA, 0, 0} },
{ CPU_Any, 0, 0, 64, 0, 1, {0xEB, 0, 0}, 0, 1,
{OPT_Imm|OPS_Any|OPTM_Short|OPA_JmpRel, 0, 0} },
- { CPU_Any, 0, 16, 64, 0, 1, {0xE9, 0xEA, 0}, 0, 1,
- {OPT_Imm|OPS_16|OPTM_Near|OPA_JmpRel|OPAP_JmpFar, 0, 0} },
- { CPU_386|CPU_Not64, 0, 32, 0, 0, 1, {0xE9, 0xEA, 0}, 0, 1,
- {OPT_Imm|OPS_32|OPTM_Near|OPA_JmpRel|OPAP_JmpFar, 0, 0} },
+ { CPU_Any, 0, 16, 64, 0, 1, {0xE9, 0, 0}, 0, 1,
+ {OPT_Imm|OPS_16|OPTM_Near|OPA_JmpRel, 0, 0} },
+ { CPU_386|CPU_Not64, 0, 32, 0, 0, 1, {0xE9, 0, 0}, 0, 1,
+ {OPT_Imm|OPS_32|OPTM_Near|OPA_JmpRel, 0, 0} },
{ CPU_Hammer|CPU_64, 0, 64, 64, 0, 1, {0xE9, 0, 0}, 0, 1,
{OPT_Imm|OPS_32|OPTM_Near|OPA_JmpRel, 0, 0} },
- { CPU_Any, 0, 0, 64, 0, 1, {0xE9, 0xEA, 0}, 0, 1,
- {OPT_Imm|OPS_Any|OPTM_Near|OPA_JmpRel|OPAP_JmpFar, 0, 0} },
+ { CPU_Any, 0, 0, 64, 0, 1, {0xE9, 0, 0}, 0, 1,
+ {OPT_Imm|OPS_Any|OPTM_Near|OPA_JmpRel, 0, 0} },
{ CPU_Any, 0, 16, 64, 0, 1, {0xFF, 0, 0}, 4, 1,
{OPT_RM|OPS_16|OPA_EA, 0, 0} },
{OPT_Mem|OPS_Any|OPTM_Near|OPA_EA, 0, 0} },
{ CPU_Not64, 0, 16, 0, 0, 1, {0xEA, 0, 0}, 3, 1,
- {OPT_Imm|OPS_16|OPTM_Far|OPA_JmpRel, 0, 0} },
+ {OPT_Imm|OPS_16|OPTM_Far|OPA_JmpFar, 0, 0} },
{ CPU_386|CPU_Not64, 0, 32, 0, 0, 1, {0xEA, 0, 0}, 3, 1,
- {OPT_Imm|OPS_32|OPTM_Far|OPA_JmpRel, 0, 0} },
+ {OPT_Imm|OPS_32|OPTM_Far|OPA_JmpFar, 0, 0} },
{ CPU_Not64, 0, 0, 0, 0, 1, {0xEA, 0, 0}, 3, 1,
- {OPT_Imm|OPS_Any|OPTM_Far|OPA_JmpRel, 0, 0} },
+ {OPT_Imm|OPS_Any|OPTM_Far|OPA_JmpFar, 0, 0} },
{ CPU_Any, 0, 16, 0, 0, 1, {0xFF, 0, 0}, 5, 1,
{OPT_Mem|OPS_16|OPTM_Far|OPA_EA, 0, 0} },
};
+static void
+x86_finalize_common(x86_common *common, x86_insn_info *info,
+ unsigned int mode_bits)
+{
+ common->addrsize = 0;
+ common->opersize = info->opersize;
+ common->lockrep_pre = 0;
+ common->mode_bits = (unsigned char)mode_bits;
+}
+
+static void
+x86_finalize_opcode(x86_opcode *opcode, x86_insn_info *info)
+{
+ opcode->len = info->opcode_len;
+ opcode->opcode[0] = info->opcode[0];
+ opcode->opcode[1] = info->opcode[1];
+ opcode->opcode[2] = info->opcode[2];
+}
+
+static void
+x86_finalize_jmpfar(yasm_arch *arch, yasm_bytecode *bc,
+ const unsigned long data[4], int num_operands,
+ yasm_insn_operands *operands, int num_prefixes,
+ unsigned long **prefixes, x86_insn_info *info)
+{
+ x86_jmpfar *jmpfar;
+ yasm_insn_operand *op;
+
+ jmpfar = yasm_xmalloc(sizeof(x86_jmpfar));
+ x86_finalize_common(&jmpfar->common, info, data[3]);
+ x86_finalize_opcode(&jmpfar->opcode, info);
+
+ op = yasm_ops_first(operands);
+
+ switch (op->targetmod) {
+ case X86_FAR:
+ /* "FAR imm" target needs to become "seg imm:imm". */
+ jmpfar->offset = yasm_expr_copy(op->data.val);
+ jmpfar->segment = yasm_expr_create_branch(YASM_EXPR_SEG,
+ op->data.val, bc->line);
+ break;
+ case X86_FAR_SEGOFF:
+ /* SEG:OFF expression; split it. */
+ jmpfar->offset = op->data.val;
+ jmpfar->segment = yasm_expr_extract_segoff(&jmpfar->offset);
+ if (!jmpfar->segment)
+ yasm_internal_error(N_("didn't get SEG:OFF expression in jmpfar"));
+ break;
+ default:
+ yasm_internal_error(N_("didn't get FAR expression in jmpfar"));
+ }
+
+ /* Transform the bytecode */
+ yasm_x86__bc_transform_jmpfar(bc, jmpfar);
+ yasm_x86__bc_apply_prefixes(bc, num_prefixes, prefixes);
+}
+
static void
x86_finalize_jmp(yasm_arch *arch, yasm_bytecode *bc, yasm_bytecode *prev_bc,
const unsigned long data[4], int num_operands,
yasm_insn_operands *operands, int num_prefixes,
- unsigned long **prefixes, int num_segregs,
- const unsigned long *segregs, x86_insn_info *jinfo)
+ unsigned long **prefixes, x86_insn_info *jinfo)
{
yasm_arch_x86 *arch_x86 = (yasm_arch_x86 *)arch;
x86_jmp *jmp;
yasm_insn_operand *op;
static const unsigned char size_lookup[] = {0, 8, 16, 32, 64, 80, 128, 0};
- jmp = yasm_xmalloc(sizeof(x86_jmp));
- jmp->mode_bits = mode_bits;
- jmp->lockrep_pre = 0;
-
/* We know the target is in operand 0, but sanity check for Imm. */
op = yasm_ops_first(operands);
if (op->type != YASM_INSN__OPERAND_IMM)
yasm_internal_error(N_("invalid operand conversion"));
- /* Far target needs to become "seg imm:imm". */
- if ((jinfo->operands[0] & OPTM_MASK) == OPTM_Far) {
- yasm_expr *copy = yasm_expr_copy(op->data.val);
- jmp->target = yasm_expr_create_tree(
- yasm_expr_create_branch(YASM_EXPR_SEG, op->data.val, bc->line),
- YASM_EXPR_SEGOFF, copy, bc->line);
- } else
- jmp->target = op->data.val;
+ jmp = yasm_xmalloc(sizeof(x86_jmp));
+ x86_finalize_common(&jmp->common, jinfo, mode_bits);
+ jmp->target = op->data.val;
/* Need to save jump origin for relative jumps. */
jmp->origin = yasm_symtab_define_label2("$", prev_bc, 0, bc->line);
- /* Initially assume no far opcode is available. */
- jmp->farop.opcode_len = 0;
-
/* See if the user explicitly specified short/near/far. */
switch ((int)(jinfo->operands[0] & OPTM_MASK)) {
case OPTM_Short:
case OPTM_Near:
jmp->op_sel = JMP_NEAR_FORCED;
break;
- case OPTM_Far:
- jmp->op_sel = JMP_FAR;
- jmp->farop.opcode_len = info->opcode_len;
- jmp->farop.opcode[0] = info->opcode[0];
- jmp->farop.opcode[1] = info->opcode[1];
- jmp->farop.opcode[2] = info->opcode[2];
- break;
default:
jmp->op_sel = JMP_NONE;
}
- /* Set operand size */
- jmp->opersize = jinfo->opersize;
-
/* Check for address size setting in second operand, if present */
if (jinfo->num_operands > 1 &&
(jinfo->operands[1] & OPA_MASK) == OPA_AdSizeR)
- jmp->addrsize = (unsigned char)size_lookup[(jinfo->operands[1] &
- OPS_MASK)>>OPS_SHIFT];
- else
- jmp->addrsize = 0;
+ jmp->common.addrsize = (unsigned char)
+ size_lookup[(jinfo->operands[1] & OPS_MASK)>>OPS_SHIFT];
/* Check for address size override */
if (jinfo->modifiers & MOD_AdSizeR)
- jmp->addrsize = (unsigned char)(mod_data & 0xFF);
+ jmp->common.addrsize = (unsigned char)(mod_data & 0xFF);
/* Scan through other infos for this insn looking for short/near versions.
* Needs to match opersize and number of operands, also be within CPU.
*/
- jmp->shortop.opcode_len = 0;
- jmp->nearop.opcode_len = 0;
- for (; num_info>0 && (jmp->shortop.opcode_len == 0 ||
- jmp->nearop.opcode_len == 0);
+ jmp->shortop.len = 0;
+ jmp->nearop.len = 0;
+ for (; num_info>0 && (jmp->shortop.len == 0 || jmp->nearop.len == 0);
num_info--, info++) {
unsigned long cpu = info->cpu | data[2];
if ((info->operands[0] & OPA_MASK) != OPA_JmpRel)
continue;
- if (info->opersize != jmp->opersize)
+ if (info->opersize != jmp->common.opersize)
continue;
switch ((int)(info->operands[0] & OPTM_MASK)) {
case OPTM_Short:
- jmp->shortop.opcode_len = info->opcode_len;
- jmp->shortop.opcode[0] = info->opcode[0];
- jmp->shortop.opcode[1] = info->opcode[1];
- jmp->shortop.opcode[2] = info->opcode[2];
+ x86_finalize_opcode(&jmp->shortop, info);
if (info->modifiers & MOD_Op0Add)
jmp->shortop.opcode[0] += (unsigned char)(mod_data & 0xFF);
break;
case OPTM_Near:
- jmp->nearop.opcode_len = info->opcode_len;
- jmp->nearop.opcode[0] = info->opcode[0];
- jmp->nearop.opcode[1] = info->opcode[1];
- jmp->nearop.opcode[2] = info->opcode[2];
+ x86_finalize_opcode(&jmp->nearop, info);
if (info->modifiers & MOD_Op1Add)
jmp->nearop.opcode[1] += (unsigned char)(mod_data & 0xFF);
- if ((info->operands[0] & OPAP_MASK) == OPAP_JmpFar) {
- jmp->farop.opcode_len = 1;
- jmp->farop.opcode[0] = info->opcode[info->opcode_len];
- }
break;
}
}
- if ((jmp->op_sel == JMP_SHORT_FORCED) && (jmp->nearop.opcode_len == 0))
+ if ((jmp->op_sel == JMP_SHORT_FORCED) && (jmp->nearop.len == 0))
yasm__error(bc->line,
N_("no SHORT form of that jump instruction exists"));
- if ((jmp->op_sel == JMP_NEAR_FORCED) && (jmp->shortop.opcode_len == 0))
+ if ((jmp->op_sel == JMP_NEAR_FORCED) && (jmp->shortop.len == 0))
yasm__error(bc->line,
N_("no NEAR form of that jump instruction exists"));
/* Transform the bytecode */
yasm_x86__bc_transform_jmp(bc, jmp);
- yasm_x86__bc_apply_prefixes(bc, num_prefixes, prefixes, num_segregs,
- segregs);
+ yasm_x86__bc_apply_prefixes(bc, num_prefixes, prefixes);
}
void
int i;
static const unsigned int size_lookup[] = {0, 1, 2, 4, 8, 10, 16, 0};
+ /* First look for SEG:OFF operands and apply X86_FAR_SEGOFF targetmod. */
+ for (i = 0, op = yasm_ops_first(operands); op && i<info->num_operands;
+ op = yasm_operand_next(op), i++) {
+ if (op->type == YASM_INSN__OPERAND_IMM && op->targetmod == 0 &&
+ yasm_expr_is_op(op->data.val, YASM_EXPR_SEGOFF))
+ op->targetmod = X86_FAR_SEGOFF;
+ }
+
/* Just do a simple linear search through the info array for a match.
* First match wins.
*/
mismatch = 1;
break;
case OPTM_Far:
- if (op->targetmod != X86_FAR)
+ if (op->targetmod != X86_FAR &&
+ op->targetmod != X86_FAR_SEGOFF)
mismatch = 1;
break;
case OPTM_To:
yasm_internal_error(N_("unrecognized x86 extended modifier"));
}
- /* Shortcut to JmpRel */
- if (operands && (info->operands[0] & OPA_MASK) == OPA_JmpRel) {
- x86_finalize_jmp(arch, bc, prev_bc, data, num_operands, operands,
- num_prefixes, prefixes, num_segregs, segregs, info);
- return;
+ if (operands) {
+ switch (info->operands[0] & OPA_MASK) {
+ case OPA_JmpRel:
+ /* Shortcut to JmpRel */
+ x86_finalize_jmp(arch, bc, prev_bc, data, num_operands,
+ operands, num_prefixes, prefixes, info);
+ return;
+ case OPA_JmpFar:
+ /* Shortcut to JmpFar */
+ x86_finalize_jmpfar(arch, bc, data, num_operands, operands,
+ num_prefixes, prefixes, info);
+ return;
+ }
}
/* Copy what we can from info */
insn = yasm_xmalloc(sizeof(x86_insn));
- insn->mode_bits = mode_bits;
+ x86_finalize_common(&insn->common, info, mode_bits);
+ x86_finalize_opcode(&insn->opcode, info);
insn->ea = NULL;
origin = NULL;
imm = NULL;
- insn->addrsize = 0;
- insn->opersize = info->opersize;
- insn->lockrep_pre = 0;
insn->def_opersize_64 = info->def_opersize_64;
insn->special_prefix = info->special_prefix;
- insn->opcode_len = info->opcode_len;
- insn->opcode[0] = info->opcode[0];
- insn->opcode[1] = info->opcode[1];
- insn->opcode[2] = info->opcode[2];
spare = info->spare;
im_len = 0;
im_sign = 0;
if (info->modifiers & MOD_Gap0)
mod_data >>= 8;
if (info->modifiers & MOD_Op2Add) {
- insn->opcode[2] += (unsigned char)(mod_data & 0xFF);
+ insn->opcode.opcode[2] += (unsigned char)(mod_data & 0xFF);
mod_data >>= 8;
}
if (info->modifiers & MOD_Gap1)
mod_data >>= 8;
if (info->modifiers & MOD_Op1Add) {
- insn->opcode[1] += (unsigned char)(mod_data & 0xFF);
+ insn->opcode.opcode[1] += (unsigned char)(mod_data & 0xFF);
mod_data >>= 8;
}
if (info->modifiers & MOD_Gap2)
mod_data >>= 8;
if (info->modifiers & MOD_Op0Add) {
- insn->opcode[0] += (unsigned char)(mod_data & 0xFF);
+ insn->opcode.opcode[0] += (unsigned char)(mod_data & 0xFF);
mod_data >>= 8;
}
if (info->modifiers & MOD_PreAdd) {
mod_data >>= 8;
}
if (info->modifiers & MOD_OpSizeR) {
- insn->opersize = (unsigned char)(mod_data & 0xFF);
+ insn->common.opersize = (unsigned char)(mod_data & 0xFF);
mod_data >>= 8;
}
if (info->modifiers & MOD_Imm8) {
N_("invalid combination of opcode and operands"));
return;
}
- insn->opcode[0] += opadd;
+ insn->opcode.opcode[0] += opadd;
} else
yasm_internal_error(N_("invalid operand conversion"));
break;
N_("invalid combination of opcode and operands"));
return;
}
- insn->opcode[1] += opadd;
+ insn->opcode.opcode[1] += opadd;
} else
yasm_internal_error(N_("invalid operand conversion"));
break;
}
}
- if (insn->ea)
+ if (insn->ea) {
yasm_x86__ea_init(insn->ea, spare, origin);
+ for (i=0; i<num_segregs; i++)
+ yasm_ea_set_segreg(insn->ea, segregs[i], bc->line);
+ }
if (imm) {
insn->imm = yasm_imm_create_expr(imm);
insn->imm->len = im_len;
/* Transform the bytecode */
yasm_x86__bc_transform_insn(bc, insn);
- yasm_x86__bc_apply_prefixes(bc, num_prefixes, prefixes, num_segregs,
- segregs);
+ yasm_x86__bc_apply_prefixes(bc, num_prefixes, prefixes);
}