static int zend_may_overflow(const zend_op *opline, const zend_op_array *op_array, zend_ssa *ssa);
static void ZEND_FASTCALL zend_runtime_jit(void);
+static int zend_jit_trace_op_len(const zend_op *opline);
static int zend_jit_trace_may_exit(const zend_op_array *op_array, const zend_op *opline, zend_jit_trace_rec *trace);
static uint32_t zend_jit_trace_get_exit_point(const zend_op *from_opline, const zend_op *to_opline, zend_jit_trace_rec *trace, uint32_t flags);
static const void *zend_jit_trace_get_exit_addr(uint32_t n);
case ZEND_INIT_FCALL:
case ZEND_INIT_FCALL_BY_NAME:
case ZEND_INIT_NS_FCALL_BY_NAME:
- if (!zend_jit_init_fcall(&dasm_state, opline, b, op_array, ssa, call_level, NULL)) {
+ if (!zend_jit_init_fcall(&dasm_state, opline, b, op_array, ssa, ssa_op, call_level, NULL)) {
goto jit_failure;
}
goto done;
#define ZEND_JIT_EXIT_JITED (1<<0)
#define ZEND_JIT_EXIT_BLACKLISTED (1<<1)
#define ZEND_JIT_EXIT_TO_VM (1<<2) /* exit to the VM without attempting to create a side trace */
+#define ZEND_JIT_EXIT_RESTORE_CALL (1<<3) /* deoptimizer should restore EX(call) chain */
typedef union _zend_op_trace_info {
zend_op dummy; /* the size of this structure must be the same as zend_op */
uint32_t stack_size;
zend_jit_trace_stack *stack = NULL;
+ if (delayed_call_chain) {
+ assert(to_opline != NULL); /* CALL and IP share the same register */
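+		/* A delayed call frame is reachable only through RX here; mark the exit so the deoptimizer re-links it into the EX(call) chain */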
+ flags |= ZEND_JIT_EXIT_RESTORE_CALL;
+ }
if (JIT_G(current_frame)) {
op_array = &JIT_G(current_frame)->func->op_array;
stack_size = op_array->last_var + op_array->T;
}
}
- // TODO: Merge two loops implementing parallel move ???
- for (i = 0; i < parent_vars_count; i++) {
- if (STACK_REG(parent_stack, i) != ZREG_NONE) {
- if (ra && ra[i] && ra[i]->reg == STACK_REG(parent_stack, i)) {
- /* register already loaded by parent trace */
- SET_STACK_REG(stack, i, ra[i]->reg);
- } else if (!zend_jit_store_var(&dasm_state, ssa->var_info[i].type, i, STACK_REG(parent_stack, i))) {
- goto jit_failure;
+ if (parent_trace) {
+ /* Deoptimization */
+
+ // TODO: Merge this loop with the following LOAD loop to implement parallel move ???
+ for (i = 0; i < parent_vars_count; i++) {
+ if (STACK_REG(parent_stack, i) != ZREG_NONE) {
+ if (ra && ra[i] && ra[i]->reg == STACK_REG(parent_stack, i)) {
+ /* register already loaded by parent trace */
+ SET_STACK_REG(stack, i, ra[i]->reg);
+ } else if (!zend_jit_store_var(&dasm_state, ssa->var_info[i].type, i, STACK_REG(parent_stack, i))) {
+ goto jit_failure;
+ }
}
}
+
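+		/* The parent trace exited with a delayed call frame; emit code to restore the EX(call) chain before this side trace continues */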
+ if (zend_jit_traces[parent_trace].exit_info[exit_num].flags & ZEND_JIT_EXIT_RESTORE_CALL) {
+ zend_jit_save_call_chain(&dasm_state, -1);
+ }
}
if (ra
case ZEND_INIT_FCALL:
case ZEND_INIT_FCALL_BY_NAME:
case ZEND_INIT_NS_FCALL_BY_NAME:
- if (!zend_jit_init_fcall(&dasm_state, opline, op_array_ssa->cfg.map ? op_array_ssa->cfg.map[opline - op_array->opcodes] : -1, op_array, op_array_ssa, frame->call_level, p + 1)) {
+ if (!zend_jit_init_fcall(&dasm_state, opline, op_array_ssa->cfg.map ? op_array_ssa->cfg.map[opline - op_array->opcodes] : -1, op_array, ssa, ssa_op, frame->call_level, p + 1)) {
goto jit_failure;
}
goto done;
static int zend_jit_trace_exit_needs_deoptimization(uint32_t trace_num, uint32_t exit_num)
{
const zend_op *opline = zend_jit_traces[trace_num].exit_info[exit_num].opline;
+ uint32_t flags = zend_jit_traces[trace_num].exit_info[exit_num].flags;
uint32_t stack_size;
zend_jit_trace_stack *stack;
- if (opline) {
+ if (opline || (flags & ZEND_JIT_EXIT_RESTORE_CALL)) {
return 1;
}
zend_jit_align_func(&dasm_state);
/* Deoptimization */
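+	/* Restore the EX(call) chain first if this exit was taken with a delayed call frame */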
+ if (zend_jit_traces[trace_num].exit_info[exit_num].flags & ZEND_JIT_EXIT_RESTORE_CALL) {
+ zend_jit_save_call_chain(&dasm_state, -1);
+ }
stack_size = zend_jit_traces[trace_num].exit_info[exit_num].stack_size;
stack = zend_jit_traces[trace_num].stack_map + zend_jit_traces[trace_num].exit_info[exit_num].stack_offset;
for (i = 0; i < stack_size; i++) {
if (t->exit_info[i].flags & ZEND_JIT_EXIT_TO_VM) {
fprintf(stderr, "/VM");
}
+ if (t->exit_info[i].flags & ZEND_JIT_EXIT_RESTORE_CALL) {
+ fprintf(stderr, "/CALL");
+ }
for (j = 0; j < stack_size; j++) {
zend_uchar type = STACK_TYPE(stack, j);
if (type != IS_UNKNOWN) {
uint32_t stack_size = t->exit_info[exit_num].stack_size;
zend_jit_trace_stack *stack = t->stack_map + t->exit_info[exit_num].stack_offset;
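+	/* The exiting trace kept a not-yet-linked call frame in the RX register; push it onto the EX(call) chain */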
+ if (t->exit_info[exit_num].flags & ZEND_JIT_EXIT_RESTORE_CALL) {
+ zend_execute_data *call = (zend_execute_data *)regs->r[ZREG_RX];
+ call->prev_execute_data = EX(call);
+ EX(call) = call;
+ }
+
for (i = 0; i < stack_size; i++) {
if (STACK_REG(stack, i) != ZREG_NONE) {
if (STACK_TYPE(stack, i) == IS_LONG) {
| // Save CPU registers
|.if X64
| sub r4, 16*8+16*8-8 /* CPU regs + SSE regs */
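+	| // r15 may hold a delayed call frame (RX); keep its value for the deoptimizer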
+ | mov aword [r4+15*8], r15
| mov aword [r4+11*8], r11
| mov aword [r4+10*8], r10
| mov aword [r4+9*8], r9
| movsd qword [r4+16*8+0*8], xmm0
|.else
| sub r4, 8*4+8*8-4 /* CPU regs + SSE regs */
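+	| // likewise on x86: edi may hold a delayed call frame (RX), so save it as well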
+ | mov aword [r4+7*4], edi
| mov aword [r4+2*4], edx
| mov aword [r4+1*4], ecx
| mov aword [r4+0*4], eax
return 1;
}
-static int zend_jit_needs_call_chain(zend_call_info *call_info, uint32_t b, const zend_op_array *op_array, zend_ssa *ssa, const zend_op *opline)
+static int zend_jit_needs_call_chain(zend_call_info *call_info, uint32_t b, const zend_op_array *op_array, zend_ssa *ssa, const zend_ssa_op *ssa_op, const zend_op *opline, zend_jit_trace_rec *trace)
{
int skip;
+ if (trace) {
+ zend_jit_trace_rec *p = trace;
+
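+		/* Scan the rest of the trace: the call chain must be stored unless the matching DO_*CALL is reached without a branch, another call initialization, or an op that may throw */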
+ ssa_op++;
+ while (1) {
+ if (p->op == ZEND_JIT_TRACE_VM) {
+ switch (p->opline->opcode) {
+ case ZEND_SEND_ARRAY:
+ case ZEND_SEND_USER:
+ case ZEND_SEND_UNPACK:
+ case ZEND_INIT_FCALL:
+ case ZEND_INIT_METHOD_CALL:
+ case ZEND_INIT_STATIC_METHOD_CALL:
+ case ZEND_INIT_FCALL_BY_NAME:
+ case ZEND_INIT_NS_FCALL_BY_NAME:
+ case ZEND_INIT_DYNAMIC_CALL:
+ case ZEND_NEW:
+ case ZEND_INIT_USER_CALL:
+ case ZEND_FAST_CALL:
+ case ZEND_JMP:
+ case ZEND_JMPZNZ:
+ case ZEND_JMPZ:
+ case ZEND_JMPNZ:
+ case ZEND_JMPZ_EX:
+ case ZEND_JMPNZ_EX:
+ case ZEND_FE_RESET_R:
+ case ZEND_FE_RESET_RW:
+ case ZEND_JMP_SET:
+ case ZEND_COALESCE:
+ case ZEND_ASSERT_CHECK:
+ case ZEND_CATCH:
+ case ZEND_DECLARE_ANON_CLASS:
+ case ZEND_FE_FETCH_R:
+ case ZEND_FE_FETCH_RW:
+ return 1;
+ case ZEND_DO_ICALL:
+ case ZEND_DO_UCALL:
+ case ZEND_DO_FCALL_BY_NAME:
+ case ZEND_DO_FCALL:
+ return 0;
+ case ZEND_SEND_VAL:
+ case ZEND_SEND_VAR:
+ case ZEND_SEND_VAL_EX:
+ case ZEND_SEND_VAR_EX:
+ case ZEND_SEND_FUNC_ARG:
+ case ZEND_SEND_REF:
+ case ZEND_SEND_VAR_NO_REF:
+ case ZEND_SEND_VAR_NO_REF_EX:
+ /* skip */
+ break;
+ default:
+						if (zend_may_throw(p->opline, ssa_op, op_array, ssa)) {
+ return 1;
+ }
+ }
+				ssa_op += zend_jit_trace_op_len(p->opline);
+ } else if (p->op == ZEND_JIT_TRACE_ENTER ||
+ p->op == ZEND_JIT_TRACE_BACK ||
+ p->op == ZEND_JIT_TRACE_END) {
+ return 1;
+ }
+ p++;
+ }
+ }
+
if (!call_info) {
const zend_op *end = op_array->opcodes + op_array->last;
opline++;
+ ssa_op++;
skip = 1;
while (opline != end) {
if (!skip) {
- zend_ssa_op *ssa_op = &ssa->ops[opline - op_array->opcodes];
if (zend_may_throw(opline, ssa_op, op_array, ssa)) {
return 1;
}
return 0;
}
opline++;
+ ssa_op++;
}
return 1;
}
opline++;
+ ssa_op++;
skip = 1;
while (opline != end) {
if (skip) {
return 1;
}
} else {
- zend_ssa_op *ssa_op = &ssa->ops[opline - op_array->opcodes];
if (zend_may_throw(opline, ssa_op, op_array, ssa)) {
return 1;
}
}
opline++;
+ ssa_op++;
}
return 0;
return 1;
}
-static int zend_jit_init_fcall(dasm_State **Dst, const zend_op *opline, uint32_t b, const zend_op_array *op_array, zend_ssa *ssa, int call_level, zend_jit_trace_rec *trace)
+static int zend_jit_init_fcall(dasm_State **Dst, const zend_op *opline, uint32_t b, const zend_op_array *op_array, zend_ssa *ssa, const zend_ssa_op *ssa_op, int call_level, zend_jit_trace_rec *trace)
{
zend_func_info *info = ZEND_FUNC_INFO(op_array);
zend_call_info *call_info = NULL;
return 0;
}
- if (trace || zend_jit_needs_call_chain(call_info, b, op_array, ssa, opline)) {
+ if (zend_jit_needs_call_chain(call_info, b, op_array, ssa, ssa_op, opline, trace)) {
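+		/* The call chain cannot be kept delayed here (e.g. something before the matching DO_*CALL may throw or start another call), so store it now */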
if (!zend_jit_save_call_chain(Dst, call_level)) {
return 0;
}