diff --git a/src/lj_dispatch.c b/src/lj_dispatch.c
index 7b3ff80b..83bb4fd8 100644
--- a/src/lj_dispatch.c
+++ b/src/lj_dispatch.c
@@ -414,7 +414,8 @@ ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc)
 #if LJ_HASJIT
   J->L = L;
   if ((uintptr_t)pc & 1) {  /* Marker for hot call. */
-    lj_trace_hot(J, (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1));
+    pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1);
+    lj_trace_hot(J, pc);
     goto out;
   } else if (J->state != LJ_TRACE_IDLE &&
             !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
diff --git a/src/lj_record.c b/src/lj_record.c
index e5a8b208..f4bfd5f7 100644
--- a/src/lj_record.c
+++ b/src/lj_record.c
@@ -2248,6 +2248,11 @@ static const BCIns *rec_setup_root(jit_State *J)
     J->maxslot = ra;
     pc++;
     break;
+  case BC_FUNCF:
+    /* No bytecode range check for root traces started by a hot call. */
+    J->maxslot = J->pt->numparams;
+    pc++;
+    break;
   default:
     lua_assert(0);
     break;
@@ -2370,15 +2375,11 @@ void lj_record_setup(jit_State *J)
     rec_stop(J, TRACE_INTERP);
   } else {  /* Root trace. */
     J->cur.root = 0;
-    if (J->pc >= proto_bc(J->pt)) {  /* Not a hot CALL? */
-      J->cur.startins = *J->pc;
-      J->pc = rec_setup_root(J);
-      /* Note: the loop instruction itself is recorded at the end and not
-      ** at the start! So snapshot #0 needs to point to the *next* instruction.
-      */
-    } else {
-      J->cur.startins = BCINS_ABC(BC_CALL, 0, 0, 0);
-    }
+    J->cur.startins = *J->pc;
+    J->pc = rec_setup_root(J);
+    /* Note: the loop instruction itself is recorded at the end and not
+    ** at the start! So snapshot #0 needs to point to the *next* instruction.
+    */
     lj_snap_add(J);
     if (bc_op(J->cur.startins) == BC_FORL)
       rec_setup_forl(J, J->pc-1);
diff --git a/src/lj_trace.c b/src/lj_trace.c
index ae88f844..3773cffe 100644
--- a/src/lj_trace.c
+++ b/src/lj_trace.c
@@ -163,6 +163,7 @@ static void trace_unpatch(jit_State *J, Trace *T)
   BCOp op = bc_op(T->startins);
   MSize pcofs = T->snap[0].mapofs + T->snap[0].nent;
   BCIns *pc = ((BCIns *)snap_pc(T->snapmap[pcofs])) - 1;
+  UNUSED(J);
   switch (op) {
   case BC_FORL:
     lua_assert(bc_op(*pc) == BC_JFORI);
@@ -181,8 +182,9 @@ static void trace_unpatch(jit_State *J, Trace *T)
     lua_assert(bc_op(*pc) == BC_JITERL && J->trace[bc_d(*pc)] == T);
     *pc = T->startins;
     break;
-  case BC_CALL:
-    lj_trace_err(J, LJ_TRERR_NYILNKF);
+  case BC_FUNCF:
+    lua_assert(bc_op(*pc) == BC_JFUNCF && J->trace[bc_d(*pc)] == T);
+    *pc = T->startins;
     break;
   case BC_JMP:  /* No need to unpatch branches in parent traces (yet). */
   default:
@@ -384,6 +386,7 @@ static void trace_stop(jit_State *J)
     /* fallthrough */
   case BC_LOOP:
   case BC_ITERL:
+  case BC_FUNCF:
     /* Patch bytecode of starting instruction in root trace. */
     setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
     setbc_d(pc, J->curtrace);
@@ -391,9 +394,6 @@
     J->cur.nextroot = pt->trace;
     pt->trace = (TraceNo1)J->curtrace;
     break;
-  case BC_CALL:
-    lj_trace_err(J, LJ_TRERR_NYILNKF);
-    break;
   case BC_JMP:
     /* Patch exit branch in parent to side trace entry. */
     lua_assert(J->parent != 0 && J->cur.root != 0);