Mirror of https://github.com/LuaJIT/LuaJIT.git
Drop range limit for sunk stores relative to sunk allocation.
Commit 7c056488d9 (parent bd782cedd5)
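
In short: a sunk store is tied to its allocation by recording the IR-index distance in the store's 8-bit spill-slot byte via REGSP(RID_SINK, delta), so stores more than 255 instructions after the allocation previously could not be sunk at all (sink_checkalloc rejected them as out of range). After this commit the delta saturates at 255, and that value doubles as an "out of range" sentinel: consumers keep the cheap "allocation + delta == store" check when the byte holds an exact delta, and fall back to chasing the store's key operand back to the allocation when the byte reads 255. The sketch below is a minimal, self-contained model of that encode/decode scheme; the IRNode struct and the helper names are illustrative, not LuaJIT's.

/* Minimal model of the saturating-delta scheme; not LuaJIT source. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct IRNode {
  int key_parent;   /* Index of the key/allocation node this store refers to. */
  uint8_t slot;     /* Stand-in for the 8-bit spill-slot byte (irs->s). */
} IRNode;

/* Encode side (cf. sink_sweep_ins): saturate the distance at 255. */
static uint8_t encode_delta(ptrdiff_t delta)
{
  return delta > 255 ? 255 : (uint8_t)delta;  /* 255 doubles as the sentinel. */
}

/* Decode side (cf. asm_sunk_store/snap_sunk_store): fast check, else walk back. */
static int sunk_store_matches(const IRNode *ir, int alloc, int store)
{
  uint8_t s = ir[store].slot;
  if (s != 255)
    return alloc + s == store;  /* Fast path: the byte is the exact delta. */
  /* Slow path: follow the store's key operand chain back to the allocation. */
  return ir[ir[store].key_parent].key_parent == alloc;
}

int main(void)
{
  enum { N = 1000 };
  static IRNode ir[N];
  int alloc = 1, near_store = 100, far_store = 900;
  ir[near_store].slot = encode_delta(near_store - alloc);  /* 99: exact delta. */
  ir[far_store].slot = encode_delta(far_store - alloc);    /* 255: sentinel. */
  ir[far_store].key_parent = 50;   /* Hypothetical AREF-like key node... */
  ir[50].key_parent = alloc;       /* ...whose operand leads to the allocation. */
  printf("%d %d\n", sunk_store_matches(ir, alloc, near_store),
         sunk_store_matches(ir, alloc, far_store));  /* Prints: 1 1 */
  return 0;
}
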
src/jit/dump.lua
@@ -378,7 +378,7 @@ local function ridsp_name(ridsp, ins)
   if not disass then disass = require("jit.dis_"..jit.arch) end
   local rid, slot = band(ridsp, 0xff), shr(ridsp, 8)
   if rid == 253 or rid == 254 then
-    return slot == 0 and " {sink" or format(" {%04d", ins-slot)
+    return (slot == 0 or slot == 255) and " {sink" or format(" {%04d", ins-slot)
   end
   if ridsp > 255 then return format("[%x]", slot*4) end
   if rid < 128 then return disass.regname(rid) end
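
About the ridsp value unpacked above: the register id and the spill slot share one 16-bit field, register in the low byte and slot in the high byte, which is why band(ridsp, 0xff)/shr(ridsp, 8) recovers them and why the delta packed via REGSP(RID_SINK, delta) cannot exceed 255. A minimal sketch of such a packing (the MY_ names are illustrative, not LuaJIT's macros):

/* Sketch of a reg+slot packing matching what dump.lua unpacks above. */
#include <assert.h>
#include <stdint.h>

#define MY_REGSP(r, s)    ((uint16_t)((r) | ((unsigned)(s) << 8)))
#define MY_REGSP_REG(rs)  ((rs) & 0xff)   /* cf. band(ridsp, 0xff) */
#define MY_REGSP_SLOT(rs) ((rs) >> 8)     /* cf. shr(ridsp, 8)     */

int main(void)
{
  uint16_t rs = MY_REGSP(254, 255);  /* e.g. a sink marker with a saturated delta */
  assert(MY_REGSP_REG(rs) == 254 && MY_REGSP_SLOT(rs) == 255);
  return 0;
}
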
src/lj_asm.c
@@ -778,6 +778,23 @@ static int asm_snap_canremat(ASMState *as)
   return 0;
 }
 
+/* Check whether a sunk store corresponds to an allocation. */
+static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
+{
+  if (irs->s == 255) {
+    if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+        irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
+      IRIns *irk = IR(irs->op1);
+      if (irk->o == IR_AREF || irk->o == IR_HREFK)
+        irk = IR(irk->op1);
+      return (IR(irk->op1) == ira);
+    }
+    return 0;
+  } else {
+    return (ira + irs->s == irs);  /* Quick check. */
+  }
+}
+
 /* Allocate register or spill slot for a ref that escapes to a snapshot. */
 static void asm_snap_alloc1(ASMState *as, IRRef ref)
 {
@@ -795,8 +812,8 @@ static void asm_snap_alloc1(ASMState *as, IRRef ref)
   else {  /* Allocate stored values for TNEW, TDUP and CNEW. */
     IRIns *irs;
     lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW);
-    for (irs = IR(as->curins); irs > ir; irs--)
-      if (irs->r == RID_SINK && ir + irs->s == irs) {
+    for (irs = IR(as->snapref-1); irs > ir; irs--)
+      if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
        lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
                   irs->o == IR_FSTORE || irs->o == IR_XSTORE);
        asm_snap_alloc1(as, irs->op2);

src/lj_opt_sink.c
@@ -32,8 +32,6 @@ static IRIns *sink_checkalloc(jit_State *J, IRIns *irs)
   ir = IR(ir->op1);
   if (!(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW))
     return NULL;  /* Not an allocation. */
-  if (ir + 255 < irs)
-    return NULL;  /* Out of range. */
   return ir;  /* Return allocation. */
 }
 
@@ -173,10 +171,12 @@ static void sink_sweep_ins(jit_State *J)
     switch (ir->o) {
     case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
       IRIns *ira = sink_checkalloc(J, ir);
-      if (ira && !irt_ismarked(ira->t))
-        ir->prev = REGSP(RID_SINK, (int)(ir - ira));
-      else
+      if (ira && !irt_ismarked(ira->t)) {
+        int delta = (int)(ir - ira);
+        ir->prev = REGSP(RID_SINK, delta > 255 ? 255 : delta);
+      } else {
         ir->prev = REGSP_INIT;
+      }
       break;
     }
     case IR_NEWREF:

src/lj_snap.c
@@ -403,6 +403,27 @@ static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
   return tr;
 }
 
+/* Check whether a sunk store corresponds to an allocation. Slow path. */
+static int snap_sunk_store2(jit_State *J, IRIns *ira, IRIns *irs)
+{
+  if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+      irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
+    IRIns *irk = IR(irs->op1);
+    if (irk->o == IR_AREF || irk->o == IR_HREFK)
+      irk = IR(irk->op1);
+    return (IR(irk->op1) == ira);
+  }
+  return 0;
+}
+
+/* Check whether a sunk store corresponds to an allocation. Fast path. */
+static LJ_AINLINE int snap_sunk_store(jit_State *J, IRIns *ira, IRIns *irs)
+{
+  if (irs->s != 255)
+    return (ira + irs->s == irs);  /* Fast check. */
+  return snap_sunk_store2(J, ira, irs);
+}
+
 /* Replay snapshot state to setup side trace. */
 void lj_snap_replay(jit_State *J, GCtrace *T)
 {
@@ -464,7 +485,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
     } else {
       IRIns *irs;
       for (irs = ir+1; irs < irlast; irs++)
-       if (irs->r == RID_SINK && ir + irs->s == irs) {
+       if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
          if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
            snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
          else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
@@ -504,7 +525,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
        TRef tr = emitir(ir->ot, op1, op2);
        J->slot[snap_slot(sn)] = tr;
        for (irs = ir+1; irs < irlast; irs++)
-         if (irs->r == RID_SINK && ir + irs->s == irs) {
+         if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
            IRIns *irr = &T->ir[irs->op1];
            TRef val, key = irr->op2, tmp = tr;
            if (irr->o != IR_FREF) {
@@ -700,7 +721,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
   } else {
     IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
     for (irs = ir+1; irs < irlast; irs++)
-      if (irs->r == RID_SINK && ir + irs->s == irs) {
+      if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
        IRIns *iro = &T->ir[T->ir[irs->op1].op2];
        uint8_t *p = (uint8_t *)cd;
        CTSize szs;
@@ -733,7 +754,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
     settabV(J->L, o, t);
     irlast = &T->ir[T->snap[snapno].ref];
     for (irs = ir+1; irs < irlast; irs++)
-      if (irs->r == RID_SINK && ir + irs->s == irs) {
+      if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
        IRIns *irk = &T->ir[irs->op1];
        TValue tmp, *val;
        lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||