Fix some portability issues with the JIT compiler.

Mike Pall 2011-05-09 18:16:39 +02:00
parent 28e87d33e9
commit 67d3ac9b19
6 changed files with 38 additions and 17 deletions

src/lib_jit.c

@@ -368,12 +368,16 @@ LJLIB_CF(jit_util_tracemc)
 /* local addr = jit.util.traceexitstub(idx) */
 LJLIB_CF(jit_util_traceexitstub)
 {
+#ifdef EXITSTUBS_PER_GROUP
   ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1);
   jit_State *J = L2J(L);
   if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) {
     setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno));
     return 1;
   }
+#else
+  UNUSED(L);
+#endif
   return 0;
 }

src/lj_dispatch.c

@@ -211,10 +211,15 @@ int luaJIT_setmode(lua_State *L, int idx, int mode)
     } else {
       if (!(mode & LUAJIT_MODE_ON))
         G2J(g)->flags &= ~(uint32_t)JIT_F_ON;
+#if LJ_TARGET_X86ORX64
       else if ((G2J(g)->flags & JIT_F_SSE2))
         G2J(g)->flags |= (uint32_t)JIT_F_ON;
       else
         return 0;  /* Don't turn on JIT compiler without SSE2 support. */
+#else
+      else
+        G2J(g)->flags |= (uint32_t)JIT_F_ON;
+#endif
       lj_dispatch_update(g);
     }
     break;
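
The SSE2 gate above only applies to x86/x64 builds; any other target now simply turns the engine on. Either way, the return value of luaJIT_setmode() tells the host whether the request was honoured (0 means it was refused, e.g. on an x86 CPU without SSE2). A minimal host-side sketch, illustrative only and not part of this commit:

#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
#include <luajit.h>

int main(void)
{
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  /* Ask for the JIT engine to be (re-)enabled and check whether it refused. */
  if (!luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|LUAJIT_MODE_ON))
    fprintf(stderr, "JIT compiler unavailable, running interpreted\n");
  lua_close(L);
  return 0;
}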

src/lj_jit.h

@@ -27,7 +27,8 @@
 #define JIT_F_CPU_FIRST JIT_F_CMOV
 #define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM"
 #else
-#error "Missing CPU-specific JIT engine flags"
+#define JIT_F_CPU_FIRST 0
+#define JIT_F_CPUSTRING ""
 #endif
 /* Optimization flags. */
@@ -118,7 +119,11 @@ typedef enum {
 } PostProc;
 /* Machine code type. */
+#if LJ_TARGET_X86ORX64
 typedef uint8_t MCode;
+#else
+typedef uint32_t MCode;
+#endif
 /* Stack snapshot header. */
 typedef struct SnapShot {
@@ -252,6 +257,13 @@ enum {
 #define lj_resetsplit(J) UNUSED(J)
 #endif
+/* Exit stubs. */
+#if LJ_TARGET_X86ORX64
+/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
+#define EXITSTUB_SPACING (2+2)
+#define EXITSTUBS_PER_GROUP 32
+#endif
 /* Fold state is used to fold instructions on-the-fly. */
 typedef struct FoldState {
   IRIns ins;  /* Currently emitted instruction. */
@@ -318,7 +330,9 @@ typedef struct jit_State {
   int32_t param[JIT_P__MAX];  /* JIT engine parameters. */
+#if LJ_TARGET_X86ORX64
   MCode *exitstubgroup[LJ_MAX_EXITSTUBGR];  /* Exit stub group addresses. */
+#endif
   HotPenalty penalty[PENALTY_SLOTS];  /* Penalty slots. */
   uint32_t penaltyslot;  /* Round-robin index into penalty slots. */
@@ -344,7 +358,7 @@ typedef struct jit_State {
   size_t szallmcarea;  /* Total size of all allocated mcode areas. */
   TValue errinfo;  /* Additional info element for trace errors. */
-} jit_State;
+} LJ_ALIGN(16) jit_State;
 /* Trivial PRNG e.g. used for penalty randomization. */
 static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
@@ -354,21 +368,14 @@ static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
   return J->prngstate >> (32-bits);
 }
-/* Exit stubs. */
-#if LJ_TARGET_X86ORX64
-/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
-#define EXITSTUB_SPACING (2+2)
-#define EXITSTUBS_PER_GROUP 32
-#else
-#error "Missing CPU-specific exit stub definitions"
-#endif
+#ifdef EXITSTUBS_PER_GROUP
 /* Return the address of an exit stub. */
 static LJ_AINLINE MCode *exitstub_addr(jit_State *J, ExitNo exitno)
 {
   lua_assert(J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] != NULL);
-  return J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] +
-         EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP);
+  return (MCode *)((char *)J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] +
+                   EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP));
 }
+#endif
 #endif
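
The MCode typedef above is why exitstub_addr() and the range checks in lj_mcode.c below now go through a (char *) cast: on non-x86 targets MCode is uint32_t, so arithmetic on MCode * advances in 4-byte elements, while EXITSTUB_SPACING and the mcode area sizes are byte counts. A standalone sketch of the pitfall, illustrative only and not LuaJIT code:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t MCode;  /* Machine code element on a hypothetical non-x86 target. */

int main(void)
{
  MCode area[64];
  size_t szbytes = 16;  /* A size kept in bytes, like szmcarea. */
  MCode *wrong = area + szbytes;                     /* Advances 16 elements = 64 bytes. */
  MCode *right = (MCode *)((char *)area + szbytes);  /* Advances 16 bytes = 4 elements. */
  printf("element offsets: %td vs %td\n", wrong - area, right - area);  /* Prints 16 vs 4. */
  return 0;
}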

src/lj_mcode.c

@@ -274,7 +274,7 @@ MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
   } else {
     MCode *mc = J->mcarea;
     /* Try current area first to use the protection cache. */
-    if (ptr >= mc && ptr < mc + J->szmcarea) {
+    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
       mcode_protect(J, MCPROT_GEN);
       return mc;
     }
@@ -282,7 +282,7 @@ MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
     for (;;) {
       mc = ((MCLink *)mc)->next;
       lua_assert(mc != NULL);
-      if (ptr >= mc && ptr < mc + ((MCLink *)mc)->size) {
+      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
         mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN);
         return mc;
       }

src/lj_snap.c

@@ -406,7 +406,10 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
       if (irt_isinteger(t)) {
         setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
       } else if (irt_isnum(t)) {
-        setnumV(o, ex->fpr[r-RID_MIN_FPR]);
+        if (RID_NUM_FPR)
+          setnumV(o, ex->fpr[r-RID_MIN_FPR]);
+        else
+          setnumV(o, *(double *)&ex->gpr[r-RID_MIN_GPR]);
 #if LJ_64
       } else if (irt_islightud(t)) {
         /* 64 bit lightuserdata which may escape already has the tag bits. */
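
The new else branch covers targets whose exit state carries no FPR save slots (RID_NUM_FPR is 0 there), so a spilled number has to be reloaded from the general-purpose register save area instead. A self-contained illustration of the idea, using a hypothetical two-slot save area and memcpy in place of the direct pointer cast used in the patch:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
  uint32_t gpr[2];                           /* Pretend GPR save slots holding a spilled double. */
  double stored = 3.25;
  memcpy(gpr, &stored, sizeof(stored));      /* What the exit conceptually saved. */
  double restored;
  memcpy(&restored, gpr, sizeof(restored));  /* What the snapshot restore reads back. */
  printf("%g\n", restored);                  /* Prints 3.25. */
  return 0;
}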

src/lj_trace.c

@@ -278,7 +278,9 @@ int lj_trace_flushall(lua_State *L)
   memset(J->penalty, 0, sizeof(J->penalty));
   /* Free the whole machine code and invalidate all exit stub groups. */
   lj_mcode_free(J);
+#ifdef EXITSTUBS_PER_GROUP
   memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
+#endif
   lj_vmevent_send(L, TRACE,
     setstrV(L, L->top++, lj_str_newlit(L, "flush"));
   );
@@ -700,7 +702,7 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
   lj_vmevent_send(L, TEXIT,
     ExitState *ex = (ExitState *)exptr;
-    uint32_t i;
+    int32_t i;
     lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
     setintV(L->top++, J->parent);
     setintV(L->top++, J->exitno);