Fix some portability issues with the JIT compiler.

commit 67d3ac9b19
parent 28e87d33e9
Author: Mike Pall
Date:   2011-05-09 18:16:39 +02:00

6 changed files with 38 additions and 17 deletions

lib_jit.c

@@ -368,12 +368,16 @@ LJLIB_CF(jit_util_tracemc)
 /* local addr = jit.util.traceexitstub(idx) */
 LJLIB_CF(jit_util_traceexitstub)
 {
+#ifdef EXITSTUBS_PER_GROUP
   ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1);
   jit_State *J = L2J(L);
   if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) {
     setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno));
     return 1;
   }
+#else
+  UNUSED(L);
+#endif
   return 0;
 }
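
Note: the #ifdef EXITSTUBS_PER_GROUP guard (and the matching one in lj_trace.c below) is the usual compile-out pattern: the function keeps its signature on every port, but the body degrades to a stub when the macro is absent, and UNUSED() keeps the parameter referenced so unused-parameter warnings stay quiet. A minimal standalone sketch with illustrative names (not LuaJIT code):

#include <stdio.h>

#define UNUSED(x)  ((void)(x))   /* same idea as LuaJIT's UNUSED() */

/* Define this only on ports that implement the lookup: */
/* #define HAVE_EXITSTUB_LOOKUP */

static int lookup_exitstub(int exitno)
{
#ifdef HAVE_EXITSTUB_LOOKUP
  return exitno * 4;      /* stand-in for the real address computation */
#else
  UNUSED(exitno);         /* keep the parameter referenced, no warning */
  return 0;               /* caller treats 0 as "not available" */
#endif
}

int main(void)
{
  printf("stub for exit 3: %d\n", lookup_exitstub(3));
  return 0;
}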

lj_dispatch.c

@@ -211,10 +211,15 @@ int luaJIT_setmode(lua_State *L, int idx, int mode)
   } else {
     if (!(mode & LUAJIT_MODE_ON))
       G2J(g)->flags &= ~(uint32_t)JIT_F_ON;
+#if LJ_TARGET_X86ORX64
     else if ((G2J(g)->flags & JIT_F_SSE2))
       G2J(g)->flags |= (uint32_t)JIT_F_ON;
     else
       return 0;  /* Don't turn on JIT compiler without SSE2 support. */
+#else
+    else
+      G2J(g)->flags |= (uint32_t)JIT_F_ON;
+#endif
     lj_dispatch_update(g);
   }
   break;
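
Note: the #if LJ_TARGET_X86ORX64 split keeps the SSE2 prerequisite an x86/x64-only concern; other targets can switch the JIT on unconditionally. A rough standalone sketch of that decision, with made-up flag names:

#include <stdint.h>
#include <stdio.h>

#define FLAG_ON    0x01u
#define FLAG_SSE2  0x02u

/* Returns 1 if the JIT could be enabled, 0 otherwise. */
static int enable_jit(uint32_t *flags, int target_is_x86)
{
  if (target_is_x86 && !(*flags & FLAG_SSE2))
    return 0;              /* x86/x64 host without SSE2: leave the JIT off */
  *flags |= FLAG_ON;       /* any other target: just turn it on */
  return 1;
}

int main(void)
{
  uint32_t flags = 0;      /* no SSE2 detected */
  printf("x86 without SSE2: %d\n", enable_jit(&flags, 1));   /* 0 */
  printf("non-x86 target:   %d\n", enable_jit(&flags, 0));   /* 1 */
  return 0;
}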

lj_jit.h

@@ -27,7 +27,8 @@
 #define JIT_F_CPU_FIRST JIT_F_CMOV
 #define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM"
 #else
-#error "Missing CPU-specific JIT engine flags"
+#define JIT_F_CPU_FIRST 0
+#define JIT_F_CPUSTRING ""
 #endif
 
 /* Optimization flags. */
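
Note: JIT_F_CPUSTRING packs the flag names as length-prefixed substrings ("\4CMOV" is the 4-character name CMOV, and so on), so the empty string is a valid zero-entry fallback for ports without CPU feature flags. A small sketch of walking that format (decoder written here for illustration, not taken from the LuaJIT sources):

#include <stdio.h>

/* Print each name in a length-prefixed flag string: [len][bytes]... */
static void print_flag_names(const char *s)
{
  while (*s) {
    int len = (unsigned char)*s++;   /* leading byte is the name length */
    printf("%.*s\n", len, s);
    s += len;
  }
}

int main(void)
{
  print_flag_names("\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM");
  print_flag_names("");              /* the portable fallback: no output */
  return 0;
}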
@@ -118,7 +119,11 @@ typedef enum {
 } PostProc;
 
 /* Machine code type. */
+#if LJ_TARGET_X86ORX64
 typedef uint8_t MCode;
+#else
+typedef uint32_t MCode;
+#endif
 
 /* Stack snapshot header. */
 typedef struct SnapShot {
@@ -252,6 +257,13 @@ enum {
 #define lj_resetsplit(J) UNUSED(J)
 #endif
 
+/* Exit stubs. */
+#if LJ_TARGET_X86ORX64
+/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
+#define EXITSTUB_SPACING (2+2)
+#define EXITSTUBS_PER_GROUP 32
+#endif
+
 /* Fold state is used to fold instructions on-the-fly. */
 typedef struct FoldState {
   IRIns ins;  /* Currently emitted instruction. */
@@ -318,7 +330,9 @@ typedef struct jit_State {
   int32_t param[JIT_P__MAX];  /* JIT engine parameters. */
 
+#if LJ_TARGET_X86ORX64
   MCode *exitstubgroup[LJ_MAX_EXITSTUBGR];  /* Exit stub group addresses. */
+#endif
 
   HotPenalty penalty[PENALTY_SLOTS];  /* Penalty slots. */
   uint32_t penaltyslot;  /* Round-robin index into penalty slots. */
@@ -344,7 +358,7 @@ typedef struct jit_State {
   size_t szallmcarea;  /* Total size of all allocated mcode areas. */
 
   TValue errinfo;  /* Additional info element for trace errors. */
-} jit_State;
+} LJ_ALIGN(16) jit_State;
 
 /* Trivial PRNG e.g. used for penalty randomization. */
 static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
@@ -354,21 +368,14 @@ static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
   return J->prngstate >> (32-bits);
 }
 
-/* Exit stubs. */
-#if LJ_TARGET_X86ORX64
-/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
-#define EXITSTUB_SPACING (2+2)
-#define EXITSTUBS_PER_GROUP 32
-#else
-#error "Missing CPU-specific exit stub definitions"
-#endif
-
+#ifdef EXITSTUBS_PER_GROUP
 /* Return the address of an exit stub. */
 static LJ_AINLINE MCode *exitstub_addr(jit_State *J, ExitNo exitno)
 {
   lua_assert(J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] != NULL);
-  return J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] +
-         EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP);
+  return (MCode *)((char *)J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] +
+                   EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP));
 }
+#endif
 
 #endif
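
Note: with MCode possibly being uint32_t, plain arithmetic on an MCode * advances in 4-byte units, so byte offsets such as EXITSTUB_SPACING*(...) or szmcarea have to go through char * first, as in exitstub_addr() above and the lj_mcode.c range checks below. A standalone illustration (not LuaJIT code):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t MCode;   /* the non-x86 case: one element is 4 bytes wide */

int main(void)
{
  MCode buf[16];
  MCode *base = buf;
  size_t byte_offset = 8;

  MCode *scaled = base + byte_offset;                     /* 8 elements = 32 bytes */
  MCode *bytes  = (MCode *)((char *)base + byte_offset);  /* exactly 8 bytes */

  printf("element arithmetic: +%td bytes\n", (char *)scaled - (char *)base);
  printf("byte arithmetic:    +%td bytes\n", (char *)bytes - (char *)base);
  return 0;
}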

lj_mcode.c

@@ -274,7 +274,7 @@ MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
   } else {
     MCode *mc = J->mcarea;
     /* Try current area first to use the protection cache. */
-    if (ptr >= mc && ptr < mc + J->szmcarea) {
+    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
       mcode_protect(J, MCPROT_GEN);
       return mc;
     }
@@ -282,7 +282,7 @@ MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
     for (;;) {
       mc = ((MCLink *)mc)->next;
       lua_assert(mc != NULL);
-      if (ptr >= mc && ptr < mc + ((MCLink *)mc)->size) {
+      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
        mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN);
        return mc;
       }

lj_snap.c

@@ -406,7 +406,10 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
       if (irt_isinteger(t)) {
        setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
       } else if (irt_isnum(t)) {
-       setnumV(o, ex->fpr[r-RID_MIN_FPR]);
+       if (RID_NUM_FPR)
+         setnumV(o, ex->fpr[r-RID_MIN_FPR]);
+       else
+         setnumV(o, *(double *)&ex->gpr[r-RID_MIN_GPR]);
 #if LJ_64
       } else if (irt_islightud(t)) {
        /* 64 bit lightuserdata which may escape already has the tag bits. */
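
Note: the new else branch covers targets whose exit state has no FP register file (RID_NUM_FPR == 0); the FP value was spilled through a GPR slot and gets reinterpreted as a double. A hedged sketch of that reinterpretation, simplified to a single 64-bit slot (on 32-bit targets the value would span two consecutive GPR slots); the memcpy form is the strictly portable spelling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  uint64_t gpr_slot;
  double stored = 3.25;

  /* Pretend the exit handler left a double in an integer register slot. */
  memcpy(&gpr_slot, &stored, sizeof(gpr_slot));

  double a = *(double *)&gpr_slot;       /* the cast used in the patch above */
  double b;
  memcpy(&b, &gpr_slot, sizeof(b));      /* aliasing-safe equivalent */

  printf("%g %g\n", a, b);               /* both print 3.25 */
  return 0;
}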

lj_trace.c

@@ -278,7 +278,9 @@ int lj_trace_flushall(lua_State *L)
   memset(J->penalty, 0, sizeof(J->penalty));
   /* Free the whole machine code and invalidate all exit stub groups. */
   lj_mcode_free(J);
+#ifdef EXITSTUBS_PER_GROUP
   memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
+#endif
   lj_vmevent_send(L, TRACE,
     setstrV(L, L->top++, lj_str_newlit(L, "flush"));
   );
@@ -700,7 +702,7 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
   lj_vmevent_send(L, TEXIT,
     ExitState *ex = (ExitState *)exptr;
-    uint32_t i;
+    int32_t i;
     lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
     setintV(L->top++, J->parent);
     setintV(L->top++, J->exitno);