Strip out old infrastructure for 64 bit constants.

Contributed by Peter Cawley.
Mike Pall 2016-05-23 00:27:51 +02:00
parent 7fb75ccc4c
commit 9e99ccc360
4 changed files with 0 additions and 80 deletions


@@ -204,80 +204,6 @@ found:
  return TREF(ref, IRT_INT);
}

/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the
** 64 bit constant. The constants themselves are stored in a chained array
** and shared across traces.
**
** Rationale for choosing this data structure:
** - The address of the constants is embedded in the generated machine code
**   and must never move. A resizable array or hash table wouldn't work.
** - Most apps need very few non-32 bit integer constants (less than a dozen).
** - Linear search is hard to beat in terms of speed and low complexity.
*/
typedef struct K64Array {
  MRef next;                    /* Pointer to next list. */
  MSize numk;                   /* Number of used elements in this array. */
  TValue k[LJ_MIN_K64SZ];       /* Array of constants. */
} K64Array;

/* Free all chained arrays. */
void lj_ir_k64_freeall(jit_State *J)
{
  K64Array *k;
  for (k = mref(J->k64p, K64Array); k; ) {
    K64Array *next = mref(k->next, K64Array);
    lj_mem_free(J2G(J), k, sizeof(K64Array));
    k = next;
  }
  setmref(J->k64p, NULL);
}

/* Get new 64 bit constant slot. */
static TValue *ir_k64_add(jit_State *J, K64Array *kp, uint64_t u64)
{
  TValue *ntv;
  if (!(kp && kp->numk < LJ_MIN_K64SZ)) {  /* Allocate a new array. */
    K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
    setmref(kn->next, NULL);
    kn->numk = 0;
    if (kp)
      setmref(kp->next, kn);  /* Chain to the end of the list. */
    else
      setmref(J->k64p, kn);  /* Link first array. */
    kp = kn;
  }
  ntv = &kp->k[kp->numk++];  /* Add to current array. */
  ntv->u64 = u64;
  return ntv;
}

/* Find 64 bit constant in chained array or add it. */
cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
{
  K64Array *k, *kp = NULL;
  MSize idx;
  /* Search for the constant in the whole chain of arrays. */
  for (k = mref(J->k64p, K64Array); k; k = mref(k->next, K64Array)) {
    kp = k;  /* Remember previous element in list. */
    for (idx = 0; idx < k->numk; idx++) {  /* Search one array. */
      TValue *tv = &k->k[idx];
      if (tv->u64 == u64)  /* Needed for +-0/NaN/absmask. */
        return tv;
    }
  }
  /* Otherwise add a new constant. */
  return ir_k64_add(J, kp, u64);
}

TValue *lj_ir_k64_reserve(jit_State *J)
{
  K64Array *k, *kp = NULL;
  lj_ir_k64_find(J, 0);  /* Intern dummy 0 to protect the reserved slot. */
  /* Find last K64Array, if any. */
  for (k = mref(J->k64p, K64Array); k; k = mref(k->next, K64Array)) kp = k;
  return ir_k64_add(J, kp, 0);  /* Set to 0. Final value is set later. */
}

/* Intern 64 bit constant, given by its 64 bit pattern. */
TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64)
{


@@ -40,10 +40,7 @@ LJ_FUNC TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs);
/* Interning of constants. */
LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
LJ_FUNC void lj_ir_k64_freeall(jit_State *J);
LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64);
LJ_FUNC TValue *lj_ir_k64_reserve(jit_State *J);
LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);


@@ -392,7 +392,6 @@ typedef struct jit_State {
  int32_t framedepth;   /* Current frame depth. */
  int32_t retdepth;     /* Return frame depth (count of RETF). */

  MRef k64p;            /* Pointer to chained array of 64 bit constants. */
  TValue ksimd[LJ_KSIMD__MAX*2+1];  /* 16 byte aligned SIMD constants. */
  TValue k64[LJ_K64__MAX];    /* Common 8 byte constants used by backends. */
  uint32_t k32[LJ_K32__MAX];  /* Ditto for 4 byte constants. */


@@ -295,7 +295,6 @@ int lj_trace_flushall(lua_State *L)
  memset(J->penalty, 0, sizeof(J->penalty));
  /* Free the whole machine code and invalidate all exit stub groups. */
  lj_mcode_free(J);
  lj_ir_k64_freeall(J);
  memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "flush"));
@@ -351,7 +350,6 @@
  }
#endif
  lj_mcode_free(J);
  lj_ir_k64_freeall(J);
  lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
  lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
  lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
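
The rationale comment removed in the first hunk describes the data structure this commit retires: fixed-size blocks of 64 bit constants whose slot addresses never move (they are baked into the generated machine code), found by linear search and chained together when a block fills up. Below is a minimal, self-contained sketch of that scheme for reference only; the names, block size, and plain calloc-based allocation are illustrative assumptions, not the LuaJIT implementation, which allocates through lj_mem_newt and stores TValue slots.

#include <stdint.h>
#include <stdlib.h>

#define K64_BLOCK_SIZE 16  /* Illustrative; LuaJIT sizes its blocks with LJ_MIN_K64SZ. */

typedef struct K64Block {
  struct K64Block *next;        /* Next block in the chain. */
  size_t numk;                  /* Number of used slots in this block. */
  uint64_t k[K64_BLOCK_SIZE];   /* Constant slots; their addresses stay stable. */
} K64Block;

/* Find an existing 64 bit constant or append it; returns a stable address. */
static const uint64_t *k64_find(K64Block **head, uint64_t u64)
{
  K64Block *b, *last = NULL;
  for (b = *head; b; b = b->next) {  /* Linear search over the whole chain. */
    size_t i;
    last = b;  /* Remember the tail so new constants go at the end. */
    for (i = 0; i < b->numk; i++)
      if (b->k[i] == u64)  /* Bit-pattern compare, as in the removed code. */
        return &b->k[i];
  }
  if (!last || last->numk == K64_BLOCK_SIZE) {  /* Chain a fresh block. */
    K64Block *nb = (K64Block *)calloc(1, sizeof(K64Block));
    if (!nb) return NULL;
    if (last) last->next = nb; else *head = nb;
    last = nb;
  }
  last->k[last->numk] = u64;          /* Append to the tail block. */
  return &last->k[last->numk++];
}

Because slots are appended to fixed-size blocks that are never resized or moved until the whole chain is torn down, a pointer returned by k64_find stays valid for the lifetime of the chain, which is the property the embedded machine-code addresses relied on.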