From 9e99ccc360bc9784ebe5ce29d5fa2c72acfc5777 Mon Sep 17 00:00:00 2001
From: Mike Pall
Date: Mon, 23 May 2016 00:27:51 +0200
Subject: [PATCH] Strip out old infrastructure for 64 bit constants.

Contributed by Peter Cawley.
---
 src/lj_ir.c    | 74 --------------------------------------------------
 src/lj_iropt.h |  3 ---
 src/lj_jit.h   |  1 -
 src/lj_trace.c |  2 --
 4 files changed, 80 deletions(-)

diff --git a/src/lj_ir.c b/src/lj_ir.c
index 124d5791..9c0a2224 100644
--- a/src/lj_ir.c
+++ b/src/lj_ir.c
@@ -204,80 +204,6 @@ found:
   return TREF(ref, IRT_INT);
 }
 
-/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the
-** 64 bit constant. The constants themselves are stored in a chained array
-** and shared across traces.
-**
-** Rationale for choosing this data structure:
-** - The address of the constants is embedded in the generated machine code
-**   and must never move. A resizable array or hash table wouldn't work.
-** - Most apps need very few non-32 bit integer constants (less than a dozen).
-** - Linear search is hard to beat in terms of speed and low complexity.
-*/
-typedef struct K64Array {
-  MRef next;			/* Pointer to next list. */
-  MSize numk;			/* Number of used elements in this array. */
-  TValue k[LJ_MIN_K64SZ];	/* Array of constants. */
-} K64Array;
-
-/* Free all chained arrays. */
-void lj_ir_k64_freeall(jit_State *J)
-{
-  K64Array *k;
-  for (k = mref(J->k64p, K64Array); k; ) {
-    K64Array *next = mref(k->next, K64Array);
-    lj_mem_free(J2G(J), k, sizeof(K64Array));
-    k = next;
-  }
-  setmref(J->k64p, NULL);
-}
-
-/* Get new 64 bit constant slot. */
-static TValue *ir_k64_add(jit_State *J, K64Array *kp, uint64_t u64)
-{
-  TValue *ntv;
-  if (!(kp && kp->numk < LJ_MIN_K64SZ)) {  /* Allocate a new array. */
-    K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
-    setmref(kn->next, NULL);
-    kn->numk = 0;
-    if (kp)
-      setmref(kp->next, kn);  /* Chain to the end of the list. */
-    else
-      setmref(J->k64p, kn);  /* Link first array. */
-    kp = kn;
-  }
-  ntv = &kp->k[kp->numk++];  /* Add to current array. */
-  ntv->u64 = u64;
-  return ntv;
-}
-
-/* Find 64 bit constant in chained array or add it. */
-cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
-{
-  K64Array *k, *kp = NULL;
-  MSize idx;
-  /* Search for the constant in the whole chain of arrays. */
-  for (k = mref(J->k64p, K64Array); k; k = mref(k->next, K64Array)) {
-    kp = k;  /* Remember previous element in list. */
-    for (idx = 0; idx < k->numk; idx++) {  /* Search one array. */
-      TValue *tv = &k->k[idx];
-      if (tv->u64 == u64)  /* Needed for +-0/NaN/absmask. */
-        return tv;
-    }
-  }
-  /* Otherwise add a new constant. */
-  return ir_k64_add(J, kp, u64);
-}
-
-TValue *lj_ir_k64_reserve(jit_State *J)
-{
-  K64Array *k, *kp = NULL;
-  lj_ir_k64_find(J, 0);  /* Intern dummy 0 to protect the reserved slot. */
-  /* Find last K64Array, if any. */
-  for (k = mref(J->k64p, K64Array); k; k = mref(k->next, K64Array)) kp = k;
-  return ir_k64_add(J, kp, 0);  /* Set to 0. Final value is set later. */
-}
-
 /* Intern 64 bit constant, given by its 64 bit pattern. */
 TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64)
 {
diff --git a/src/lj_iropt.h b/src/lj_iropt.h
index 219d391a..8b7a43de 100644
--- a/src/lj_iropt.h
+++ b/src/lj_iropt.h
@@ -40,10 +40,7 @@ LJ_FUNC TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs);
 
 /* Interning of constants. */
 LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
-LJ_FUNC void lj_ir_k64_freeall(jit_State *J);
 LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64);
-LJ_FUNC TValue *lj_ir_k64_reserve(jit_State *J);
-LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
 LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
 LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
 LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
diff --git a/src/lj_jit.h b/src/lj_jit.h
index e9ab319e..55fbea8b 100644
--- a/src/lj_jit.h
+++ b/src/lj_jit.h
@@ -392,7 +392,6 @@ typedef struct jit_State {
   int32_t framedepth;	/* Current frame depth. */
   int32_t retdepth;	/* Return frame depth (count of RETF). */
 
-  MRef k64p;		/* Pointer to chained array of 64 bit constants. */
   TValue ksimd[LJ_KSIMD__MAX*2+1];  /* 16 byte aligned SIMD constants. */
   TValue k64[LJ_K64__MAX];  /* Common 8 byte constants used by backends. */
   uint32_t k32[LJ_K32__MAX];  /* Ditto for 4 byte constants. */
diff --git a/src/lj_trace.c b/src/lj_trace.c
index eaf9365c..87146832 100644
--- a/src/lj_trace.c
+++ b/src/lj_trace.c
@@ -295,7 +295,6 @@ int lj_trace_flushall(lua_State *L)
   memset(J->penalty, 0, sizeof(J->penalty));
   /* Free the whole machine code and invalidate all exit stub groups. */
   lj_mcode_free(J);
-  lj_ir_k64_freeall(J);
   memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
   lj_vmevent_send(L, TRACE,
     setstrV(L, L->top++, lj_str_newlit(L, "flush"));
@@ -351,7 +350,6 @@ void lj_trace_freestate(global_State *g)
   }
 #endif
   lj_mcode_free(J);
-  lj_ir_k64_freeall(J);
   lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
   lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
   lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
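
Background note (not part of the applied diff): the comment deleted from lj_ir.c
above describes the chained-array pool that lj_ir_k64_find()/ir_k64_add() used to
give every 64 bit constant a stable address for the generated machine code to
reference. As a standalone illustration of that find-or-add scheme, here is a
minimal sketch; Pool64, pool64_intern, pool64_free and POOL64_SIZE are made-up
names standing in for K64Array, lj_ir_k64_find, lj_ir_k64_freeall and
LJ_MIN_K64SZ, and the code is deliberately simplified (plain malloc/free instead
of the JIT allocator), not LuaJIT source.

#include <stdint.h>
#include <stdlib.h>

#define POOL64_SIZE 32  /* Slots per chunk, analogous to LJ_MIN_K64SZ. */

/* One fixed-size chunk of 64 bit constants. Chunks are never resized or
** moved, so the address of an interned constant stays valid forever. */
typedef struct Pool64 {
  struct Pool64 *next;      /* Next chunk in the chain. */
  size_t numk;              /* Used slots in this chunk. */
  uint64_t k[POOL64_SIZE];  /* The constants themselves. */
} Pool64;

/* Find a 64 bit pattern in the chain or append it; return its address.
** Linear search over all chunks, as in the removed lj_ir_k64_find(). */
static const uint64_t *pool64_intern(Pool64 **head, uint64_t u64)
{
  Pool64 *p, *last = NULL;
  for (p = *head; p; p = p->next) {  /* Search the whole chain. */
    size_t i;
    last = p;  /* Remember the last chunk for a possible append. */
    for (i = 0; i < p->numk; i++)
      if (p->k[i] == u64)  /* Bit pattern compare: keeps +0/-0 and NaNs apart. */
        return &p->k[i];
  }
  if (!last || last->numk == POOL64_SIZE) {  /* Need a fresh chunk. */
    Pool64 *n = (Pool64 *)calloc(1, sizeof(Pool64));
    if (!n) return NULL;
    if (last) last->next = n; else *head = n;
    last = n;
  }
  last->k[last->numk] = u64;  /* Append to the last chunk. */
  return &last->k[last->numk++];
}

/* Free the whole chain, the counterpart of the removed lj_ir_k64_freeall(). */
static void pool64_free(Pool64 **head)
{
  Pool64 *p = *head;
  while (p) {
    Pool64 *next = p->next;
    free(p);
    p = next;
  }
  *head = NULL;
}

With this patch the side pool is gone: 64 bit constants go through lj_ir_k64(),
lj_ir_kint64() and lj_ir_knum_u64() (the declarations kept in lj_iropt.h above)
and are interned as ordinary IR constants, so the k64p chain in jit_State and
the lj_ir_k64_freeall() calls in lj_trace.c are no longer needed.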