Refactoring of conversion ops, part 5: drop TOINT/TOI64/TONUM.

Mike Pall, 2010-12-31 04:09:32 +01:00
parent 1716540c55, commit 783dbd335f
4 changed files with 2 additions and 186 deletions

src/jit/dump.lua:

@@ -222,7 +222,6 @@ span.irt_int, span.irt_i8, span.irt_u8, span.irt_i16, span.irt_u16 { color: #b04
 local colorize, irtype

 -- Lookup tables to convert some literals into names.
-local tointname = { [0] = "check", "index", "", "Z", "S", "T", }
 local litname = {
   ["SLOAD "] = setmetatable({}, { __index = function(t, mode)
     local s = ""
@@ -246,8 +245,6 @@ local litname = {
     t[mode] = s
     return s
   end}),
-  ["TOINT "] = tointname,
-  ["TOI64 "] = tointname,
   ["FLOAD "] = vmdef.irfield,
   ["FREF  "] = vmdef.irfield,
   ["FPMATH"] = vmdef.irfpm,

src/lj_asm.c:

@@ -1594,15 +1594,6 @@ static void asm_retf(ASMState *as, IRIns *ir)

 /* -- Type conversions ---------------------------------------------------- */

-static void asm_tonum(ASMState *as, IRIns *ir)
-{
-  Reg dest = ra_dest(as, ir, RSET_FPR);
-  Reg left = asm_fuseload(as, ir->op1, RSET_GPR);
-  emit_mrm(as, XO_CVTSI2SD, dest, left);
-  if (!(as->flags & JIT_F_SPLIT_XMM))
-    emit_rr(as, XO_XORPS, dest, dest);  /* Avoid partial register stall. */
-}
-
 static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
 {
   Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
@@ -1617,13 +1608,6 @@ static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
   /* Can't fuse since left is needed twice. */
 }

-static void asm_toint(ASMState *as, IRIns *ir)
-{
-  Reg dest = ra_dest(as, ir, RSET_GPR);
-  Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
-  emit_mrm(as, XO_CVTSD2SI, dest, left);
-}
-
 static void asm_tobit(ASMState *as, IRIns *ir)
 {
   Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -1636,24 +1620,6 @@ static void asm_tobit(ASMState *as, IRIns *ir)
   ra_left(as, tmp, ir->op1);
 }

-static void asm_toi64(ASMState *as, IRIns *ir)
-{
-  Reg dest = ra_dest(as, ir, RSET_GPR);
-  IRRef lref = ir->op1;
-  lua_assert(LJ_64);  /* NYI: 32 bit register pairs. */
-  if (ir->op2 == IRTOINT_TRUNCI64) {
-    Reg left = asm_fuseload(as, lref, RSET_FPR);
-    emit_mrm(as, XO_CVTTSD2SI, dest|REX_64, left);
-  } else if (ir->op2 == IRTOINT_ZEXT64) {
-    /* Nothing to do. This assumes 32 bit regs are already zero-extended. */
-    ra_left(as, dest, lref);  /* But may need to move regs. */
-  } else {
-    Reg left = asm_fuseload(as, lref, RSET_GPR);
-    emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
-    lua_assert(ir->op2 == IRTOINT_SEXT64);
-  }
-}
-
 static void asm_conv(ASMState *as, IRIns *ir)
 {
   IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
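
For reference, the three modes handled by the removed asm_toi64 correspond to plain C conversions, which the generic CONV op must now express. A minimal standalone sketch of their semantics (illustrative C, not LuaJIT code; the variable names are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  double n = -2.7;
  uint32_t u = 0xdeadbeefu;
  int32_t i = -1;
  /* IRTOINT_TRUNCI64: truncate an FP number toward zero (CVTTSD2SI). */
  int64_t t = (int64_t)n;
  /* IRTOINT_ZEXT64: widen uint32_t to 64 bits. Free on x64, since every
  ** 32 bit operation already zero-extends its result. */
  int64_t z = (int64_t)(uint64_t)u;
  /* IRTOINT_SEXT64: widen int32_t with sign extension (MOVSXD). */
  int64_t s = (int64_t)i;
  printf("%lld %llx %lld\n", (long long)t, (unsigned long long)z, (long long)s);
  /* Prints: -2 deadbeef -1 */
  return 0;
}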
@@ -2499,7 +2465,7 @@ static void asm_x87load(ASMState *as, IRRef ref)
       emit_x87op(as, XI_FLD1);
     else
       emit_rma(as, XO_FLDq, XOg_FLDq, tv);
-  } else if (ir->o == IR_TONUM && !ra_used(ir) &&
+  } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
 	     !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
     IRIns *iri = IR(ir->op1);
     emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
@@ -3753,15 +3719,7 @@ static void asm_ir(ASMState *as, IRIns *ir)
   case IR_OBAR: asm_obar(as, ir); break;

   /* Type conversions. */
-  case IR_TONUM: asm_tonum(as, ir); break;
-  case IR_TOINT:
-    if (irt_isguard(ir->t))
-      asm_tointg(as, ir, ra_alloc1(as, ir->op1, RSET_FPR));
-    else
-      asm_toint(as, ir);
-    break;
   case IR_TOBIT: asm_tobit(as, ir); break;
-  case IR_TOI64: asm_toi64(as, ir); break;
   case IR_CONV: asm_conv(as, ir); break;
   case IR_TOSTR: asm_tostr(as, ir); break;
   case IR_STRTO: asm_strto(as, ir); break;
@@ -3905,7 +3863,7 @@ static void asm_setup_regsp(ASMState *as, GCtrace *T)
       }
       break;
     /* Do not propagate hints across type conversions. */
-    case IR_CONV: case IR_TONUM: case IR_TOINT: case IR_TOBIT:
+    case IR_CONV: case IR_TOBIT:
       break;
     default:
       /* Propagate hints across likely 'op reg, imm' or 'op reg'. */

src/lj_ir.h:

@@ -119,10 +119,7 @@
   \
   /* Type conversions. */ \
   _(CONV,	N , ref, lit) \
-  _(TONUM,	N , ref, ___) \
-  _(TOINT,	N , ref, lit) \
   _(TOBIT,	N , ref, ref) \
-  _(TOI64,	N , ref, lit) \
   _(TOSTR,	N , ref, ___) \
   _(STRTO,	N , ref, ___) \
   \
@@ -210,15 +207,6 @@ IRFLDEF(FLENUM)
 #define IRXLOAD_READONLY	1	/* Load from read-only data. */
 #define IRXLOAD_UNALIGNED	2	/* Unaligned load. */

-/* TOINT/TOI64 mode, stored in op2. Ordered by strength of the checks. */
-#define IRTOINT_CHECK		0	/* Number checked for integerness. */
-#define IRTOINT_INDEX		1	/* Checked + special backprop rules. */
-#define IRTOINT_ANY		2	/* Any FP number is ok. */
-#define IRTOINT_ZEXT64		3	/* Convert uint32_t to int64_t. */
-#define IRTOINT_SEXT64		4	/* Convert int32_t to int64_t. */
-#define IRTOINT_TRUNCI64	5	/* Truncate number to int64_t. */
-#define IRTOINT_TOBIT		6	/* Cache only: TOBIT conversion. */
-
 /* CONV mode, stored in op2. */
 #define IRCONV_SRCMASK		0x001f	/* Source IRType. */
 #define IRCONV_DSTMASK		0x03e0	/* Dest. IRType (also in ir->t). */
@@ -235,7 +223,6 @@ IRFLDEF(FLENUM)
 #define IRCONV_INDEX	(2<<IRCONV_CSH)	/* Check + special backprop rules. */
 #define IRCONV_CHECK	(3<<IRCONV_CSH)	/* Number checked for integerness. */
-
 /* C call info for CALL* instructions. */
 typedef struct CCallInfo {
   ASMFunction func;		/* Function pointer. */
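
The generic CONV op packs everything the dropped IRTOINT_* literals expressed into its op2 operand: the source type in the low 5 bits, the destination type in the next 5, and the check mode above that. A hedged sketch of the encode/decode arithmetic; the IRCONV_CSH value and the example type codes are assumptions for illustration, not taken from this commit:

#include <stdio.h>

/* The two masks match the lj_ir.h hunk above; the rest are assumed. */
#define IRCONV_SRCMASK  0x001fu  /* Source IRType in the low 5 bits. */
#define IRCONV_DSTMASK  0x03e0u  /* Dest. IRType in the next 5 bits. */
#define IRCONV_DSH      5        /* Shift implied by IRCONV_DSTMASK. */
#define IRCONV_CSH      12       /* Assumed position of the check mode. */
#define IRCONV_CHECK    (3u<<IRCONV_CSH)

enum { EX_IRT_NUM = 14, EX_IRT_INT = 19 };  /* Hypothetical type codes. */

int main(void)
{
  /* Encode num->int with an integerness check (the old guarded TOINT). */
  unsigned int op2 = (EX_IRT_INT << IRCONV_DSH) | EX_IRT_NUM | IRCONV_CHECK;
  /* Decode the way asm_conv reads back the source type. */
  printf("src=%u dst=%u mode=%u\n",
         op2 & IRCONV_SRCMASK,
         (op2 & IRCONV_DSTMASK) >> IRCONV_DSH,
         op2 >> IRCONV_CSH);  /* -> src=14 dst=19 mode=3 */
  return 0;
}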

src/lj_opt_fold.c:

@@ -441,12 +441,6 @@ LJFOLDF(kfold_strcmp)

 /* -- Constant folding of conversions ------------------------------------- */

-LJFOLD(TONUM KINT)
-LJFOLDF(kfold_tonum)
-{
-  return lj_ir_knum(J, cast_num(fleft->i));
-}
-
 LJFOLD(TOBIT KNUM KNUM)
 LJFOLDF(kfold_tobit)
 {
@@ -455,40 +449,6 @@ LJFOLDF(kfold_tobit)
   return INTFOLD((int32_t)tv.u32.lo);
 }

-LJFOLD(TOINT KNUM any)
-LJFOLDF(kfold_toint)
-{
-  lua_Number n = knumleft;
-  int32_t k = lj_num2int(n);
-  if (irt_isguard(fins->t) && n != cast_num(k)) {
-    /* We're about to create a guard which always fails, like TOINT +1.5.
-    ** Some pathological loops cause this during LICM, e.g.:
-    **   local x,k,t = 0,1.5,{1,[1.5]=2}
-    **   for i=1,200 do x = x + t[k]; k = k == 1 and 1.5 or 1 end
-    **   assert(x == 300)
-    */
-    return FAILFOLD;
-  }
-  return INTFOLD(k);
-}
-
-LJFOLD(TOI64 KINT any)
-LJFOLDF(kfold_toi64_kint)
-{
-  lua_assert(fins->op2 == IRTOINT_ZEXT64 || fins->op2 == IRTOINT_SEXT64);
-  if (fins->op2 == IRTOINT_ZEXT64)
-    return INT64FOLD((uint64_t)(uint32_t)fleft->i);
-  else
-    return INT64FOLD((uint64_t)(int32_t)fleft->i);
-}
-
-LJFOLD(TOI64 KNUM any)
-LJFOLDF(kfold_toi64_knum)
-{
-  lua_assert(fins->op2 == IRTOINT_TRUNCI64);
-  return INT64FOLD((uint64_t)(int64_t)knumleft);
-}
-
 LJFOLD(CONV KINT IRCONV_NUM_INT)
 LJFOLDF(kfold_conv_kint_num)
 {
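
The guard semantics dropped with kfold_toint carry over to the CONV folds: a KNUM may only fold to an integer constant under a guarded conversion when the conversion is exact; otherwise the fold must signal failure, because the emitted guard would always trip (as in the pathological LICM loop quoted in the removed comment). A simplified sketch of that exactness test; unlike lj_num2int it clamps the range up front instead of relying on CVTTSD2SI behavior:

#include <stdint.h>
#include <stdio.h>

/* Folding num->int under a guard is only sound if converting back
** reproduces the original number exactly. */
static int fold_num_to_int(double n, int32_t *k)
{
  if (!(n >= -2147483648.0 && n < 2147483648.0))
    return 0;                  /* Out of int32_t range: not foldable. */
  *k = (int32_t)n;
  return n == (double)*k;      /* Exact iff no fraction was lost. */
}

int main(void)
{
  int32_t k;
  printf("%d\n", fold_num_to_int(300.0, &k));  /* 1: folds to INTFOLD(300). */
  printf("%d\n", fold_num_to_int(1.5, &k));    /* 0: guard would always fail. */
  return 0;
}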
@@ -613,9 +573,6 @@ LJFOLDF(shortcut_round)
   return NEXTFOLD;
 }

-LJFOLD(FPMATH TONUM IRFPM_FLOOR)
-LJFOLD(FPMATH TONUM IRFPM_CEIL)
-LJFOLD(FPMATH TONUM IRFPM_TRUNC)
 LJFOLD(ABS ABS KNUM)
 LJFOLDF(shortcut_left)
 {
@@ -640,32 +597,6 @@ LJFOLDF(shortcut_leftleft)
   return fleft->op1;  /* f(g(x)) ==> x */
 }

-LJFOLD(TONUM TOINT)
-LJFOLDF(shortcut_leftleft_toint)
-{
-  PHIBARRIER(fleft);
-  if (irt_isguard(fleft->t))  /* Only safe with a guarded TOINT. */
-    return fleft->op1;  /* f(g(x)) ==> x */
-  return NEXTFOLD;
-}
-
-LJFOLD(TOINT TONUM any)
-LJFOLD(TOBIT TONUM KNUM)  /* The inverse must NOT be shortcut! */
-LJFOLDF(shortcut_leftleft_across_phi)
-{
-  /* Fold even across PHI to avoid expensive int->num->int conversions. */
-  return fleft->op1;  /* f(g(x)) ==> x */
-}
-
-LJFOLD(TOI64 TONUM any)
-LJFOLDF(shortcut_leftleft_toint64)
-{
-  /* Fold even across PHI to avoid expensive int->num->int64 conversions. */
-  fins->op1 = fleft->op1;  /* (int64_t)(double)(int)x ==> (int64_t)x */
-  fins->op2 = IRTOINT_SEXT64;
-  return RETRYFOLD;
-}
-
 /* -- FP algebraic simplifications ---------------------------------------- */

 /* FP arithmetic is tricky -- there's not much to simplify.
@@ -969,63 +900,6 @@ LJFOLDF(narrow_convert)
   return lj_opt_narrow_convert(J);
 }

-/* Relaxed CSE rule for TOINT allows commoning with stronger checks, too. */
-LJFOLD(TOINT any any)
-LJFOLDF(cse_toint)
-{
-  if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
-    IRRef ref, op1 = fins->op1;
-    uint8_t guard = irt_isguard(fins->t);
-    for (ref = J->chain[IR_TOINT]; ref > op1; ref = IR(ref)->prev)
-      if (IR(ref)->op1 == op1 && irt_isguard(IR(ref)->t) >= guard)
-	return ref;
-  }
-  return EMITFOLD;  /* No fallthrough to regular CSE. */
-}
-
-/* -- Strength reduction of widening -------------------------------------- */
-
-LJFOLD(TOI64 any 3)  /* IRTOINT_ZEXT64 */
-LJFOLDF(simplify_zext64)
-{
-#if LJ_TARGET_X64
-  /* Eliminate widening. All 32 bit ops implicitly zero-extend the result. */
-  PHIBARRIER(fleft);
-  return LEFTFOLD;
-#else
-  UNUSED(J);
-  return NEXTFOLD;
-#endif
-}
-
-LJFOLD(TOI64 any 4)  /* IRTOINT_SEXT64 */
-LJFOLDF(simplify_sext64)
-{
-  IRRef ref = fins->op1;
-  int64_t ofs = 0;
-  PHIBARRIER(fleft);
-  if (fleft->o == IR_ADD && irref_isk(fleft->op2)) {
-    ofs = (int64_t)IR(fleft->op2)->i;
-    ref = fleft->op1;
-  }
-  /* Use scalar evolution analysis results to strength-reduce sign-extension. */
-  if (ref == J->scev.idx) {
-    IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop;
-    lua_assert(irt_isint(J->scev.t));
-    if (lo && IR(lo)->i + ofs >= 0) {
-#if LJ_TARGET_X64
-      /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */
-      return LEFTFOLD;
-#else
-      /* Reduce to a (cheaper) zero-extension. */
-      fins->op2 = IRTOINT_ZEXT64;
-      return RETRYFOLD;
-#endif
-    }
-  }
-  return NEXTFOLD;
-}
-
 /* -- Integer algebraic simplifications ----------------------------------- */

 LJFOLD(ADD any KINT)
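
For context on the two widening folds removed above (presumably re-expressed in terms of CONV later in this refactoring series): zero-extension is free on x64 because 32 bit results are implicitly zero-extended, and a sign-extension whose operand is provably non-negative, e.g. via the scalar evolution bound checked in simplify_sext64, can be demoted to that cheaper zero-extension. A small standalone illustration of why the demotion is sound:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  /* simplify_zext64: on x64 the 32 bit result is already zero-extended,
  ** so the widening op can simply be dropped (LEFTFOLD). */
  uint32_t u = 0x80000000u;
  uint64_t z = u;              /* mov r32,r32 zero-extends; no extra insn. */

  /* simplify_sext64: if the loop bound proves idx >= 0, sign- and
  ** zero-extension produce the same 64 bit value, so the cheaper form
  ** can be substituted (RETRYFOLD on non-x64 targets). */
  int32_t idx = 42;            /* Assume scalar evolution proved idx >= 0. */
  uint64_t sext = (uint64_t)(int64_t)idx;
  uint64_t zext = (uint64_t)(uint32_t)idx;
  printf("%llx %d\n", (unsigned long long)z, sext == zext);  /* 80000000 1 */
  return 0;
}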