Cleanup 64 bit IR type handling.

Mike Pall 2010-12-05 19:49:29 +01:00
parent 6a7605ec85
commit b1fb71fb98
4 changed files with 48 additions and 30 deletions

View File

@@ -129,20 +129,23 @@ local irtype_text = {
   "tru",
   "lud",
   "str",
-  "ptr",
+  "p32",
   "thr",
   "pro",
   "fun",
-  "t09",
+  "p64",
   "cdt",
   "tab",
   "udt",
   "num",
-  "int",
   "i8 ",
   "u8 ",
   "i16",
   "u16",
+  "int",
+  "u32",
+  "i64",
+  "u64",
 }
 
 local colortype_ansi = {
@@ -165,6 +168,9 @@ local colortype_ansi = {
   "\027[35m%s\027[m",
   "\027[35m%s\027[m",
   "\027[35m%s\027[m",
+  "\027[35m%s\027[m",
+  "\027[35m%s\027[m",
+  "\027[35m%s\027[m",
 }
 
 local function colorize_text(s, t)

View File

@@ -631,7 +631,7 @@ static int32_t ra_spill(ASMState *as, IRIns *ir)
 {
   int32_t slot = ir->s;
   if (!ra_hasspill(slot)) {
-    if (irt_isnum(ir->t) || (LJ_64 && irt_islightud(ir->t))) {
+    if (irt_is64(ir->t)) {
       slot = as->evenspill;
      as->evenspill += 2;
     } else if (as->oddspill) {
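
Note: the ra_spill() change above folds the old per-type test into irt_is64(), but the even/odd spill-slot bookkeeping it feeds is easier to see in isolation. Below is a standalone sketch of that scheme with invented names (SpillState, spill_alloc); it is an illustration under assumptions, not LuaJIT code. 64 bit values always start at an even 32 bit slot, so their 8 byte loads and stores land on 8 byte aligned offsets, while 32 bit values first reuse the odd half left over from an earlier pair.

#include <stdio.h>

typedef struct {
  int evenspill;   /* next free even slot index */
  int oddspill;    /* leftover odd slot from an earlier pair, 0 if none */
} SpillState;

static int spill_alloc(SpillState *st, int is64)
{
  int slot;
  if (is64) {                  /* 64 bit value: take an aligned pair */
    slot = st->evenspill;
    st->evenspill += 2;
  } else if (st->oddspill) {   /* 32 bit value: reuse a leftover odd slot */
    slot = st->oddspill;
    st->oddspill = 0;
  } else {                     /* open a new pair, remember its odd half */
    slot = st->evenspill;
    st->oddspill = slot + 1;
    st->evenspill += 2;
  }
  return slot;
}

int main(void)
{
  SpillState st = { 2, 0 };        /* arbitrary starting slot for the demo */
  int a = spill_alloc(&st, 0);     /* 2: opens pair 2/3, keeps 3 as odd slot */
  int b = spill_alloc(&st, 1);     /* 4: 64 bit value, aligned pair 4/5 */
  int c = spill_alloc(&st, 0);     /* 3: reuses the leftover odd slot */
  int d = spill_alloc(&st, 1);     /* 6: next aligned pair 6/7 */
  printf("%d %d %d %d\n", a, b, c, d);
  return 0;
}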
@@ -661,14 +661,14 @@ static Reg ra_releasetmp(ASMState *as, IRRef ref)
   return r;
 }
 
-/* Use 64 bit operations to handle 64 bit lightuserdata. */
-#define REX_64LU(ir, r) \
-  ((r) | ((LJ_64 && irt_islightud((ir)->t)) ? REX_64 : 0))
+/* Use 64 bit operations to handle 64 bit IR types. */
+#define REX_64IR(ir, r) \
+  ((r) | ((LJ_64 && irt_is64((ir)->t)) ? REX_64 : 0))
 
 /* Generic move between two regs. */
 static void ra_movrr(ASMState *as, IRIns *ir, Reg r1, Reg r2)
 {
-  emit_rr(as, r1 < RID_MAX_GPR ? XO_MOV : XMM_MOVRR(as), REX_64LU(ir, r1), r2);
+  emit_rr(as, r1 < RID_MAX_GPR ? XO_MOV : XMM_MOVRR(as), REX_64IR(ir, r1), r2);
 }
 
 /* Restore a register (marked as free). Rematerialize or force a spill. */
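
Note: REX_64IR() simply ORs the REX_64 flag into the register operand whenever the IR type is 64 bit, so every emitter that receives that register selects 64 bit operand size. A minimal standalone sketch of the idea follows; the flag value, helper names and the restriction to the first eight GPRs are assumptions for illustration, not LuaJIT's actual encoder.

#include <stdint.h>
#include <stdio.h>

#define REX64_FLAG     0x80u                /* flag carried in the reg operand */
#define REG_ID(r)      ((r) & 0x0fu)        /* strip flags, keep register no. */
#define NEEDS_REX_W(r) (((r) & REX64_FLAG) != 0)

/* Encode MOV dst, src for the first eight GPRs; returns the encoded length. */
static int encode_mov_rr(uint8_t *p, unsigned dst, unsigned src)
{
  uint8_t *q = p;
  if (NEEDS_REX_W(dst))
    *q++ = 0x48;                            /* REX.W prefix: 64 bit operands */
  *q++ = 0x8b;                              /* MOV r, r/m */
  *q++ = (uint8_t)(0xc0 | (REG_ID(dst) << 3) | REG_ID(src));  /* ModRM, reg-reg */
  return (int)(q - p);
}

int main(void)
{
  uint8_t buf[4];
  int i, n = encode_mov_rr(buf, 0 | REX64_FLAG, 1);  /* mov rax, rcx: 48 8b c1 */
  for (i = 0; i < n; i++) printf("%02x ", buf[i]);
  printf("\n");
  return 0;
}

Passing dst without the flag makes the same call emit the plain 32 bit form (8b c1).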
@@ -687,7 +687,7 @@ static Reg ra_restore(ASMState *as, IRRef ref)
     ra_modified(as, r);
     RA_DBGX((as, "restore $i $r", ir, r));
     emit_rmro(as, r < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as),
-              REX_64LU(ir, r), RID_ESP, ofs);
+              REX_64IR(ir, r), RID_ESP, ofs);
   }
   return r;
 }
@@ -698,7 +698,7 @@ static void ra_save(ASMState *as, IRIns *ir, Reg r)
 {
   RA_DBGX((as, "save $i $r", ir, r));
   emit_rmro(as, r < RID_MAX_GPR ? XO_MOVto : XO_MOVSDto,
-            REX_64LU(ir, r), RID_ESP, sps_scale(ir->s));
+            REX_64IR(ir, r), RID_ESP, sps_scale(ir->s));
 }
 
 #define MINCOST(r) \
@@ -1397,7 +1397,7 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
        lj_trace_err(as->J, LJ_TRERR_NYICOAL);
       r = ra_alloc1(as, args[n], allow & RSET_GPR);
       allow &= ~RID2RSET(r);
-      emit_movtomro(as, REX_64LU(ir, r), RID_ESP, ofs);
+      emit_movtomro(as, REX_64IR(ir, r), RID_ESP, ofs);
     }
     ofs += sizeof(intptr_t);
   }
@@ -1849,7 +1849,7 @@ static void asm_newref(ASMState *as, IRIns *ir)
   /* Otherwise use g->tmptv to hold the TValue. */
   if (!irref_isk(ir->op2)) {
     Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
-    emit_movtomro(as, REX_64LU(irkey, src), tmp, 0);
+    emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
   } else if (!irt_ispri(irkey->t)) {
     emit_movmroi(as, tmp, 0, irkey->i);
   }
@@ -1918,13 +1918,11 @@ static void asm_fxload(ASMState *as, IRIns *ir)
   case IRT_U8: xo = XO_MOVZXb; break;
   case IRT_I16: xo = XO_MOVSXw; break;
   case IRT_U16: xo = XO_MOVZXw; break;
-#if LJ_64
-  case IRT_LIGHTUD:
-    dest |= REX_64;
-    /* fallthrough */
-#endif
   default:
-    lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+    if (LJ_64 && irt_is64(ir->t))
+      dest |= REX_64;
+    else
+      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
     xo = XO_MOV;
     break;
   }
@@ -1938,6 +1936,7 @@ static void asm_fstore(ASMState *as, IRIns *ir)
   /* The IRT_I16/IRT_U16 stores should never be simplified for constant
   ** values since mov word [mem], imm16 has a length-changing prefix.
   */
+  lua_assert(!(irref_isk(ir->op2) && irt_is64(ir->t)));  /* NYI: KINT64. */
   if (!irref_isk(ir->op2) || irt_isi16(ir->t) || irt_isu16(ir->t)) {
     RegSet allow8 = (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
     src = ra_alloc1(as, ir->op2, allow8);
@@ -1953,7 +1952,10 @@ static void asm_fstore(ASMState *as, IRIns *ir)
   case IRT_LIGHTUD: lua_assert(0);  /* NYI: mask 64 bit lightuserdata. */
 #endif
   default:
-    lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+    if (LJ_64 && irt_is64(ir->t))
+      src |= REX_64;
+    else
+      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
     xo = XO_MOVto;
     break;
   }
@@ -1963,7 +1965,7 @@ static void asm_fstore(ASMState *as, IRIns *ir)
     emit_i8(as, IR(ir->op2)->i);
     emit_mrm(as, XO_MOVmib, 0, RID_MRM);
   } else {
-    lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+    lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
     emit_i32(as, IR(ir->op2)->i);
     emit_mrm(as, XO_MOVmi, 0, RID_MRM);
   }
@@ -2664,7 +2666,7 @@ static void asm_comp_(ASMState *as, IRIns *ir, int cc)
       asm_guardcc(as, cc);
       if (usetest && left != RID_MRM) {
        /* Use test r,r instead of cmp r,0. */
-       emit_rr(as, XO_TEST, REX_64LU(ir, left), left);
+       emit_rr(as, XO_TEST, REX_64IR(ir, left), left);
        if (irl+1 == ir)  /* Referencing previous ins? */
          as->testmcp = as->mcp;  /* Set flag to drop test r,r if possible. */
       } else {
@@ -2683,7 +2685,7 @@ static void asm_comp_(ASMState *as, IRIns *ir, int cc)
       Reg left = ra_alloc1(as, lref, RSET_GPR);
       Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left));
       asm_guardcc(as, cc);
-      emit_mrm(as, XO_CMP, REX_64LU(ir, left), right);
+      emit_mrm(as, XO_CMP, REX_64IR(ir, left), right);
     }
   }
 }
@@ -2762,7 +2764,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
       lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t));
       if (!irref_isk(ref)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
-       emit_movtomro(as, REX_64LU(ir, src), RID_BASE, ofs);
+       emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
       } else if (!irt_ispri(ir->t)) {
        emit_movmroi(as, RID_BASE, ofs, ir->i);
       }
@@ -3189,7 +3191,7 @@ static void asm_head_side(ASMState *as)
       int32_t ofs = sps_scale(regsp_spill(rs));
       ra_free(as, r);
       emit_rmro(as, r < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as),
-               REX_64LU(ir, r), RID_ESP, ofs);
+               REX_64IR(ir, r), RID_ESP, ofs);
       checkmclim(as);
     }
   }

View File

@@ -89,6 +89,7 @@ typedef unsigned __int32 uintptr_t;
 #define checku8(x)     ((x) == (int32_t)(uint8_t)(x))
 #define checki16(x)    ((x) == (int32_t)(int16_t)(x))
 #define checku16(x)    ((x) == (int32_t)(uint16_t)(x))
+#define checki32(x)    ((x) == (int32_t)(x))
 #define checkptr32(x)  ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x))
 
 /* Every half-decent C compiler transforms this into a rotate instruction. */
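
Note: the new checki32() is the 32 bit analogue of checki16()/checku16(): it reports whether a wider (typically 64 bit) value survives truncation to int32_t, e.g. when deciding whether a constant fits a sign-extended 32 bit immediate. A tiny self-contained usage sketch (the surrounding main() is only for demonstration, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define checki32(x)    ((x) == (int32_t)(x))

int main(void)
{
  int64_t a = -1, b = 0x7fffffffLL, c = 0x80000000LL;
  /* Prints "1 1 0" on the usual two's complement targets:
  ** only values representable as int32_t pass the check.
  */
  printf("%d %d %d\n", checki32(a), checki32(b), checki32(c));
  return 0;
}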

View File

@@ -315,7 +315,7 @@ typedef enum {
   IRT_THREAD,
   IRT_PROTO,
   IRT_FUNC,
-  IRT_9,       /* Unused (map of LJ_TTRACE). */
+  IRT_P64,     /* IRT_P64 never escapes the IR (map of LJ_TTRACE). */
   IRT_CDATA,
   IRT_TAB,
   IRT_UDATA,
@@ -325,12 +325,15 @@ typedef enum {
   ** a TValue after implicit or explicit conversion (TONUM). Their types
   ** must be contiguous and next to IRT_NUM (see the typerange macros below).
   */
-  IRT_INT,
   IRT_I8,
   IRT_U8,
   IRT_I16,
   IRT_U16,
-  /* There is room for 13 more types. */
+  IRT_INT,
+  IRT_U32,
+  IRT_I64,
+  IRT_U64,
+  /* There is room for 10 more types. */
 
   /* Additional flags. */
   IRT_MARK = 0x20,     /* Marker for misc. purposes. */
@@ -370,11 +373,17 @@ typedef struct IRType1 { uint8_t irt; } IRType1;
 #define irt_isu8(t)            (irt_type(t) == IRT_U8)
 #define irt_isi16(t)           (irt_type(t) == IRT_I16)
 #define irt_isu16(t)           (irt_type(t) == IRT_U16)
+#define irt_isu32(t)           (irt_type(t) == IRT_U32)
 
-#define irt_isinteger(t)       (irt_typerange((t), IRT_INT, IRT_U16))
+#define irt_isinteger(t)       (irt_typerange((t), IRT_I8, IRT_INT))
 #define irt_isgcv(t)           (irt_typerange((t), IRT_STR, IRT_UDATA))
 #define irt_isaddr(t)          (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
 
+#define IRT_IS64 \
+  ((1u<<IRT_NUM) | (1u<<IRT_I64) | (1u<<IRT_U64) | (1u<<IRT_P64) | \
+   (LJ_64 ? (1u<<IRT_LIGHTUD) : 0))
+#define irt_is64(t)            ((IRT_IS64 >> irt_type(t)) & 1)
+
 static LJ_AINLINE IRType itype2irt(const TValue *tv)
 {
   if (tvisnum(tv))
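
Note: IRT_IS64/irt_is64() classify a type with one shift and mask against a compile-time bitmask instead of a chain of comparisons; on LJ_64 the lightuserdata bit is included as well. The same idiom in a self-contained form (the enum and names below are invented for the demo, they are not the IR types):

#include <stdio.h>

enum { T_NIL, T_NUM, T_INT, T_I64, T_U64, T_MAX };

/* One bit per type: set for every type whose value occupies 64 bits. */
#define IS64_MASK  ((1u << T_NUM) | (1u << T_I64) | (1u << T_U64))
#define is64(t)    ((IS64_MASK >> (t)) & 1u)

int main(void)
{
  int t;
  for (t = 0; t < T_MAX; t++)
    printf("type %d is64=%u\n", t, is64(t));
  return 0;   /* T_NUM, T_I64 and T_U64 report 1; the rest report 0 */
}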
@@ -469,8 +478,8 @@ typedef uint32_t TRef;
 #define tref_isbool(tr)        (tref_typerange((tr), IRT_FALSE, IRT_TRUE))
 #define tref_ispri(tr)         (tref_typerange((tr), IRT_NIL, IRT_TRUE))
 #define tref_istruecond(tr)    (!tref_typerange((tr), IRT_NIL, IRT_FALSE))
-#define tref_isinteger(tr)     (tref_typerange((tr), IRT_INT, IRT_U16))
-#define tref_isnumber(tr)      (tref_typerange((tr), IRT_NUM, IRT_U16))
+#define tref_isinteger(tr)     (tref_typerange((tr), IRT_I8, IRT_INT))
+#define tref_isnumber(tr)      (tref_typerange((tr), IRT_NUM, IRT_INT))
 #define tref_isnumber_str(tr)  (tref_isnumber((tr)) || tref_isstr((tr)))
 #define tref_isgcv(tr)         (tref_typerange((tr), IRT_STR, IRT_UDATA))
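
Note: the re-ordered enum keeps IRT_I8…IRT_INT and IRT_NUM…IRT_INT contiguous, which is what lets irt_isinteger() and tref_isnumber() remain single range checks. Such typerange tests are commonly compiled down to one unsigned comparison; a standalone sketch of that trick follows (the macro name and the enum values are illustrative, the exact LuaJIT definition may differ):

#include <stdio.h>

/* True iff first <= t <= last, using one unsigned compare: if t < first the
** subtraction wraps to a huge unsigned value and the test fails.
*/
#define typerange(t, first, last) \
  ((unsigned)((t) - (first)) <= (unsigned)((last) - (first)))

enum { T_NUM = 13, T_I8, T_U8, T_I16, T_U16, T_INT };

int main(void)
{
  printf("%d %d %d\n",
         typerange(T_I16, T_I8, T_INT),      /* 1: inside the range */
         typerange(T_NUM, T_I8, T_INT),      /* 0: just below it */
         typerange(T_INT + 1, T_I8, T_INT)); /* 0: just above it */
  return 0;
}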