FFI: Save errno/GetLastError() around allocations, hooks etc.

Mike Pall 2011-05-08 22:33:04 +02:00
parent 87553d9e77
commit 77ba7726e2
4 changed files with 72 additions and 5 deletions
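
Every hunk below applies the same pattern: the FFI lets Lua code read errno (via ffi.errno()) and GetLastError() right after calling a C function, so any VM-internal work that can run in between (allocator mmap/munmap calls, dispatch hooks, trace starts and exits) must save the caller-visible error state on entry and restore it on exit. A minimal sketch of that pattern in C, with a hypothetical wrapper name (the commit itself wraps the idea in the ERRNO_SAVE/ERRNO_RESTORE macros added to src/lj_dispatch.h):

#include <errno.h>
#include <stdlib.h>

/* Illustrative only, not part of the commit: internal work must not
** clobber the errno value the FFI user is about to inspect. */
static void *internal_alloc(size_t size)
{
  int olderr = errno;        /* Save the caller-visible errno. */
  void *p = malloc(size);    /* Internal call may overwrite errno. */
  errno = olderr;            /* Restore it before returning to FFI code. */
  return p;
}

On Windows the same bracketing uses GetLastError()/SetLastError(), as the lj_alloc.c hunks show.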

src/lj_alloc.c

@@ -23,7 +23,7 @@
 #define lj_alloc_c
 #define LUA_CORE

-/* To get the mremap prototype. Must be defind before any system includes. */
+/* To get the mremap prototype. Must be defined before any system includes. */
 #if defined(__linux__) && !defined(_GNU_SOURCE)
 #define _GNU_SOURCE
 #endif
@@ -98,18 +98,22 @@ static void INIT_MMAP(void)
 /* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
 static LJ_AINLINE void *CALL_MMAP(size_t size)
 {
+  DWORD olderr = GetLastError();
   void *ptr = NULL;
   long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
                   MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+  SetLastError(olderr);
   return st == 0 ? ptr : MFAIL;
 }

 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
 static LJ_AINLINE void *DIRECT_MMAP(size_t size)
 {
+  DWORD olderr = GetLastError();
   void *ptr = NULL;
   long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
                   MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
+  SetLastError(olderr);
   return st == 0 ? ptr : MFAIL;
 }
@@ -120,15 +124,19 @@ static LJ_AINLINE void *DIRECT_MMAP(size_t size)
 /* Win32 MMAP via VirtualAlloc */
 static LJ_AINLINE void *CALL_MMAP(size_t size)
 {
+  DWORD olderr = GetLastError();
   void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+  SetLastError(olderr);
   return ptr ? ptr : MFAIL;
 }

 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
 static LJ_AINLINE void *DIRECT_MMAP(size_t size)
 {
+  DWORD olderr = GetLastError();
   void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                            PAGE_READWRITE);
+  SetLastError(olderr);
   return ptr ? ptr : MFAIL;
 }
@@ -137,6 +145,7 @@ static LJ_AINLINE void *DIRECT_MMAP(size_t size)
 /* This function supports releasing coalesed segments */
 static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
 {
+  DWORD olderr = GetLastError();
   MEMORY_BASIC_INFORMATION minfo;
   char *cptr = (char *)ptr;
   while (size) {
@@ -150,11 +159,13 @@ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
     cptr += minfo.RegionSize;
     size -= minfo.RegionSize;
   }
+  SetLastError(olderr);
   return 0;
 }

 #else

+#include <errno.h>
 #include <sys/mman.h>

 #define MMAP_PROT               (PROT_READ|PROT_WRITE)
@@ -169,7 +180,13 @@ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)

 #if LJ_TARGET_LINUX

 /* Actually this only gives us max. 1GB in current Linux kernels. */
-#define CALL_MMAP(s)    mmap(NULL, (s), MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0)
+static LJ_AINLINE void *CALL_MMAP(size_t size)
+{
+  int olderr = errno;
+  void *ptr = mmap(NULL, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
+  errno = olderr;
+  return ptr;
+}

 #elif LJ_TARGET_OSX || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
@@ -188,6 +205,7 @@ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)

 static LJ_AINLINE void *CALL_MMAP(size_t size)
 {
+  int olderr = errno;
   /* Hint for next allocation. Doesn't need to be thread-safe. */
   static uintptr_t alloc_hint = MMAP_REGION_START;
   int retry = 0;
@@ -205,6 +223,7 @@ static LJ_AINLINE void *CALL_MMAP(size_t size)
     if ((uintptr_t)p >= MMAP_REGION_START &&
         (uintptr_t)p + size < MMAP_REGION_END) {
       alloc_hint = (uintptr_t)p + size;
+      errno = olderr;
       return p;
     }
     if (p != CMFAIL) munmap(p, size);
@@ -212,6 +231,7 @@ static LJ_AINLINE void *CALL_MMAP(size_t size)
     retry = 1;
     alloc_hint = MMAP_REGION_START;
   }
+  errno = olderr;
   return CMFAIL;
 }
@@ -224,17 +244,39 @@ static LJ_AINLINE void *CALL_MMAP(size_t size)
 #else

 /* 32 bit mode is easy. */
-#define CALL_MMAP(s)    mmap(NULL, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
+static LJ_AINLINE void *CALL_MMAP(size_t size)
+{
+  int olderr = errno;
+  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
+  errno = olderr;
+  return ptr;
+}

 #endif

 #define INIT_MMAP()             ((void)0)
 #define DIRECT_MMAP(s)          CALL_MMAP(s)
-#define CALL_MUNMAP(a, s)       munmap((a), (s))
+
+static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
+{
+  int olderr = errno;
+  int ret = munmap(ptr, size);
+  errno = olderr;
+  return ret;
+}

 #if LJ_TARGET_LINUX
 /* Need to define _GNU_SOURCE to get the mremap prototype. */
-#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz,
+                                     int flags)
+{
+  int olderr = errno;
+  ptr = mremap(ptr, osz, nsz, flags);
+  errno = olderr;
+  return ptr;
+}
+
+#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
 #define CALL_MREMAP_NOMOVE      0
 #define CALL_MREMAP_MAYMOVE     1
 #if LJ_64

src/lj_dispatch.c

@@ -348,6 +348,7 @@ static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres)
 /* Instruction dispatch. Used by instr/line/return hooks or when recording. */
 void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc)
 {
+  ERRNO_SAVE
   GCfunc *fn = curr_func(L);
   GCproto *pt = funcproto(fn);
   void *cf = cframe_raw(L->cframe);
@@ -382,6 +383,7 @@ void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc)
   }
   if ((g->hookmask & LUA_MASKRET) && bc_isret(bc_op(pc[-1])))
     callhook(L, LUA_HOOKRET, -1);
+  ERRNO_RESTORE
 }

 /* Initialize call. Ensure stack space and return # of missing parameters. */
@@ -405,6 +407,7 @@ static int call_init(lua_State *L, GCfunc *fn)
 /* Call dispatch. Used by call hooks, hot calls or when recording. */
 ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc)
 {
+  ERRNO_SAVE
   GCfunc *fn = curr_func(L);
   BCOp op;
   global_State *g = G(L);
@@ -443,6 +446,7 @@ out:
       (op == BC_FUNCF || op == BC_FUNCV))
     op = (BCOp)((int)op+(int)BC_IFUNCF-(int)BC_FUNCF);
 #endif
+  ERRNO_RESTORE
   return makeasmfunc(lj_bc_ofs[op]);  /* Return static dispatch target. */
 }

src/lj_dispatch.h

@@ -69,4 +69,21 @@ LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc);
 LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc);
 LJ_FUNCA void LJ_FASTCALL lj_dispatch_return(lua_State *L, const BCIns *pc);

+#if LJ_HASFFI && !defined(_BUILDVM_H)
+/* Save/restore errno and GetLastError() around hooks, exits and recording. */
+#include <errno.h>
+#if LJ_TARGET_WINDOWS
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#define ERRNO_SAVE      int olderr = errno; DWORD oldwerr = GetLastError();
+#define ERRNO_RESTORE   errno = olderr; SetLastError(oldwerr);
+#else
+#define ERRNO_SAVE      int olderr = errno;
+#define ERRNO_RESTORE   errno = olderr;
+#endif
+#else
+#define ERRNO_SAVE
+#define ERRNO_RESTORE
+#endif
+
 #endif

src/lj_trace.c

@@ -638,6 +638,7 @@ void lj_trace_ins(jit_State *J, const BCIns *pc)
 /* A hotcount triggered. Start recording a root trace. */
 void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
 {
+  ERRNO_SAVE
   /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
   hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]+1);  /* Reset hotcount. */
   /* Only start a new trace if not recording or inside __gc call or vmevent. */
@@ -648,6 +649,7 @@ void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
     J->state = LJ_TRACE_START;
     lj_trace_ins(J, pc-1);
   }
+  ERRNO_RESTORE
 }

 /* Check for a hot side exit. If yes, start recording a side trace. */
@@ -684,6 +686,7 @@ static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
 /* A trace exited. Restore interpreter state. */
 int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
 {
+  ERRNO_SAVE
   lua_State *L = J->L;
   ExitDataCP exd;
   int errcode;
@@ -738,6 +741,7 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
     }
   }
   /* Return MULTRES or 0. */
+  ERRNO_RESTORE
   switch (bc_op(*pc)) {
   case BC_CALLM: case BC_CALLMT:
     return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc));