diff --git a/src/.gitignore b/src/.gitignore
index fc94e82c..1a30573c 100644
--- a/src/.gitignore
+++ b/src/.gitignore
@@ -4,4 +4,4 @@ lj_ffdef.h
 lj_libdef.h
 lj_recdef.h
 lj_folddef.h
-lj_vm.s
+lj_vm.[sS]
diff --git a/src/Makefile b/src/Makefile
index fae4c7ba..1d6145d8 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -439,7 +439,7 @@ BUILDVM_X= $(BUILDVM_T)
 HOST_O= $(MINILUA_O) $(BUILDVM_O)
 HOST_T= $(MINILUA_T) $(BUILDVM_T)
 
-LJVM_S= lj_vm.s
+LJVM_S= lj_vm.S
 LJVM_O= lj_vm.o
 LJVM_BOUT= $(LJVM_S)
 LJVM_MODE= elfasm
@@ -647,7 +647,7 @@ lj_folddef.h: $(BUILDVM_T) lj_opt_fold.c
 	$(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $<
 	$(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $<
 
-%.o: %.s
+%.o: %.S
 	$(E) "ASM       $@"
 	$(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $<
 	$(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $<
diff --git a/src/host/buildvm.c b/src/host/buildvm.c
index 37b20ae2..d56c65ca 100644
--- a/src/host/buildvm.c
+++ b/src/host/buildvm.c
@@ -179,6 +179,7 @@ static int build_code(BuildCtx *ctx)
   ctx->nreloc = 0;
 
   ctx->globnames = globnames;
+  ctx->extnames = extnames;
   ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *));
   ctx->nrelocsym = 0;
   for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1;
diff --git a/src/host/buildvm.h b/src/host/buildvm.h
index f9dc8c4f..b321bbda 100644
--- a/src/host/buildvm.h
+++ b/src/host/buildvm.h
@@ -82,6 +82,7 @@ typedef struct BuildCtx {
   const char *beginsym;
   /* Strings generated by DynASM. */
   const char *const *globnames;
+  const char *const *extnames;
   const char *dasm_ident;
   const char *dasm_arch;
   /* Relocations. */
diff --git a/src/host/buildvm_asm.c b/src/host/buildvm_asm.c
index 079e9a80..c91f5bcd 100644
--- a/src/host/buildvm_asm.c
+++ b/src/host/buildvm_asm.c
@@ -51,8 +51,8 @@ static const char *const jccnames[] = {
   "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg"
 };
 
-/* Emit relocation for the incredibly stupid OSX assembler. */
-static void emit_asm_reloc_mach(BuildCtx *ctx, uint8_t *cp, int n,
+/* Emit x86/x64 text relocations. */
+static void emit_asm_reloc_text(BuildCtx *ctx, uint8_t *cp, int n,
 				const char *sym)
 {
   const char *opname = NULL;
@@ -71,6 +71,20 @@ err:
     exit(1);
   }
   emit_asm_bytes(ctx, cp, n);
+  if (strncmp(sym+(*sym == '_'), LABEL_PREFIX, sizeof(LABEL_PREFIX)-1)) {
+    /* Various fixups for external symbols outside of our binary. */
+    if (ctx->mode == BUILD_elfasm) {
+      if (LJ_32)
+	fprintf(ctx->fp, "#if __PIC__\n\t%s lj_wrap_%s\n#else\n", opname, sym);
+      fprintf(ctx->fp, "\t%s %s@PLT\n", opname, sym);
+      if (LJ_32)
+	fprintf(ctx->fp, "#endif\n");
+      return;
+    } else if (LJ_32 && ctx->mode == BUILD_machasm) {
+      fprintf(ctx->fp, "\t%s L%s$stub\n", opname, sym);
+      return;
+    }
+  }
   fprintf(ctx->fp, "\t%s %s\n", opname, sym);
 }
 #else
@@ -254,8 +268,9 @@ void emit_asm(BuildCtx *ctx)
       BuildReloc *r = &ctx->reloc[rel];
       int n = r->ofs - ofs;
 #if LJ_TARGET_X86ORX64
-      if (ctx->mode == BUILD_machasm && r->type != 0) {
-	emit_asm_reloc_mach(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
+      if (r->type != 0 &&
+	  (ctx->mode == BUILD_elfasm || ctx->mode == BUILD_machasm)) {
+	emit_asm_reloc_text(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
       } else {
 	emit_asm_bytes(ctx, ctx->code+ofs, n);
 	emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]);
diff --git a/src/lj_vmmath.c b/src/lj_vmmath.c
index 63886aa7..b60858b2 100644
--- a/src/lj_vmmath.c
+++ b/src/lj_vmmath.c
@@ -13,15 +13,17 @@
 #include "lj_ir.h"
 #include "lj_vm.h"
 
-/* -- Helper functions for generated machine code ------------------------- */
+/* -- Wrapper functions --------------------------------------------------- */
 
-#if LJ_TARGET_X86ORX64
-/* Wrapper functions to avoid linker issues on OSX. */
-LJ_FUNCA double lj_vm_sinh(double x) { return sinh(x); }
-LJ_FUNCA double lj_vm_cosh(double x) { return cosh(x); }
-LJ_FUNCA double lj_vm_tanh(double x) { return tanh(x); }
+#if LJ_TARGET_X86 && __ELF__ && __PIC__
+/* Wrapper functions to deal with the ELF/x86 PIC disaster. */
+LJ_FUNCA double lj_wrap_sinh(double x) { return sinh(x); }
+LJ_FUNCA double lj_wrap_cosh(double x) { return cosh(x); }
+LJ_FUNCA double lj_wrap_tanh(double x) { return tanh(x); }
 #endif
 
+/* -- Helper functions for generated machine code ------------------------- */
+
 #if !LJ_TARGET_X86ORX64
 double lj_vm_foldarith(double x, double y, int op)
 {
diff --git a/src/vm_x86.dasc b/src/vm_x86.dasc
index a0c7cc60..cd43afbd 100644
--- a/src/vm_x86.dasc
+++ b/src/vm_x86.dasc
@@ -2084,7 +2084,7 @@ static void build_subroutines(BuildCtx *ctx)
   |  movsd FPARG1, xmm0
   |.endif
   |  mov RB, BASE
-  |  call extern lj_vm_ .. func
+  |  call extern func
   |  mov BASE, RB
   |.if X64
   |  jmp ->fff_resxmm0
@@ -5962,15 +5962,21 @@ static void emit_asm_debug(BuildCtx *ctx)
 	"LEFDEY:\n\n", fcsize);
     }
 #endif
-#if LJ_64
-    fprintf(ctx->fp, "\t.subsections_via_symbols\n");
-#else
+#if !LJ_64
     fprintf(ctx->fp,
       "\t.non_lazy_symbol_pointer\n"
      "L_lj_err_unwind_dwarf$non_lazy_ptr:\n"
      ".indirect_symbol _lj_err_unwind_dwarf\n"
-     ".long 0\n");
+     ".long 0\n\n");
+    fprintf(ctx->fp, "\t.section __IMPORT,__jump_table,symbol_stubs,pure_instructions+self_modifying_code,5\n");
+    {
+      const char *const *xn;
+      for (xn = ctx->extnames; *xn; xn++)
+	if (strncmp(*xn, LABEL_PREFIX, sizeof(LABEL_PREFIX)-1))
+	  fprintf(ctx->fp, "L_%s$stub:\n\t.indirect_symbol _%s\n\t.ascii \"\\364\\364\\364\\364\\364\"\n", *xn, *xn);
+    }
 #endif
+    fprintf(ctx->fp, ".subsections_via_symbols\n");
     }
     break;
   default:	/* Difficult for other modes. */
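
Note on the new relocation path (not part of the patch): the sketch below is a minimal standalone C program that mirrors the fixup logic added to emit_asm_reloc_text() above, so you can see the assembler text buildvm now emits for an external call such as sinh. The BUILD_elfasm/BUILD_machasm constants, the lj32 parameter (standing in for LJ_32) and the example symbols are simplified stand-ins for the real buildvm definitions. The #if __PIC__ guard in the ELF/x86 case only works because lj_vm.s is renamed to lj_vm.S, which routes the generated file through the C preprocessor; PIC builds then call the lj_wrap_* wrappers from lj_vmmath.c, while non-PIC builds call libm directly via the PLT.

#include <stdio.h>
#include <string.h>

#define LABEL_PREFIX "lj_"        /* same prefix buildvm uses for internal symbols */

enum { BUILD_elfasm, BUILD_machasm };

/* Hypothetical helper mirroring the fixups added to emit_asm_reloc_text(). */
static void emit_extern_call(FILE *fp, int mode, int lj32,
                             const char *opname, const char *sym)
{
  /* Symbols starting with LABEL_PREFIX (after an optional Mach-O "_") are
  ** internal to the VM and need no fixup; everything else is external. */
  if (strncmp(sym+(*sym == '_'), LABEL_PREFIX, sizeof(LABEL_PREFIX)-1)) {
    if (mode == BUILD_elfasm) {
      if (lj32)  /* x86 ELF: let the preprocessor pick wrapper vs. PLT call. */
        fprintf(fp, "#if __PIC__\n\t%s lj_wrap_%s\n#else\n", opname, sym);
      fprintf(fp, "\t%s %s@PLT\n", opname, sym);
      if (lj32)
        fprintf(fp, "#endif\n");
      return;
    } else if (lj32 && mode == BUILD_machasm) {
      /* 32 bit Mach-O: call through the __IMPORT jump-table stub. */
      fprintf(fp, "\t%s L%s$stub\n", opname, sym);
      return;
    }
  }
  fprintf(fp, "\t%s %s\n", opname, sym);  /* internal symbol: unchanged */
}

int main(void)
{
  emit_extern_call(stdout, BUILD_elfasm, 1, "call", "sinh");    /* ELF x86 */
  emit_extern_call(stdout, BUILD_elfasm, 0, "call", "sinh");    /* ELF x64 */
  emit_extern_call(stdout, BUILD_machasm, 1, "call", "_sinh");  /* Mach-O x86 */
  emit_extern_call(stdout, BUILD_elfasm, 1, "call", "lj_err_throw");  /* internal */
  return 0;
}

Compiling and running this prints the __PIC__-guarded wrapper/PLT sequence for ELF/x86, a plain @PLT call for x64, the L_sinh$stub jump-table call for 32 bit Mach-O, and an unmodified call for internal lj_* symbols.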