From bc1a947f5b964ecee17048b41bcf1bc4d1a83fa0 Mon Sep 17 00:00:00 2001 From: dg <dg@51575b47-30f0-44d4-a5cc-537603b46e54> Date: Sat, 25 Jun 2011 01:02:09 +0000 Subject: [PATCH] Added support for LuaJIT2 (beta8) git-svn-id: http://svn.net-core.org/repos/t-engine4@3710 51575b47-30f0-44d4-a5cc-537603b46e54 --- build/te4core.lua | 17 +- game/thirdparty/jit/bc.lua | 192 + game/thirdparty/jit/bcsave.lua | 125 + game/thirdparty/jit/dis_arm.lua | 543 +++ game/thirdparty/jit/dis_x64.lua | 20 + game/thirdparty/jit/dis_x86.lua | 548 ++- game/thirdparty/jit/dump.lua | 773 +++- game/thirdparty/jit/v.lua | 162 + game/thirdparty/jit/vmdef.lua | 284 ++ game/thirdparty/lanes.lua | 2 +- premake4.lua | 2 +- src/luajit2/dynasm/dasm_arm.h | 448 ++ src/luajit2/dynasm/dasm_arm.lua | 949 ++++ src/luajit2/dynasm/dasm_ppc.h | 408 ++ src/luajit2/dynasm/dasm_ppc.lua | 1225 +++++ src/luajit2/dynasm/dasm_proto.h | 83 + src/luajit2/dynasm/dasm_x64.lua | 12 + src/luajit2/dynasm/dasm_x86.h | 470 ++ src/luajit2/dynasm/dasm_x86.lua | 1931 ++++++++ src/luajit2/dynasm/dynasm.lua | 1076 +++++ src/luajit2/src/buildvm.c | 506 ++ src/luajit2/src/buildvm.h | 104 + src/luajit2/src/buildvm_arm.dasc | 4043 ++++++++++++++++ src/luajit2/src/buildvm_arm.h | 7368 ++++++++++++++++++++++++++++++ src/luajit2/src/buildvm_asm.c | 262 ++ src/luajit2/src/buildvm_fold.c | 229 + src/luajit2/src/buildvm_lib.c | 377 ++ src/luajit2/src/buildvm_peobj.c | 334 ++ src/luajit2/src/buildvm_ppc.dasc | 3732 +++++++++++++++ src/luajit2/src/buildvm_ppcspe.h | 6108 +++++++++++++++++++++++++ src/luajit2/src/buildvm_x64.h | 3249 +++++++++++++ src/luajit2/src/buildvm_x64win.h | 3247 +++++++++++++ src/luajit2/src/buildvm_x86.dasc | 6216 +++++++++++++++++++++++++ src/luajit2/src/buildvm_x86.h | 3405 ++++++++++++++ src/luajit2/src/lauxlib.h | 159 + src/luajit2/src/lib_aux.c | 377 ++ src/luajit2/src/lib_base.c | 650 +++ src/luajit2/src/lib_bit.c | 74 + src/luajit2/src/lib_debug.c | 366 ++ src/luajit2/src/lib_ffi.c | 757 +++ src/luajit2/src/lib_init.c | 53 + src/luajit2/src/lib_io.c | 533 +++ src/luajit2/src/lib_jit.c | 635 +++ src/luajit2/src/lib_math.c | 218 + src/luajit2/src/lib_os.c | 256 ++ src/luajit2/src/lib_package.c | 527 +++ src/luajit2/src/lib_string.c | 855 ++++ src/luajit2/src/lib_table.c | 293 ++ src/luajit2/src/lj.supp | 16 + src/luajit2/src/lj_alloc.c | 1381 ++++++ src/luajit2/src/lj_alloc.h | 17 + src/luajit2/src/lj_api.c | 1214 +++++ src/luajit2/src/lj_arch.h | 266 ++ src/luajit2/src/lj_asm.c | 1653 +++++++ src/luajit2/src/lj_asm.h | 17 + src/luajit2/src/lj_asm_arm.h | 1860 ++++++++ src/luajit2/src/lj_asm_x86.h | 2685 +++++++++++ src/luajit2/src/lj_bc.c | 14 + src/luajit2/src/lj_bc.h | 261 ++ src/luajit2/src/lj_bcdump.h | 66 + src/luajit2/src/lj_bcread.c | 466 ++ src/luajit2/src/lj_bcwrite.c | 389 ++ src/luajit2/src/lj_carith.c | 309 ++ src/luajit2/src/lj_carith.h | 27 + src/luajit2/src/lj_ccall.c | 609 +++ src/luajit2/src/lj_ccall.h | 119 + src/luajit2/src/lj_cconv.c | 734 +++ src/luajit2/src/lj_cconv.h | 70 + src/luajit2/src/lj_cdata.c | 284 ++ src/luajit2/src/lj_cdata.h | 75 + src/luajit2/src/lj_char.c | 43 + src/luajit2/src/lj_char.h | 42 + src/luajit2/src/lj_clib.c | 370 ++ src/luajit2/src/lj_clib.h | 29 + src/luajit2/src/lj_cparse.c | 1829 ++++++++ src/luajit2/src/lj_cparse.h | 63 + src/luajit2/src/lj_crecord.c | 1159 +++++ src/luajit2/src/lj_crecord.h | 38 + src/luajit2/src/lj_ctype.c | 600 +++ src/luajit2/src/lj_ctype.h | 439 ++ src/luajit2/src/lj_debug.c | 487 ++ src/luajit2/src/lj_debug.h | 41 + src/luajit2/src/lj_def.h | 305 ++ 
src/luajit2/src/lj_dispatch.c | 459 ++ src/luajit2/src/lj_dispatch.h | 89 + src/luajit2/src/lj_emit_arm.h | 300 ++ src/luajit2/src/lj_emit_x86.h | 457 ++ src/luajit2/src/lj_err.c | 711 +++ src/luajit2/src/lj_err.h | 41 + src/luajit2/src/lj_errmsg.h | 171 + src/luajit2/src/lj_ff.h | 18 + src/luajit2/src/lj_ffrecord.c | 853 ++++ src/luajit2/src/lj_ffrecord.h | 24 + src/luajit2/src/lj_frame.h | 138 + src/luajit2/src/lj_func.c | 180 + src/luajit2/src/lj_func.h | 24 + src/luajit2/src/lj_gc.c | 838 ++++ src/luajit2/src/lj_gc.h | 133 + src/luajit2/src/lj_gdbjit.c | 762 +++ src/luajit2/src/lj_gdbjit.h | 22 + src/luajit2/src/lj_ir.c | 492 ++ src/luajit2/src/lj_ir.h | 538 +++ src/luajit2/src/lj_ircall.h | 224 + src/luajit2/src/lj_iropt.h | 159 + src/luajit2/src/lj_jit.h | 371 ++ src/luajit2/src/lj_lex.c | 507 ++ src/luajit2/src/lj_lex.h | 82 + src/luajit2/src/lj_lib.c | 260 ++ src/luajit2/src/lj_lib.h | 112 + src/luajit2/src/lj_mcode.c | 317 ++ src/luajit2/src/lj_mcode.h | 23 + src/luajit2/src/lj_meta.c | 463 ++ src/luajit2/src/lj_meta.h | 37 + src/luajit2/src/lj_obj.c | 35 + src/luajit2/src/lj_obj.h | 841 ++++ src/luajit2/src/lj_opt_dce.c | 79 + src/luajit2/src/lj_opt_fold.c | 2186 +++++++++ src/luajit2/src/lj_opt_loop.c | 385 ++ src/luajit2/src/lj_opt_mem.c | 861 ++++ src/luajit2/src/lj_opt_narrow.c | 647 +++ src/luajit2/src/lj_opt_split.c | 713 +++ src/luajit2/src/lj_parse.c | 2511 ++++++++++ src/luajit2/src/lj_parse.h | 18 + src/luajit2/src/lj_record.c | 2166 +++++++++ src/luajit2/src/lj_record.h | 43 + src/luajit2/src/lj_snap.c | 457 ++ src/luajit2/src/lj_snap.h | 34 + src/luajit2/src/lj_state.c | 280 ++ src/luajit2/src/lj_state.h | 35 + src/luajit2/src/lj_str.c | 415 ++ src/luajit2/src/lj_str.h | 53 + src/luajit2/src/lj_tab.c | 622 +++ src/luajit2/src/lj_tab.h | 67 + src/luajit2/src/lj_target.h | 142 + src/luajit2/src/lj_target_arm.h | 205 + src/luajit2/src/lj_target_x86.h | 330 ++ src/luajit2/src/lj_trace.c | 794 ++++ src/luajit2/src/lj_trace.h | 53 + src/luajit2/src/lj_traceerr.h | 61 + src/luajit2/src/lj_udata.c | 34 + src/luajit2/src/lj_udata.h | 14 + src/luajit2/src/lj_vm.h | 97 + src/luajit2/src/lj_vmevent.c | 56 + src/luajit2/src/lj_vmevent.h | 49 + src/luajit2/src/lj_vmmath.c | 111 + src/luajit2/src/lua.h | 388 ++ src/luajit2/src/luaconf.h | 131 + src/luajit2/src/luajit.c | 553 +++ src/luajit2/src/luajit.h | 70 + src/luajit2/src/lualib.h | 43 + src/luajit2/tome.patch | 149 + src/luajit2/vmdef.lua | 284 ++ src/main.c | 13 +- 153 files changed, 96637 insertions(+), 373 deletions(-) create mode 100644 game/thirdparty/jit/bc.lua create mode 100644 game/thirdparty/jit/bcsave.lua create mode 100644 game/thirdparty/jit/dis_arm.lua create mode 100644 game/thirdparty/jit/dis_x64.lua create mode 100644 game/thirdparty/jit/v.lua create mode 100644 game/thirdparty/jit/vmdef.lua create mode 100644 src/luajit2/dynasm/dasm_arm.h create mode 100644 src/luajit2/dynasm/dasm_arm.lua create mode 100644 src/luajit2/dynasm/dasm_ppc.h create mode 100644 src/luajit2/dynasm/dasm_ppc.lua create mode 100644 src/luajit2/dynasm/dasm_proto.h create mode 100644 src/luajit2/dynasm/dasm_x64.lua create mode 100644 src/luajit2/dynasm/dasm_x86.h create mode 100644 src/luajit2/dynasm/dasm_x86.lua create mode 100644 src/luajit2/dynasm/dynasm.lua create mode 100644 src/luajit2/src/buildvm.c create mode 100644 src/luajit2/src/buildvm.h create mode 100644 src/luajit2/src/buildvm_arm.dasc create mode 100644 src/luajit2/src/buildvm_arm.h create mode 100644 src/luajit2/src/buildvm_asm.c create mode 100644 
src/luajit2/src/buildvm_fold.c create mode 100644 src/luajit2/src/buildvm_lib.c create mode 100644 src/luajit2/src/buildvm_peobj.c create mode 100644 src/luajit2/src/buildvm_ppc.dasc create mode 100644 src/luajit2/src/buildvm_ppcspe.h create mode 100644 src/luajit2/src/buildvm_x64.h create mode 100644 src/luajit2/src/buildvm_x64win.h create mode 100644 src/luajit2/src/buildvm_x86.dasc create mode 100644 src/luajit2/src/buildvm_x86.h create mode 100644 src/luajit2/src/lauxlib.h create mode 100644 src/luajit2/src/lib_aux.c create mode 100644 src/luajit2/src/lib_base.c create mode 100644 src/luajit2/src/lib_bit.c create mode 100644 src/luajit2/src/lib_debug.c create mode 100644 src/luajit2/src/lib_ffi.c create mode 100644 src/luajit2/src/lib_init.c create mode 100644 src/luajit2/src/lib_io.c create mode 100644 src/luajit2/src/lib_jit.c create mode 100644 src/luajit2/src/lib_math.c create mode 100644 src/luajit2/src/lib_os.c create mode 100644 src/luajit2/src/lib_package.c create mode 100644 src/luajit2/src/lib_string.c create mode 100644 src/luajit2/src/lib_table.c create mode 100644 src/luajit2/src/lj.supp create mode 100644 src/luajit2/src/lj_alloc.c create mode 100644 src/luajit2/src/lj_alloc.h create mode 100644 src/luajit2/src/lj_api.c create mode 100644 src/luajit2/src/lj_arch.h create mode 100644 src/luajit2/src/lj_asm.c create mode 100644 src/luajit2/src/lj_asm.h create mode 100644 src/luajit2/src/lj_asm_arm.h create mode 100644 src/luajit2/src/lj_asm_x86.h create mode 100644 src/luajit2/src/lj_bc.c create mode 100644 src/luajit2/src/lj_bc.h create mode 100644 src/luajit2/src/lj_bcdump.h create mode 100644 src/luajit2/src/lj_bcread.c create mode 100644 src/luajit2/src/lj_bcwrite.c create mode 100644 src/luajit2/src/lj_carith.c create mode 100644 src/luajit2/src/lj_carith.h create mode 100644 src/luajit2/src/lj_ccall.c create mode 100644 src/luajit2/src/lj_ccall.h create mode 100644 src/luajit2/src/lj_cconv.c create mode 100644 src/luajit2/src/lj_cconv.h create mode 100644 src/luajit2/src/lj_cdata.c create mode 100644 src/luajit2/src/lj_cdata.h create mode 100644 src/luajit2/src/lj_char.c create mode 100644 src/luajit2/src/lj_char.h create mode 100644 src/luajit2/src/lj_clib.c create mode 100644 src/luajit2/src/lj_clib.h create mode 100644 src/luajit2/src/lj_cparse.c create mode 100644 src/luajit2/src/lj_cparse.h create mode 100644 src/luajit2/src/lj_crecord.c create mode 100644 src/luajit2/src/lj_crecord.h create mode 100644 src/luajit2/src/lj_ctype.c create mode 100644 src/luajit2/src/lj_ctype.h create mode 100644 src/luajit2/src/lj_debug.c create mode 100644 src/luajit2/src/lj_debug.h create mode 100644 src/luajit2/src/lj_def.h create mode 100644 src/luajit2/src/lj_dispatch.c create mode 100644 src/luajit2/src/lj_dispatch.h create mode 100644 src/luajit2/src/lj_emit_arm.h create mode 100644 src/luajit2/src/lj_emit_x86.h create mode 100644 src/luajit2/src/lj_err.c create mode 100644 src/luajit2/src/lj_err.h create mode 100644 src/luajit2/src/lj_errmsg.h create mode 100644 src/luajit2/src/lj_ff.h create mode 100644 src/luajit2/src/lj_ffrecord.c create mode 100644 src/luajit2/src/lj_ffrecord.h create mode 100644 src/luajit2/src/lj_frame.h create mode 100644 src/luajit2/src/lj_func.c create mode 100644 src/luajit2/src/lj_func.h create mode 100644 src/luajit2/src/lj_gc.c create mode 100644 src/luajit2/src/lj_gc.h create mode 100644 src/luajit2/src/lj_gdbjit.c create mode 100644 src/luajit2/src/lj_gdbjit.h create mode 100644 src/luajit2/src/lj_ir.c create mode 100644 
src/luajit2/src/lj_ir.h create mode 100644 src/luajit2/src/lj_ircall.h create mode 100644 src/luajit2/src/lj_iropt.h create mode 100644 src/luajit2/src/lj_jit.h create mode 100644 src/luajit2/src/lj_lex.c create mode 100644 src/luajit2/src/lj_lex.h create mode 100644 src/luajit2/src/lj_lib.c create mode 100644 src/luajit2/src/lj_lib.h create mode 100644 src/luajit2/src/lj_mcode.c create mode 100644 src/luajit2/src/lj_mcode.h create mode 100644 src/luajit2/src/lj_meta.c create mode 100644 src/luajit2/src/lj_meta.h create mode 100644 src/luajit2/src/lj_obj.c create mode 100644 src/luajit2/src/lj_obj.h create mode 100644 src/luajit2/src/lj_opt_dce.c create mode 100644 src/luajit2/src/lj_opt_fold.c create mode 100644 src/luajit2/src/lj_opt_loop.c create mode 100644 src/luajit2/src/lj_opt_mem.c create mode 100644 src/luajit2/src/lj_opt_narrow.c create mode 100644 src/luajit2/src/lj_opt_split.c create mode 100644 src/luajit2/src/lj_parse.c create mode 100644 src/luajit2/src/lj_parse.h create mode 100644 src/luajit2/src/lj_record.c create mode 100644 src/luajit2/src/lj_record.h create mode 100644 src/luajit2/src/lj_snap.c create mode 100644 src/luajit2/src/lj_snap.h create mode 100644 src/luajit2/src/lj_state.c create mode 100644 src/luajit2/src/lj_state.h create mode 100644 src/luajit2/src/lj_str.c create mode 100644 src/luajit2/src/lj_str.h create mode 100644 src/luajit2/src/lj_tab.c create mode 100644 src/luajit2/src/lj_tab.h create mode 100644 src/luajit2/src/lj_target.h create mode 100644 src/luajit2/src/lj_target_arm.h create mode 100644 src/luajit2/src/lj_target_x86.h create mode 100644 src/luajit2/src/lj_trace.c create mode 100644 src/luajit2/src/lj_trace.h create mode 100644 src/luajit2/src/lj_traceerr.h create mode 100644 src/luajit2/src/lj_udata.c create mode 100644 src/luajit2/src/lj_udata.h create mode 100644 src/luajit2/src/lj_vm.h create mode 100644 src/luajit2/src/lj_vmevent.c create mode 100644 src/luajit2/src/lj_vmevent.h create mode 100644 src/luajit2/src/lj_vmmath.c create mode 100644 src/luajit2/src/lua.h create mode 100644 src/luajit2/src/luaconf.h create mode 100644 src/luajit2/src/luajit.c create mode 100644 src/luajit2/src/luajit.h create mode 100644 src/luajit2/src/lualib.h create mode 100644 src/luajit2/tome.patch create mode 100644 src/luajit2/vmdef.lua diff --git a/build/te4core.lua b/build/te4core.lua index 86a010d636..ac7814c304 100644 --- a/build/te4core.lua +++ b/build/te4core.lua @@ -106,9 +106,20 @@ elseif _OPTIONS.lua == "jit2" then language "C" targetname "lua" - files { "../src/luajit2/src/*.c", "../src/luajit2/src/*.s", } --- configuration "linux" --- defines { "LUA_USE_POSIX" } + files { "../src/luajit2/src/*.c", "../src/luajit2/src/*.s", "../src/luajit2/src/lj_vm.s", "../src/luajit2/src/lj_bcdef.h", "../src/luajit2/src/lj_ffdef.h", "../src/luajit2/src/lj_ffdef.h", "../src/luajit2/src/lj_libdef.h", "../src/luajit2/src/lj_recdef.h", "../src/luajit2/src/lj_folddef.h" } + + configuration "linux" + local list = "../src/luajit2/src/lib_base.c ../src/luajit2/src/lib_math.c ../src/luajit2/src/lib_bit.c ../src/luajit2/src/lib_string.c ../src/luajit2/src/lib_table.c ../src/luajit2/src/lib_io.c ../src/luajit2/src/lib_os.c ../src/luajit2/src/lib_package.c ../src/luajit2/src/lib_debug.c ../src/luajit2/src/lib_jit.c ../src/luajit2/src/lib_ffi.c" + prebuildcommands{ + "gcc -o ../src/luajit2/src/buildvm ../src/luajit2/src/buildvm*.c", + "../src/luajit2/src/buildvm -m elfasm -o ../src/luajit2/src/lj_vm.s", + "../src/luajit2/src/buildvm -m bcdef -o 
../src/luajit2/src/lj_bcdef.h "..list, + "../src/luajit2/src/buildvm -m ffdef -o ../src/luajit2/src/lj_ffdef.h "..list, + "../src/luajit2/src/buildvm -m libdef -o ../src/luajit2/src/lj_libdef.h "..list, + "../src/luajit2/src/buildvm -m recdef -o ../src/luajit2/src/lj_recdef.h "..list, + "../src/luajit2/src/buildvm -m vmdef -o ../src/luajit2/vmdef.lua "..list, + "../src/luajit2/src/buildvm -m folddef -o ../src/luajit2/src/lj_folddef.h ../src/luajit2/src/lj_opt_fold.c", + } end project "luasocket" diff --git a/game/thirdparty/jit/bc.lua b/game/thirdparty/jit/bc.lua new file mode 100644 index 0000000000..f4ee8e8ab3 --- /dev/null +++ b/game/thirdparty/jit/bc.lua @@ -0,0 +1,192 @@ +---------------------------------------------------------------------------- +-- LuaJIT bytecode listing module. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- Released under the MIT/X license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- +-- This module lists the bytecode of a Lua function. If it's loaded by -jbc +-- it hooks into the parser and lists all functions of a chunk as they +-- are parsed. +-- +-- Example usage: +-- +-- luajit -jbc -e 'local x=0; for i=1,1e6 do x=x+i end; print(x)' +-- luajit -jbc=- foo.lua +-- luajit -jbc=foo.list foo.lua +-- +-- Default output is to stderr. To redirect the output to a file, pass a +-- filename as an argument (use '-' for stdout) or set the environment +-- variable LUAJIT_LISTFILE. The file is overwritten every time the module +-- is started. +-- +-- This module can also be used programmatically: +-- +-- local bc = require("jit.bc") +-- +-- local function foo() print("hello") end +-- +-- bc.dump(foo) --> -- BYTECODE -- [...] +-- print(bc.line(foo, 2)) --> 0002 KSTR 1 1 ; "hello" +-- +-- local out = { +-- -- Do something with each line: +-- write = function(t, ...) io.write(...) end, +-- close = function(t) end, +-- flush = function(t) end, +-- } +-- bc.dump(foo, out) +-- +------------------------------------------------------------------------------ + +-- Cache some library functions and objects. +local jit = require("jit") +assert(jit.version_num == 20000, "LuaJIT core/library version mismatch") +local jutil = require("jit.util") +local vmdef = require("jit.vmdef") +local bit = require("bit") +local sub, gsub, format = string.sub, string.gsub, string.format +local byte, band, shr = string.byte, bit.band, bit.rshift +local funcinfo, funcbc, funck = jutil.funcinfo, jutil.funcbc, jutil.funck +local funcuvname = jutil.funcuvname +local bcnames = vmdef.bcnames +local stdout, stderr = io.stdout, io.stderr + +------------------------------------------------------------------------------ + +local function ctlsub(c) + if c == "\n" then return "\\n" + elseif c == "\r" then return "\\r" + elseif c == "\t" then return "\\t" + elseif c == "\r" then return "\\r" + else return format("\\%03d", byte(c)) + end +end + +-- Return one bytecode line. 
+local function bcline(func, pc, prefix) + local ins, m = funcbc(func, pc) + if not ins then return end + local ma, mb, mc = band(m, 7), band(m, 15*8), band(m, 15*128) + local a = band(shr(ins, 8), 0xff) + local oidx = 6*band(ins, 0xff) + local op = sub(bcnames, oidx+1, oidx+6) + local s = format("%04d %s %-6s %3s ", + pc, prefix or " ", op, ma == 0 and "" or a) + local d = shr(ins, 16) + if mc == 13*128 then -- BCMjump + return format("%s=> %04d\n", s, pc+d-0x7fff) + end + if mb ~= 0 then + d = band(d, 0xff) + elseif mc == 0 then + return s.."\n" + end + local kc + if mc == 10*128 then -- BCMstr + kc = funck(func, -d-1) + kc = format(#kc > 40 and '"%.40s"~' or '"%s"', gsub(kc, "%c", ctlsub)) + elseif mc == 9*128 then -- BCMnum + kc = funck(func, d) + if op == "TSETM " then kc = kc - 2^52 end + elseif mc == 12*128 then -- BCMfunc + local fi = funcinfo(funck(func, -d-1)) + if fi.ffid then + kc = vmdef.ffnames[fi.ffid] + else + kc = fi.loc + end + elseif mc == 5*128 then -- BCMuv + kc = funcuvname(func, d) + end + if ma == 5 then -- BCMuv + local ka = funcuvname(func, a) + if kc then kc = ka.." ; "..kc else kc = ka end + end + if mb ~= 0 then + local b = shr(ins, 24) + if kc then return format("%s%3d %3d ; %s\n", s, b, d, kc) end + return format("%s%3d %3d\n", s, b, d) + end + if kc then return format("%s%3d ; %s\n", s, d, kc) end + if mc == 7*128 and d > 32767 then d = d - 65536 end -- BCMlits + return format("%s%3d\n", s, d) +end + +-- Collect branch targets of a function. +local function bctargets(func) + local target = {} + for pc=1,1000000000 do + local ins, m = funcbc(func, pc) + if not ins then break end + if band(m, 15*128) == 13*128 then target[pc+shr(ins, 16)-0x7fff] = true end + end + return target +end + +-- Dump bytecode instructions of a function. +local function bcdump(func, out, all) + if not out then out = stdout end + local fi = funcinfo(func) + if all and fi.children then + for n=-1,-1000000000,-1 do + local k = funck(func, n) + if not k then break end + if type(k) == "proto" then bcdump(k, out, true) end + end + end + out:write(format("-- BYTECODE -- %s-%d\n", fi.loc, fi.lastlinedefined)) + local target = bctargets(func) + for pc=1,1000000000 do + local s = bcline(func, pc, target[pc] and "=>") + if not s then break end + out:write(s) + end + out:write("\n") + out:flush() +end + +------------------------------------------------------------------------------ + +-- Active flag and output file handle. +local active, out + +-- List handler. +local function h_list(func) + return bcdump(func, out) +end + +-- Detach list handler. +local function bclistoff() + if active then + active = false + jit.attach(h_list) + if out and out ~= stdout and out ~= stderr then out:close() end + out = nil + end +end + +-- Open the output file and attach list handler. +local function bcliston(outfile) + if active then bclistoff() end + if not outfile then outfile = os.getenv("LUAJIT_LISTFILE") end + if outfile then + out = outfile == "-" and stdout or assert(io.open(outfile, "w")) + else + out = stderr + end + jit.attach(h_list, "bc") + active = true +end + +-- Public module functions. +module(...) + +line = bcline +dump = bcdump +targets = bctargets + +on = bcliston +off = bclistoff +start = bcliston -- For -j command line option. 
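[Editor's note: illustrative addition, not part of the patch itself.] The jit.bc module introduced above is what the luajit -jbc option loads, and, as its header comment documents, it can also be driven programmatically once the engine runs on LuaJIT 2.0. A minimal sketch under those assumptions: the module asserts jit.version_num == 20000, game/thirdparty must be on package.path so that require("jit.bc") resolves to the file added above, and hot_loop plus the collector table are made-up names used only for illustration.

  local bc = require("jit.bc")

  -- Hypothetical function whose bytecode we want to inspect.
  local function hot_loop(n)
    local x = 0
    for i = 1, n do x = x + i end
    return x
  end

  -- Full listing; the out argument defaults to stdout when omitted.
  bc.dump(hot_loop)

  -- One formatted line and the set of branch targets, as documented above.
  print(bc.line(hot_loop, 2))
  for pc in pairs(bc.targets(hot_loop)) do print("branch target at pc", pc) end

  -- Capture the listing via the writer-table protocol shown in the header comment.
  local lines = {}
  bc.dump(hot_loop, {
    write = function(self, ...) lines[#lines + 1] = table.concat({...}) end,
    flush = function(self) end,
    close = function(self) end,
  })
  print(#lines .. " chunks captured")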
+ diff --git a/game/thirdparty/jit/bcsave.lua b/game/thirdparty/jit/bcsave.lua new file mode 100644 index 0000000000..c34bec8960 --- /dev/null +++ b/game/thirdparty/jit/bcsave.lua @@ -0,0 +1,125 @@ +---------------------------------------------------------------------------- +-- LuaJIT module to save/list bytecode. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- Released under the MIT/X license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- +-- This module saves or lists the bytecode for an input file. +-- It's run by the -b command line option. +-- +------------------------------------------------------------------------------ + +-- Cache some library functions and objects. +local jit = require("jit") +assert(jit.version_num == 20000, "LuaJIT core/library version mismatch") + +------------------------------------------------------------------------------ + +local function usage() + io.stderr:write[[ +Save LuaJIT bytecode: luajit -b[options] input output + -l Only list bytecode. + -s Strip debug info (default). + -g Keep debug info. + -e chunk Use chunk string as input. + -- Stop handling options. + - Use stdin as input and/or stdout as output. +]] + os.exit(1) +end + +local function readfile(input) + if type(input) == "function" then return input end + if input == "-" then input = nil end + local f, err = loadfile(input) + if not f then + io.stderr:write("luajit: ", err, "\n") + os.exit(1) + end + return f +end + +local function readstring(input) + local f, err = loadstring(input) + if not f then + io.stderr:write("luajit: ", err, "\n") + os.exit(1) + end + return f +end + +local function savefile(name, mode) + if name == "-" then return io.stdout end + local fp, err = io.open(name, mode) + if not fp then + io.stderr:write("luajit: cannot write ", err, "\n") + os.exit(1) + end + return fp +end + +------------------------------------------------------------------------------ + +local function bclist(input, output) + local f = readfile(input) + require("jit.bc").dump(f, savefile(output, "w"), true) +end + +local function bcsave(input, output, strip) + local f = readfile(input) + local s = string.dump(f, strip) + local fp = savefile(output, "wb") + local ok, err = fp:write(s) + if ok and output ~= "-" then ok, err = fp:close() end + if not ok then + io.stderr:write("luajit: cannot write ", arg[2], ": ", err, "\n") + os.exit(1) + end +end + +local function docmd(...) + local arg = {...} + local n = 1 + local list = false + local strip = true + while n <= #arg do + local a = arg[n] + if type(a) == "string" and string.sub(a, 1, 1) == "-" and a ~= "-" then + if a == "--" then table.remove(arg, n); break end + for m=2,#a do + local opt = string.sub(a, m, m) + if opt == "l" then + list = true + elseif opt == "s" then + strip = true + elseif opt == "g" then + strip = false + elseif opt == "e" then + if n ~= 1 or #arg < 2 or m ~= #a then usage() end + arg[2] = readstring(arg[2]) + else + usage() + end + end + table.remove(arg, n) + else + n = n + 1 + end + end + if list then + if #arg == 0 or #arg > 2 then usage() end + bclist(arg[1], arg[2] or "-") + else + if #arg ~= 2 then usage() end + bcsave(arg[1], arg[2], strip) + end +end + +------------------------------------------------------------------------------ + +-- Public module functions. +module(...) + +start = docmd -- Process -b command line option. 
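[Editor's note: illustrative addition, not part of the patch.] jit.bcsave backs the luajit -b command-line option (for example "luajit -b foo.lua foo.out" saves stripped bytecode, and "luajit -bl foo.lua" only lists it, per the usage text above), and its single export, start, can also be called directly from Lua. A minimal sketch, assuming a LuaJIT 2.0 runtime with the module reachable as require("jit.bcsave"); the file names are placeholders, and the module itself exits the process on any error.

  local bcsave = require("jit.bcsave")

  -- Save bytecode keeping debug info (-g); the default (-s) strips it.
  -- This goes through LuaJIT's extended string.dump(f, strip) inside the module.
  bcsave.start("-g", "scripts/foo.lua", "scripts/foo.raw")

  -- List the bytecode of an inline chunk (-e) to stdout ("-") instead of saving it.
  bcsave.start("-le", "local x = 0 for i = 1, 3 do x = x + i end", "-")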
+ diff --git a/game/thirdparty/jit/dis_arm.lua b/game/thirdparty/jit/dis_arm.lua new file mode 100644 index 0000000000..e4e98afc06 --- /dev/null +++ b/game/thirdparty/jit/dis_arm.lua @@ -0,0 +1,543 @@ +---------------------------------------------------------------------------- +-- LuaJIT ARM disassembler module. +-- +-- Copyright (C) 2005-2010 Mike Pall. All rights reserved. +-- Released under the MIT/X license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- This is a helper module used by the LuaJIT machine code dumper module. +-- +-- It disassembles most user-mode ARMv7 instructions +-- NYI: Advanced SIMD and VFP instructions. +------------------------------------------------------------------------------ + +local type = type +local sub, byte, format = string.sub, string.byte, string.format +local match, gmatch, gsub = string.match, string.gmatch, string.gsub +local concat = table.concat +local bit = require("bit") +local band, bor, ror, tohex = bit.band, bit.bor, bit.ror, bit.tohex +local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift + +------------------------------------------------------------------------------ +-- Opcode maps +------------------------------------------------------------------------------ + +local map_loadc = { + shift = 9, mask = 7, + [5] = { + shift = 0, mask = 0 -- NYI VFP load/store. + }, + _ = { + shift = 0, mask = 0 -- NYI ldc, mcrr, mrrc. + }, +} + +local map_datac = { + shift = 24, mask = 1, + [0] = { + shift = 9, mask = 7, + [5] = { + shift = 0, mask = 0 -- NYI VFP data. + }, + _ = { + shift = 0, mask = 0 -- NYI cdp, mcr, mrc. + }, + }, + "svcT", +} + +local map_loadcu = { + shift = 0, mask = 0, -- NYI unconditional CP load/store. +} + +local map_datacu = { + shift = 0, mask = 0, -- NYI unconditional CP data. +} + +local map_simddata = { + shift = 0, mask = 0, -- NYI SIMD data. +} + +local map_simdload = { + shift = 0, mask = 0, -- NYI SIMD load/store, preload. +} + +local map_preload = { + shift = 0, mask = 0, -- NYI preload. 
+} + +local map_media = { + shift = 20, mask = 31, + [0] = false, + { --01 + shift = 5, mask = 7, + [0] = "sadd16DNM", "sasxDNM", "ssaxDNM", "ssub16DNM", + "sadd8DNM", false, false, "ssub8DNM", + }, + { --02 + shift = 5, mask = 7, + [0] = "qadd16DNM", "qasxDNM", "qsaxDNM", "qsub16DNM", + "qadd8DNM", false, false, "qsub8DNM", + }, + { --03 + shift = 5, mask = 7, + [0] = "shadd16DNM", "shasxDNM", "shsaxDNM", "shsub16DNM", + "shadd8DNM", false, false, "shsub8DNM", + }, + false, + { --05 + shift = 5, mask = 7, + [0] = "uadd16DNM", "uasxDNM", "usaxDNM", "usub16DNM", + "uadd8DNM", false, false, "usub8DNM", + }, + { --06 + shift = 5, mask = 7, + [0] = "uqadd16DNM", "uqasxDNM", "uqsaxDNM", "uqsub16DNM", + "uqadd8DNM", false, false, "uqsub8DNM", + }, + { --07 + shift = 5, mask = 7, + [0] = "uhadd16DNM", "uhasxDNM", "uhsaxDNM", "uhsub16DNM", + "uhadd8DNM", false, false, "uhsub8DNM", + }, + { --08 + shift = 5, mask = 7, + [0] = "pkhbtDNMU", false, "pkhtbDNMU", + { shift = 16, mask = 15, [15] = "sxtb16DMU", _ = "sxtab16DNMU", }, + "pkhbtDNMU", "selDNM", "pkhtbDNMU", + }, + false, + { --0a + shift = 5, mask = 7, + [0] = "ssatDxMu", "ssat16DxM", "ssatDxMu", + { shift = 16, mask = 15, [15] = "sxtbDMU", _ = "sxtabDNMU", }, + "ssatDxMu", false, "ssatDxMu", + }, + { --0b + shift = 5, mask = 7, + [0] = "ssatDxMu", "revDM", "ssatDxMu", + { shift = 16, mask = 15, [15] = "sxthDMU", _ = "sxtahDNMU", }, + "ssatDxMu", "rev16DM", "ssatDxMu", + }, + { --0c + shift = 5, mask = 7, + [3] = { shift = 16, mask = 15, [15] = "uxtb16DMU", _ = "uxtab16DNMU", }, + }, + false, + { --0e + shift = 5, mask = 7, + [0] = "usatDwMu", "usat16DwM", "usatDwMu", + { shift = 16, mask = 15, [15] = "uxtbDMU", _ = "uxtabDNMU", }, + "usatDwMu", false, "usatDwMu", + }, + { --0f + shift = 5, mask = 7, + [0] = "usatDwMu", "rbitDM", "usatDwMu", + { shift = 16, mask = 15, [15] = "uxthDMU", _ = "uxtahDNMU", }, + "usatDwMu", "revshDM", "usatDwMu", + }, + { --10 + shift = 12, mask = 15, + [15] = { + shift = 5, mask = 7, + "smuadNMS", "smuadxNMS", "smusdNMS", "smusdxNMS", + }, + _ = { + shift = 5, mask = 7, + [0] = "smladNMSD", "smladxNMSD", "smlsdNMSD", "smlsdxNMSD", + }, + }, + false, false, false, + { --14 + shift = 5, mask = 7, + [0] = "smlaldDNMS", "smlaldxDNMS", "smlsldDNMS", "smlsldxDNMS", + }, + { --15 + shift = 5, mask = 7, + [0] = { shift = 12, mask = 15, [15] = "smmulNMS", _ = "smmlaNMSD", }, + { shift = 12, mask = 15, [15] = "smmulrNMS", _ = "smmlarNMSD", }, + false, false, false, false, + "smmlsNMSD", "smmlsrNMSD", + }, + false, false, + { --18 + shift = 5, mask = 7, + [0] = { shift = 12, mask = 15, [15] = "usad8NMS", _ = "usada8NMSD", }, + }, + false, + { --1a + shift = 5, mask = 3, [2] = "sbfxDMvw", + }, + { --1b + shift = 5, mask = 3, [2] = "sbfxDMvw", + }, + { --1c + shift = 5, mask = 3, + [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", }, + }, + { --1d + shift = 5, mask = 3, + [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", }, + }, + { --1e + shift = 5, mask = 3, [2] = "ubfxDMvw", + }, + { --1f + shift = 5, mask = 3, [2] = "ubfxDMvw", + }, +} + +local map_load = { + shift = 21, mask = 9, + { + shift = 20, mask = 5, + [0] = "strtDL", "ldrtDL", [4] = "strbtDL", [5] = "ldrbtDL", + }, + _ = { + shift = 20, mask = 5, + [0] = "strDL", "ldrDL", [4] = "strbDL", [5] = "ldrbDL", + } +} + +local map_load1 = { + shift = 4, mask = 1, + [0] = map_load, map_media, +} + +local map_loadm = { + shift = 20, mask = 1, + [0] = { + shift = 23, mask = 3, + [0] = "stmdaNR", "stmNR", + { shift = 16, mask = 63, [45] = "pushR", _ = 
"stmdbNR", }, "stmibNR", + }, + { + shift = 23, mask = 3, + [0] = "ldmdaNR", { shift = 16, mask = 63, [61] = "popR", _ = "ldmNR", }, + "ldmdbNR", "ldmibNR", + }, +} + +local map_data = { + shift = 21, mask = 15, + [0] = "andDNPs", "eorDNPs", "subDNPs", "rsbDNPs", + "addDNPs", "adcDNPs", "sbcDNPs", "rscDNPs", + "tstNP", "teqNP", "cmpNP", "cmnNP", + "orrDNPs", "movDPs", "bicDNPs", "mvnDPs", +} + +local map_mul = { + shift = 21, mask = 7, + [0] = "mulNMSs", "mlaNMSDs", "umaalDNMS", "mlsDNMS", + "umullDNMSs", "umlalDNMSs", "smullDNMSs", "smlalDNMSs", +} + +local map_sync = { + shift = 20, mask = 15, -- NYI: brackets around N. R(D+1) for ldrexd/strexd. + [0] = "swpDMN", false, false, false, + "swpbDMN", false, false, false, + "strexDMN", "ldrexDN", "strexdDN", "ldrexdDN", + "strexbDMN", "ldrexbDN", "strexhDN", "ldrexhDN", +} + +local map_mulh = { + shift = 21, mask = 3, + [0] = { shift = 5, mask = 3, + [0] = "smlabbNMSD", "smlatbNMSD", "smlabtNMSD", "smlattNMSD", }, + { shift = 5, mask = 3, + [0] = "smlawbNMSD", "smulwbNMS", "smlawtNMSD", "smulwtNMS", }, + { shift = 5, mask = 3, + [0] = "smlalbbDNMS", "smlaltbDNMS", "smlalbtDNMS", "smlalttDNMS", }, + { shift = 5, mask = 3, + [0] = "smulbbNMS", "smultbNMS", "smulbtNMS", "smulttNMS", }, +} + +local map_misc = { + shift = 4, mask = 7, + -- NYI: decode PSR bits of msr. + [0] = { shift = 21, mask = 1, [0] = "mrsD", "msrM", }, + { shift = 21, mask = 3, "bxM", false, "clzDM", }, + { shift = 21, mask = 3, "bxjM", }, + { shift = 21, mask = 3, "blxM", }, + false, + { shift = 21, mask = 3, [0] = "qaddDMN", "qsubDMN", "qdaddDMN", "qdsubDMN", }, + false, + { shift = 21, mask = 3, "bkptK", }, +} + +local map_datar = { + shift = 4, mask = 9, + [9] = { + shift = 5, mask = 3, + [0] = { shift = 24, mask = 1, [0] = map_mul, map_sync, }, + { shift = 20, mask = 1, [0] = "strhDL", "ldrhDL", }, + { shift = 20, mask = 1, [0] = "ldrdDL", "ldrsbDL", }, + { shift = 20, mask = 1, [0] = "strdDL", "ldrshDL", }, + }, + _ = { + shift = 20, mask = 25, + [16] = { shift = 7, mask = 1, [0] = map_misc, map_mulh, }, + _ = { + shift = 0, mask = 0xffffffff, + [bor(0xe1a00000)] = "nop", + _ = map_data, + } + }, +} + +local map_datai = { + shift = 20, mask = 31, -- NYI: decode PSR bits of msr. Decode imm12. + [16] = "movwDW", [20] = "movtDW", + [18] = { shift = 0, mask = 0xf00ff, [0] = "nopv6", _ = "msrNW", }, + [22] = "msrNW", + _ = map_data, +} + +local map_branch = { + shift = 24, mask = 1, + [0] = "bB", "blB" +} + +local map_condins = { + [0] = map_datar, map_datai, map_load, map_load1, + map_loadm, map_branch, map_loadc, map_datac +} + +-- NYI: setend. +local map_uncondins = { + [0] = false, map_simddata, map_simdload, map_preload, + false, "blxB", map_loadcu, map_datacu, +} + +------------------------------------------------------------------------------ + +local map_gpr = { + [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc", +} + +local map_cond = { + [0] = "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", "gt", "le", "al", +} + +local map_shift = { [0] = "lsl", "lsr", "asr", "ror", } + +------------------------------------------------------------------------------ + +-- Output a nicely formatted line with an opcode and operands. 
+local function putop(ctx, text, operands) + local pos = ctx.pos + local extra = "" + if ctx.rel then + local sym = ctx.symtab[ctx.rel] + if sym then + extra = "\t->"..sym + elseif band(ctx.op, 0x0e000000) ~= 0x0a000000 then + extra = "\t; 0x"..tohex(ctx.rel) + end + end + if ctx.hexdump > 0 then + ctx.out(format("%08x %s %-5s %s%s\n", + ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra)) + else + ctx.out(format("%08x %-5s %s%s\n", + ctx.addr+pos, text, concat(operands, ", "), extra)) + end + ctx.pos = pos + 4 +end + +-- Fallback for unknown opcodes. +local function unknown(ctx) + return putop(ctx, ".long", { "0x"..tohex(ctx.op) }) +end + +-- Format operand 2 of load/store opcodes. +local function fmtload(ctx, op, pos) + local base = map_gpr[band(rshift(op, 16), 15)] + local x, ofs + local ext = (band(op, 0x04000000) == 0) + if not ext and band(op, 0x02000000) == 0 then + ofs = band(op, 4095) + if band(op, 0x00800000) == 0 then ofs = -ofs end + if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end + ofs = "#"..ofs + elseif ext and band(op, 0x00400000) ~= 0 then + ofs = band(op, 15) + band(rshift(op, 4), 0xf0) + if band(op, 0x00800000) == 0 then ofs = -ofs end + if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end + ofs = "#"..ofs + else + ofs = map_gpr[band(op, 15)] + if ext or band(op, 0xfe0) == 0 then + elseif band(op, 0xfe0) == 0x60 then + ofs = format("%s, rrx", ofs) + else + local sh = band(rshift(op, 7), 31) + if sh == 0 then sh = 32 end + ofs = format("%s, %s #%d", ofs, map_shift[band(rshift(op, 5), 3)], sh) + end + if band(op, 0x00800000) == 0 then ofs = "-"..ofs end + end + if ofs == "#0" then + x = format("[%s]", base) + elseif band(op, 0x01000000) == 0 then + x = format("[%s], %s", base, ofs) + else + x = format("[%s, %s]", base, ofs) + end + if band(op, 0x01200000) == 0x01200000 then x = x.."!" end + return x +end + +-- Disassemble a single instruction. 
+local function disass_ins(ctx) + local pos = ctx.pos + local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4) + local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0) + local operands = {} + local suffix = "" + local last, name, pat + ctx.op = op + ctx.rel = nil + + local cond = rshift(op, 28) + local opat + if cond == 15 then + opat = map_uncondins[band(rshift(op, 25), 7)] + else + if cond ~= 14 then suffix = map_cond[cond] end + opat = map_condins[band(rshift(op, 25), 7)] + end + while type(opat) ~= "string" do + if not opat then return unknown(ctx) end + opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._ + end + name, pat = match(opat, "^([a-z0-9]*)(.*)") + + for p in gmatch(pat, ".") do + local x = nil + if p == "D" then + x = map_gpr[band(rshift(op, 12), 15)] + elseif p == "N" then + x = map_gpr[band(rshift(op, 16), 15)] + elseif p == "S" then + x = map_gpr[band(rshift(op, 8), 15)] + elseif p == "M" then + x = map_gpr[band(op, 15)] + elseif p == "P" then + if band(op, 0x02000000) ~= 0 then + x = ror(band(op, 255), 2*band(rshift(op, 8), 15)) + else + x = map_gpr[band(op, 15)] + if band(op, 0xff0) ~= 0 then + operands[#operands+1] = x + local s = map_shift[band(rshift(op, 5), 3)] + local r = nil + if band(op, 0xf90) == 0 then + if s == "ror" then s = "rrx" else r = "#32" end + elseif band(op, 0x10) == 0 then + r = "#"..band(rshift(op, 7), 31) + else + r = map_gpr[band(rshift(op, 8), 15)] + end + if name == "mov" then name = s; x = r + elseif r then x = format("%s %s", s, r) + else x = s end + end + end + elseif p == "L" then + x = fmtload(ctx, op, pos, false) + elseif p == "B" then + local addr = ctx.addr + pos + 8 + arshift(lshift(op, 8), 6) + if cond == 15 then addr = addr + band(rshift(op, 23), 2) end + ctx.rel = addr + x = "0x"..tohex(addr) + elseif p == "R" then + if band(op, 0x00200000) ~= 0 and #operands == 1 then + operands[1] = operands[1].."!" + end + local t = {} + for i=0,15 do + if band(rshift(op, i), 1) == 1 then t[#t+1] = map_gpr[i] end + end + x = "{"..concat(t, ", ").."}" + elseif p == "W" then + x = band(op, 0x0fff) + band(rshift(op, 4), 0xf000) + elseif p == "T" then + x = "#0x"..tohex(band(op, 0x00ffffff), 6) + elseif p == "U" then + x = band(rshift(op, 7), 31) + if x == 0 then x = nil end + elseif p == "u" then + x = band(rshift(op, 7), 31) + if band(op, 0x40) == 0 then + if x == 0 then x = nil else x = "lsl #"..x end + else + if x == 0 then x = "asr #32" else x = "asr #"..x end + end + elseif p == "v" then + x = band(rshift(op, 7), 31) + elseif p == "w" then + x = band(rshift(op, 16), 31) + elseif p == "x" then + x = band(rshift(op, 16), 31) + 1 + elseif p == "X" then + x = band(rshift(op, 16), 31) - last + 1 + elseif p == "K" then + x = "#0x"..tohex(band(rshift(op, 4), 0x0000fff0) + band(op, 15), 4) + elseif p == "s" then + if band(op, 0x00100000) ~= 0 then suffix = "s"..suffix end + else + assert(false) + end + if x then + last = x + if type(x) == "number" then x = "#"..x end + operands[#operands+1] = x + end + end + + return putop(ctx, name..suffix, operands) +end + +------------------------------------------------------------------------------ + +-- Disassemble a block of code. +local function disass_block(ctx, ofs, len) + if not ofs then ofs = 0 end + local stop = len and ofs+len or #ctx.code + ctx.pos = ofs + ctx.rel = nil + while ctx.pos < stop do disass_ins(ctx) end +end + +-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). 
+local function create_(code, addr, out) + local ctx = {} + ctx.code = code + ctx.addr = addr or 0 + ctx.out = out or io.write + ctx.symtab = {} + ctx.disass = disass_block + ctx.hexdump = 8 + return ctx +end + +-- Simple API: disassemble code (a string) at address and output via out. +local function disass_(code, addr, out) + create_(code, addr, out):disass() +end + +-- Return register name for RID. +local function regname_(r) + return map_gpr[r] +end + +-- Public module functions. +module(...) + +create = create_ +disass = disass_ +regname = regname_ + diff --git a/game/thirdparty/jit/dis_x64.lua b/game/thirdparty/jit/dis_x64.lua new file mode 100644 index 0000000000..6ba4914306 --- /dev/null +++ b/game/thirdparty/jit/dis_x64.lua @@ -0,0 +1,20 @@ +---------------------------------------------------------------------------- +-- LuaJIT x64 disassembler wrapper module. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- Released under the MIT/X license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- This module just exports the 64 bit functions from the combined +-- x86/x64 disassembler module. All the interesting stuff is there. +------------------------------------------------------------------------------ + +local require = require + +module(...) + +local dis_x86 = require(_PACKAGE.."dis_x86") + +create = dis_x86.create64 +disass = dis_x86.disass64 +regname = dis_x86.regname64 + diff --git a/game/thirdparty/jit/dis_x86.lua b/game/thirdparty/jit/dis_x86.lua index 1de88e0570..f489a86123 100644 --- a/game/thirdparty/jit/dis_x86.lua +++ b/game/thirdparty/jit/dis_x86.lua @@ -1,8 +1,8 @@ ---------------------------------------------------------------------------- --- LuaJIT x86 disassembler module. +-- LuaJIT x86/x64 disassembler module. -- --- Copyright (C) 2005-2010 Mike Pall. All rights reserved. --- Released under the MIT/X license. See luajit.h for full copyright notice. +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- Released under the MIT/X license. See Copyright Notice in luajit.h ---------------------------------------------------------------------------- -- This is a helper module used by the LuaJIT machine code dumper module. -- @@ -13,28 +13,24 @@ -- The output format is very similar to what ndisasm generates. But it has -- been developed independently by looking at the opcode tables from the -- Intel and AMD manuals. The supported instruction set is quite extensive --- and reflects what a current generation P4 or K8 implements in 32 bit --- mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3 and even privileged +-- and reflects what a current generation Intel or AMD CPU implements in +-- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3, +-- SSE4.1, SSE4.2, SSE4a and even privileged and hypervisor (VMX/SVM) -- instructions. -- -- Notes: -- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported. -- * No attempt at optimization has been made -- it's fast enough for my needs. -- * The public API may change when more architectures are added. --- --- TODO: --- * More testing with arbitrary x86 code (not just LuaJIT generated code). --- * The output for a few MMX/SSE opcodes could be improved. --- * Adding x64 support would be straightforward. --- * Better input API (iterator) and output API (structured access to instr). 
------------------------------------------------------------------------------ local type = type local sub, byte, format = string.sub, string.byte, string.format local match, gmatch, gsub = string.match, string.gmatch, string.gsub +local lower, rep = string.lower, string.rep --- Map for 1st opcode byte. Ugly? Well ... read on. -local map_opc1 = { +-- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on. +local map_opc1_32 = { --0x [0]="addBmr","addVmr","addBrm","addVrm","addBai","addVai","push es","pop es", "orBmr","orVmr","orBrm","orVrm","orBai","orVai","push cs","opc2*", @@ -51,13 +47,13 @@ local map_opc1 = { "incVR","incVR","incVR","incVR","incVR","incVR","incVR","incVR", "decVR","decVR","decVR","decVR","decVR","decVR","decVR","decVR", --5x -"pushVR","pushVR","pushVR","pushVR","pushVR","pushVR","pushVR","pushVR", -"popVR","popVR","popVR","popVR","popVR","popVR","popVR","popVR", +"pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR", +"popUR","popUR","popUR","popUR","popUR","popUR","popUR","popUR", --6x -"pusha/pushaw","popa/popaw","boundVrm","arplWmr", +"sz*pushaw,pusha","sz*popaw,popa","boundVrm","arplWmr", "fs:seg","gs:seg","o16:","a16", -"pushVi","imulVrmi","pushBs","imulVrms", -"insb","insd/insw","outsb","outsd/outsw", +"pushUi","imulVrmi","pushBs","imulVrms", +"insb","insVS","outsb","outsVS", --7x "joBj","jnoBj","jbBj","jnbBj","jzBj","jnzBj","jbeBj","jaBj", "jsBj","jnsBj","jpeBj","jpoBj","jlBj","jgeBj","jleBj","jgBj", @@ -65,72 +61,87 @@ local map_opc1 = { "arith!Bmi","arith!Vmi","arith!Bmi","arith!Vms", "testBmr","testVmr","xchgBrm","xchgVrm", "movBmr","movVmr","movBrm","movVrm", -"movVmg","leaVrm","movWgm","popVm", +"movVmg","leaVrm","movWgm","popUm", --9x -"nop|pause|xchgWaR|repne nop","xchgVaR","xchgVaR","xchgVaR", +"nop*xchgVaR|pause|xchgWaR|repne nop","xchgVaR","xchgVaR","xchgVaR", "xchgVaR","xchgVaR","xchgVaR","xchgVaR", -"cwde/cbw","cdq/cwd","call farViw","wait", -"pushf/pushfw","popf/popfw","sahf","lahf", +"sz*cbw,cwde,cdqe","sz*cwd,cdq,cqo","call farViw","wait", +"sz*pushfw,pushf","sz*popfw,popf","sahf","lahf", --Ax "movBao","movVao","movBoa","movVoa", -"movsb","movsd/movsb","cmpsb","cmpsd/cmpsw", -"testBai","testVai","stosb","stosd/stosw", -"lodsb","lodsd/lodsw","scasb","scasd/scasw", +"movsb","movsVS","cmpsb","cmpsVS", +"testBai","testVai","stosb","stosVS", +"lodsb","lodsVS","scasb","scasVS", --Bx "movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi", -"movVRi","movVRi","movVRi","movVRi","movVRi","movVRi","movVRi","movVRi", +"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI", --Cx -"shift!Bmu","shift!Vmu","retBw","ret","lesVrm","ldsVrm","movBmi","movVmi", -"enterBwu","leave","retfBw","retf","int3","intBu","into","iret/iretw", +"shift!Bmu","shift!Vmu","retBw","ret","$lesVrm","$ldsVrm","movBmi","movVmi", +"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS", --Dx "shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb", "fp*0","fp*1","fp*2","fp*3","fp*4","fp*5","fp*6","fp*7", --Ex -"loopneBj","loopeBj","loopBj","jecxz/jcxzBj","inBau","inVau","outBua","outVua", -"callDj","jmpDj","jmp farViw","jmpBj","inBad","inVad","outBda","outVda", +"loopneBj","loopeBj","loopBj","sz*jcxzBj,jecxzBj,jrcxzBj", +"inBau","inVau","outBua","outVua", +"callVj","jmpVj","jmp farViw","jmpBj","inBad","inVad","outBda","outVda", --Fx "lock:","int1","repne:rep","rep:","hlt","cmc","testb!Bm","testv!Vm", -"clc","stc","cli","sti","cld","std","inc!Bm","inc!Vm", 
+"clc","stc","cli","sti","cld","std","incb!Bm","incd!Vm", } -assert(#map_opc1 == 255) - --- Map for 2nd opcode byte (0f xx). True CISC hell. Hey, I told you. --- Prefix dependent MMX/SSE opcodes: (none)|rep|o16|repne +assert(#map_opc1_32 == 255) + +-- Map for 1st opcode byte in 64 bit mode (overrides only). +local map_opc1_64 = setmetatable({ + [0x06]=false, [0x07]=false, [0x0e]=false, + [0x16]=false, [0x17]=false, [0x1e]=false, [0x1f]=false, + [0x27]=false, [0x2f]=false, [0x37]=false, [0x3f]=false, + [0x60]=false, [0x61]=false, [0x62]=false, [0x63]="movsxdVrDmt", [0x67]="a32:", + [0x40]="rex*", [0x41]="rex*b", [0x42]="rex*x", [0x43]="rex*xb", + [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb", + [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb", + [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb", + [0x82]=false, [0x9a]=false, [0xc4]=false, [0xc5]=false, [0xce]=false, + [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false, +}, { __index = map_opc1_32 }) + +-- Map for 2nd opcode byte (0F xx). True CISC hell. Hey, I told you. +-- Prefix dependent MMX/SSE opcodes: (none)|rep|o16|repne, -|F3|66|F2 local map_opc2 = { --0x -[0]="sldt!Dmp","sgdt!Dmp","larVrm","lslVrm",nil,"syscall","clts","sysret", -"invd","wbinvd",nil,"ud1",nil,"prefetch!Bm","femms","3dnowMrmu", +[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret", +"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu", --1x "movupsXrm|movssXrm|movupdXrm|movsdXrm", "movupsXmr|movssXmr|movupdXmr|movsdXmr", -"movhlpsXrm|movsldupXrm|movlpdXrm|movddupXrm", -- TODO: movlpsXrMm (mem case). +"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm", "movlpsXmr||movlpdXmr", "unpcklpsXrm||unpcklpdXrm", "unpckhpsXrm||unpckhpdXrm", -"movlhpsXrm|movshdupXrm|movhpdXrm", -- TODO: movhpsXrMm (mem case). 
+"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm", "movhpsXmr||movhpdXmr", -"prefetcht!Bm","hintnopBm","hintnopBm","hintnopBm", -"hintnopBm","hintnopBm","hintnopBm","hintnopBm", +"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm", +"hintnopVm","hintnopVm","hintnopVm","hintnopVm", --2x -"movDmx","movDmy","movDxm","movDym","movDmz",nil,"movDzm",nil, +"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil, "movapsXrm||movapdXrm", "movapsXmr||movapdXmr", -"cvtpi2psXrMm|cvtsi2ssXrDm|cvtpi2pdXrMm|cvtsi2sdXrDm", -"movntpsXmr||movntpdXmr", -"cvttps2piMrXm|cvttss2siDrXm|cvttpd2piMrXm|cvttsd2siDrXm", -"cvtps2piMrXm|cvtss2siDrXm|cvtpd2piMrXm|cvtsd2siDrXm", +"cvtpi2psXrMm|cvtsi2ssXrVmt|cvtpi2pdXrMm|cvtsi2sdXrVmt", +"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr", +"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm", +"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm", "ucomissXrm||ucomisdXrm", "comissXrm||comisdXrm", --3x -"wrmsr","rdtsc","rdmsr","rdpmc","sysenter","sysexit",nil,nil, -"ssse3*38",nil,"ssse3*3a",nil,nil,nil,nil,nil, +"wrmsr","rdtsc","rdmsr","rdpmc","sysenter","sysexit",nil,"getsec", +"opc3*38",nil,"opc3*3a",nil,nil,nil,nil,nil, --4x "cmovoVrm","cmovnoVrm","cmovbVrm","cmovnbVrm", "cmovzVrm","cmovnzVrm","cmovbeVrm","cmovaVrm", "cmovsVrm","cmovnsVrm","cmovpeVrm","cmovpoVrm", "cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm", --5x -"movmskpsDrXm||movmskpdDrXm","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm", +"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm", "rsqrtpsXrm|rsqrtssXrm","rcppsXrm|rcpssXrm", "andpsXrm||andpdXrm","andnpsXrm||andnpdXrm", "orpsXrm||orpdXrm","xorpsXrm||xorpdXrm", @@ -140,17 +151,19 @@ local map_opc2 = { "subpsXrm|subssXrm|subpdXrm|subsdXrm","minpsXrm|minssXrm|minpdXrm|minsdXrm", "divpsXrm|divssXrm|divpdXrm|divsdXrm","maxpsXrm|maxssXrm|maxpdXrm|maxsdXrm", --6x -"punpcklbwMrm||punpcklbqXrm","punpcklwdPrm","punpckldqPrm","packsswbPrm", +"punpcklbwPrm","punpcklwdPrm","punpckldqPrm","packsswbPrm", "pcmpgtbPrm","pcmpgtwPrm","pcmpgtdPrm","packuswbPrm", "punpckhbwPrm","punpckhwdPrm","punpckhdqPrm","packssdwPrm", "||punpcklqdqXrm","||punpckhqdqXrm", -"movdPrDm","movqMrm|movdquXrm|movdqaXrm", +"movPrVSm","movqMrm|movdquXrm|movdqaXrm", --7x -"pshufwPrmu","pshiftw!Pmu","pshiftd!Pmu","pshiftq!Mmu||pshiftdq!Xmu", +"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pmu", +"pshiftd!Pmu","pshiftq!Mmu||pshiftdq!Xmu", "pcmpeqbPrm","pcmpeqwPrm","pcmpeqdPrm","emms|", -nil,nil,nil,nil, +"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$", +nil,nil, "||haddpdXrm|haddpsXrm","||hsubpdXrm|hsubpsXrm", -"movdDmMr|movqXrm|movdDmXr","movqMmr|movdquXmr|movdqaXmr", +"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr", --8x "joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj", "jsVj","jnsVj","jpeVj","jpoVj","jlVj","jgeVj","jleVj","jgVj", @@ -161,38 +174,38 @@ nil,nil,nil,nil, "push fs","pop fs","cpuid","btVmr","shldVmru","shldVmrc",nil,nil, "push gs","pop gs","rsm","btsVmr","shrdVmru","shrdVmrc","fxsave!Dmp","imulVrm", --Bx -"cmpxchgBmr","cmpxchgVmr","lssVrm","btrVmr", -"lfsVrm","lgsVrm","movzxVrBm","movzxDrWm", -nil,"ud2","bt!Vmu","btcVmr", -"bsfVrm","bsrVrm","movsxVrBm","movsxDrWm", +"cmpxchgBmr","cmpxchgVmr","$lssVrm","btrVmr", +"$lfsVrm","$lgsVrm","movzxVrBmt","movzxVrWmt", +"|popcntVrm","ud2Dp","bt!Vmu","btcVmr", +"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt", --Cx "xaddBmr","xaddVmr", -"cmppsXrmu|cmpssXrmu|cmppdXrmu|cmpsdXrmu","movntiDmr|", +"cmppsXrmu|cmpssXrmu|cmppdXrmu|cmpsdXrmu","$movntiVmr|", 
"pinsrwPrWmu","pextrwDrPmu", -"shufpsXrmu||shufpdXrmu","cmpxchg!Dmp", -"bswapDR","bswapDR","bswapDR","bswapDR","bswapDR","bswapDR","bswapDR","bswapDR", +"shufpsXrmu||shufpdXrmu","$cmpxchg!Qmp", +"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR", --Dx "||addsubpdXrm|addsubpsXrm","psrlwPrm","psrldPrm","psrlqPrm", "paddqPrm","pmullwPrm", -"|movq2dqXrMm|movqXmr|movdq2qMrXm","pmovmskbDrPm", +"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm", "psubusbPrm","psubuswPrm","pminubPrm","pandPrm", "paddusbPrm","padduswPrm","pmaxubPrm","pandnPrm", --Ex "pavgbPrm","psrawPrm","psradPrm","pavgwPrm", "pmulhuwPrm","pmulhwPrm", -"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","movntqMmr||movntdqXmr", +"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr", "psubsbPrm","psubswPrm","pminswPrm","porPrm", "paddsbPrm","paddswPrm","pmaxswPrm","pxorPrm", --Fx "|||lddquXrm","psllwPrm","pslldPrm","psllqPrm", -"pmuludqPrm","pmaddwdPrm","psadbwPrm","maskmovqMrm||maskmovdquXrm", +"pmuludqPrm","pmaddwdPrm","psadbwPrm","maskmovqMrm||maskmovdquXrm$", "psubbPrm","psubwPrm","psubdPrm","psubqPrm", "paddbPrm","paddwPrm","padddPrm","ud", } assert(map_opc2[255] == "ud") --- Map for SSSE3 opcodes. -local map_ssse3 = { +-- Map for three-byte opcodes. Can't wait for their next invention. +local map_opc3 = { ["38"] = { -- [66] 0f 38 xx --0x [0]="pshufbPrm","phaddwPrm","phadddPrm","phaddswPrm", @@ -200,20 +213,62 @@ local map_ssse3 = { "psignbPrm","psignwPrm","psigndPrm","pmulhrswPrm", nil,nil,nil,nil, --1x -nil,nil,nil,nil,nil,nil,nil,nil, -nil,nil,nil,nil,"pabsbPrm","pabswPrm","pabsdPrm",nil, +"||pblendvbXrma",nil,nil,nil, +"||blendvpsXrma","||blendvpdXrma",nil,"||ptestXrm", +nil,nil,nil,nil, +"pabsbPrm","pabswPrm","pabsdPrm",nil, +--2x +"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm", +"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil, +"||pmuldqXrm","||pcmpeqqXrm","||$movntdqaXrm","||packusdwXrm", +nil,nil,nil,nil, +--3x +"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm", +"||pmovzxwqXrm","||pmovzxdqXrm",nil,"||pcmpgtqXrm", +"||pminsbXrm","||pminsdXrm","||pminuwXrm","||pminudXrm", +"||pmaxsbXrm","||pmaxsdXrm","||pmaxuwXrm","||pmaxudXrm", +--4x +"||pmulddXrm","||phminposuwXrm", +--Fx +[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt", }, + ["3a"] = { -- [66] 0f 3a xx -[0x0f] = "palignrPrmu", +--0x +[0x00]=nil,nil,nil,nil,nil,nil,nil,nil, +"||roundpsXrmu","||roundpdXrmu","||roundssXrmu","||roundsdXrmu", +"||blendpsXrmu","||blendpdXrmu","||pblendwXrmu","palignrPrmu", +--1x +nil,nil,nil,nil, +"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru", +nil,nil,nil,nil,nil,nil,nil,nil, +--2x +"||pinsrbXrVmu","||insertpsXrmu","||pinsrXrVmuS",nil, +--4x +[0x40] = "||dppsXrmu", +[0x41] = "||dppdXrmu", +[0x42] = "||mpsadbwXrmu", +--6x +[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu", +[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu", }, } +-- Map for VMX/SVM opcodes 0F 01 C0-FF (sgdt group with register operands). +local map_opcvm = { +[0xc1]="vmcall",[0xc2]="vmlaunch",[0xc3]="vmresume",[0xc4]="vmxoff", +[0xc8]="monitor",[0xc9]="mwait", +[0xd8]="vmrun",[0xd9]="vmmcall",[0xda]="vmload",[0xdb]="vmsave", +[0xdc]="stgi",[0xdd]="clgi",[0xde]="skinit",[0xdf]="invlpga", +[0xf8]="swapgs",[0xf9]="rdtscp", +} + -- Map for FP opcodes. And you thought stack machines are simple? local map_opcfp = { -- D8-DF 00-BF: opcodes with a memory operand. 
-- D8 [0]="faddFm","fmulFm","fcomFm","fcompFm","fsubFm","fsubrFm","fdivFm","fdivrFm", -"fldFm",nil,"fstFm","fstpFm","fldenvDmp","fldcwWm","fnstenvDmp","fnstcwWm", +"fldFm",nil,"fstFm","fstpFm","fldenvVm","fldcwWm","fnstenvVm","fnstcwWm", -- DA "fiaddDm","fimulDm","ficomDm","ficompDm", "fisubDm","fisubrDm","fidivDm","fidivrDm", @@ -262,18 +317,21 @@ local map_opcgroup = { shift = { "rol", "ror", "rcl", "rcr", "shl", "shr", "sal", "sar" }, testb = { "testBmi", "testBmi", "not", "neg", "mul", "imul", "div", "idiv" }, testv = { "testVmi", "testVmi", "not", "neg", "mul", "imul", "div", "idiv" }, - inc = { "inc", "dec", "callDmp", "call farDmp", - "jmpDmp", "jmp farDmp", "push" }, + incb = { "inc", "dec" }, + incd = { "inc", "dec", "callUmp", "$call farDmp", + "jmpUmp", "$jmp farDmp", "pushUm" }, sldt = { "sldt", "str", "lldt", "ltr", "verr", "verw" }, - sgdt = { "sgdt", "sidt", "lgdt", "lidt", "smsw", nil, "lmsw", "invlpg" }, + sgdt = { "vm*$sgdt", "vm*$sidt", "$lgdt", "vm*$lidt", + "smsw", nil, "lmsw", "vm*$invlpg" }, bt = { nil, nil, nil, nil, "bt", "bts", "btr", "btc" }, - cmpxchg = { nil, "cmpxchg8b" }, + cmpxchg = { nil, "sz*,cmpxchg8bQmp,cmpxchg16bXmp", nil, nil, + nil, nil, "vmptrld|vmxon|vmclear", "vmptrst" }, pshiftw = { nil, nil, "psrlw", nil, "psraw", nil, "psllw" }, pshiftd = { nil, nil, "psrld", nil, "psrad", nil, "pslld" }, pshiftq = { nil, nil, "psrlq", nil, nil, nil, "psllq" }, pshiftdq = { nil, nil, "psrlq", "psrldq", nil, nil, "psllq", "pslldq" }, - fxsave = { "fxsave", "fxrstor", "ldmxcsr", "stmxcsr", - nil, "lfenceDp", "mfenceDp", "sfenceDp" }, -- TODO: clflush. + fxsave = { "$fxsave", "$fxrstor", "$ldmxcsr", "$stmxcsr", + nil, "lfenceDp$", "mfenceDp$", "sfenceDp$clflush" }, prefetch = { "prefetch", "prefetchw" }, prefetcht = { "prefetchnta", "prefetcht0", "prefetcht1", "prefetcht2" }, } @@ -281,13 +339,21 @@ local map_opcgroup = { ------------------------------------------------------------------------------ -- Maps for register names. -local map_aregs = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi" } local map_regs = { - B = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh" }, - W = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" }, - D = map_aregs, - M = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, - X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" }, + B = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh", + "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" }, + B64 = { "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil", + "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" }, + W = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di", + "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" }, + D = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", + "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" }, + Q = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" }, + M = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", + "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext! + X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" }, } local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" } @@ -297,9 +363,9 @@ local map_sz2n = { } local map_sz2prefix = { B = "byte", W = "word", D = "dword", - Q = "qword", -- No associated reg in 32 bit mode. 
- F = "dword", G = "qword", -- No need for sizes/register names for these two. + Q = "qword", M = "qword", X = "xword", + F = "dword", G = "qword", -- No need for sizes/register names for these two. } ------------------------------------------------------------------------------ @@ -307,13 +373,25 @@ local map_sz2prefix = { -- Output a nicely formatted line with an opcode and operands. local function putop(ctx, text, operands) local code, pos, hex = ctx.code, ctx.pos, "" - for i=ctx.start,pos-1 do - hex = hex..format("%02X", byte(code, i, i)) + local hmax = ctx.hexdump + if hmax > 0 then + for i=ctx.start,pos-1 do + hex = hex..format("%02X", byte(code, i, i)) + end + if #hex > hmax then hex = sub(hex, 1, hmax)..". " + else hex = hex..rep(" ", hmax-#hex+2) end end - if #hex > 16 then hex = sub(hex, 1, 16).."." end if operands then text = text.." "..operands end if ctx.o16 then text = "o16 "..text; ctx.o16 = false end + if ctx.a32 then text = "a32 "..text; ctx.a32 = false end if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end + if ctx.rex then + local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "").. + (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "") + if t ~= "" then text = "rex."..t.." "..text end + ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false + ctx.rex = false + end if ctx.seg then local text2, n = gsub(text, "%[", "["..ctx.seg..":") if n == 0 then text = ctx.seg.." "..text else text = text2 end @@ -325,22 +403,29 @@ local function putop(ctx, text, operands) local sym = ctx.symtab[imm] if sym then text = text.."\t->"..sym end end - ctx.out(format("%08x %-18s%s\n", ctx.addr+ctx.start, hex, text)) + ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text)) ctx.mrm = false ctx.start = pos ctx.imm = nil end +-- Clear all prefix flags. +local function clearprefixes(ctx) + ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false + ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false + ctx.rex = false; ctx.a32 = false +end + -- Fallback for incomplete opcodes at the end. local function incomplete(ctx) ctx.pos = ctx.stop+1 - ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false + clearprefixes(ctx) return putop(ctx, "(incomplete)") end -- Fallback for unknown opcodes. local function unknown(ctx) - ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false + clearprefixes(ctx) return putop(ctx, "(unknown)") end @@ -364,25 +449,36 @@ end -- Process pattern string and generate the operands. 
local function putpat(ctx, name, pat) - local operands, regs, sz, mode, sp, rm, sc, rx, disp, sdisp + local operands, regs, sz, mode, sp, rm, sc, rx, sdisp local code, pos, stop = ctx.code, ctx.pos, ctx.stop - -- Chars used: 1DFGMPQRVWXacdfgijmoprsuwxyz + -- Chars used: 1DFGIMPQRSTUVWXacdfgijmoprstuwxyz for p in gmatch(pat, ".") do local x = nil - if p == "V" then - sz = ctx.o16 and "W" or "D"; ctx.o16 = false + if p == "V" or p == "U" then + if ctx.rexw then sz = "Q"; ctx.rexw = false + elseif ctx.o16 then sz = "W"; ctx.o16 = false + elseif p == "U" and ctx.x64 then sz = "Q" + else sz = "D" end + regs = map_regs[sz] + elseif p == "T" then + if ctx.rexw then sz = "Q"; ctx.rexw = false else sz = "D" end regs = map_regs[sz] - elseif match(p, "[BWDQFGMX]") then + elseif p == "B" then + sz = "B" + regs = ctx.rex and map_regs.B64 or map_regs.B + elseif match(p, "[WDQMXFG]") then sz = p regs = map_regs[sz] elseif p == "P" then sz = ctx.o16 and "X" or "M"; ctx.o16 = false regs = map_regs[sz] + elseif p == "S" then + name = name..lower(sz) elseif p == "s" then local imm = getimm(ctx, pos, 1); if not imm then return end - x = imm <= 127 and format("byte +0x%02x", imm) - or format("byte -0x%02x", 256-imm) + x = imm <= 127 and format("+0x%02x", imm) + or format("-0x%02x", 256-imm) pos = pos+1 elseif p == "u" then local imm = getimm(ctx, pos, 1); if not imm then return end @@ -393,25 +489,55 @@ local function putpat(ctx, name, pat) x = format("0x%x", imm) pos = pos+2 elseif p == "o" then -- [offset] - local imm = getimm(ctx, pos, 4); if not imm then return end - x = format("[0x%08x]", imm) - pos = pos+4 - elseif p == "i" then + if ctx.x64 then + local imm1 = getimm(ctx, pos, 4); if not imm1 then return end + local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end + x = format("[0x%08x%08x]", imm2, imm1) + pos = pos+8 + else + local imm = getimm(ctx, pos, 4); if not imm then return end + x = format("[0x%08x]", imm) + pos = pos+4 + end + elseif p == "i" or p == "I" then local n = map_sz2n[sz] - local imm = getimm(ctx, pos, n); if not imm then return end - x = format(imm > 65535 and "0x%08x" or "0x%x", imm) + if n == 8 and ctx.x64 and p == "I" then + local imm1 = getimm(ctx, pos, 4); if not imm1 then return end + local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end + x = format("0x%08x%08x", imm2, imm1) + else + if n == 8 then n = 4 end + local imm = getimm(ctx, pos, n); if not imm then return end + if sz == "Q" and (imm < 0 or imm > 0x7fffffff) then + imm = (0xffffffff+1)-imm + x = format(imm > 65535 and "-0x%08x" or "-0x%x", imm) + else + x = format(imm > 65535 and "0x%08x" or "0x%x", imm) + end + end pos = pos+n elseif p == "j" then local n = map_sz2n[sz] + if n == 8 then n = 4 end local imm = getimm(ctx, pos, n); if not imm then return end if sz == "B" and imm > 127 then imm = imm-256 elseif imm > 2147483647 then imm = imm-4294967296 end pos = pos+n imm = imm + pos + ctx.addr + if imm > 4294967295 and not ctx.x64 then imm = imm-4294967296 end ctx.imm = imm - x = sz == "W" and format("word 0x%04x", imm%65536) - or format("0x%08x", imm) - elseif p == "R" then x = regs[byte(code, pos-1, pos-1)%8+1] + if sz == "W" then + x = format("word 0x%04x", imm%65536) + elseif ctx.x64 then + local lo = imm % 0x1000000 + x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo) + else + x = format("0x%08x", imm) + end + elseif p == "R" then + local r = byte(code, pos-1, pos-1)%8 + if ctx.rexb then r = r + 8; ctx.rexb = false end + x = regs[r+1] elseif p == "a" then x = regs[1] elseif p == "c" then x = 
"cl" elseif p == "d" then x = "dx" @@ -434,51 +560,70 @@ local function putpat(ctx, name, pat) pos = pos+1 rm = sc%8; sc = (sc-rm)/8 rx = sc%8; sc = (sc-rx)/8 + if ctx.rexx then rx = rx + 8; ctx.rexx = false end if rx == 4 then rx = nil end end if mode > 0 or rm == 5 then local dsz = mode if dsz ~= 1 then dsz = 4 end - disp = getimm(ctx, pos, dsz); if not disp then return end - sdisp = (dsz == 4 or disp <= 127) and - format(disp > 65535 and "+0x%08x" or "+0x%x", disp) or - format("-0x%x", 256-disp) + local disp = getimm(ctx, pos, dsz); if not disp then return end + if mode == 0 then rm = nil end + if rm or rx or (not sc and ctx.x64 and not ctx.a32) then + if dsz == 1 and disp > 127 then + sdisp = format("-0x%x", 256-disp) + elseif disp >= 0 and disp <= 0x7fffffff then + sdisp = format("+0x%x", disp) + else + sdisp = format("-0x%x", (0xffffffff+1)-disp) + end + else + sdisp = format(ctx.x64 and not ctx.a32 and + not (disp >= 0 and disp <= 0x7fffffff) + and "0xffffffff%08x" or "0x%08x", disp) + end pos = pos+dsz end end + if rm and ctx.rexb then rm = rm + 8; ctx.rexb = false end + if ctx.rexr then sp = sp + 8; ctx.rexr = false end end if p == "m" then if mode == 3 then x = regs[rm+1] else - local srm, srx = map_aregs[rm+1], "" + local aregs = ctx.a32 and map_regs.D or ctx.aregs + local srm, srx = "", "" + if rm then srm = aregs[rm+1] + elseif not sc and ctx.x64 and not ctx.a32 then srm = "rip" end + ctx.a32 = false if rx then - srm = srm.."+" - srx = map_aregs[rx+1] + if rm then srm = srm.."+" end + srx = aregs[rx+1] if sc > 0 then srx = srx.."*"..(2^sc) end end - if mode == 0 and rm == 5 then - srm = "" - sdisp = format("%s0x%08x", rx and "+" or "", disp) - end x = format("[%s%s%s]", srm, srx, sdisp) end if mode < 3 and - (not match(pat, "[aRrgp]") or - name == "movzx" or name == "movsx") then -- Yuck. + (not match(pat, "[aRrgp]") or match(pat, "t")) then -- Yuck. x = map_sz2prefix[sz].." "..x end elseif p == "r" then x = regs[sp+1] elseif p == "g" then x = map_segregs[sp+1] elseif p == "p" then -- Suppress prefix. elseif p == "f" then x = "st"..rm - elseif p == "x" then x = "CR"..sp + elseif p == "x" then + if sp == 0 and ctx.lock and not ctx.x64 then + x = "CR8"; ctx.lock = false + else + x = "CR"..sp + end elseif p == "y" then x = "DR"..sp elseif p == "z" then x = "TR"..sp + elseif p == "t" then else error("bad pattern `"..pat.."'") end end - if x then operands = operands and operands..","..x or x end + if x then operands = operands and operands..", "..x or x end end ctx.pos = pos return putop(ctx, name, operands) @@ -487,23 +632,53 @@ end -- Forward declaration. local map_act --- Get a pattern from an opcode map and dispatch to handler. -local function opcdispatch(ctx, opcmap) - local pos = ctx.pos - local opat = opcmap[byte(ctx.code, pos, pos)] +-- Fetch and cache MRM byte. +local function getmrm(ctx) + local mrm = ctx.mrm + if not mrm then + local pos = ctx.pos + if pos > ctx.stop then return nil end + mrm = byte(ctx.code, pos, pos) + ctx.pos = pos+1 + ctx.mrm = mrm + end + return mrm +end + +-- Dispatch to handler depending on pattern. +local function dispatch(ctx, opat, patgrp) if not opat then return unknown(ctx) end if match(opat, "%|") then -- MMX/SSE variants depending on prefix. 
local p - if ctx.rep then p = ctx.rep=="rep" and "%|([^%|]*)" or "%|.-%|.-%|([^%|]*)" - elseif ctx.o16 then p = "%|.-%|([^%|]*)" + if ctx.rep then + p = ctx.rep=="rep" and "%|([^%|]*)" or "%|[^%|]*%|[^%|]*%|([^%|]*)" + ctx.rep = false + elseif ctx.o16 then p = "%|[^%|]*%|([^%|]*)"; ctx.o16 = false else p = "^[^%|]*" end opat = match(opat, p) - if not opat or opat == "" then return unknown(ctx) end - ctx.rep = false; ctx.o16 = false + if not opat then return unknown(ctx) end +-- ctx.rep = false; ctx.o16 = false + --XXX fails for 66 f2 0f 38 f1 06 crc32 eax,WORD PTR [esi] + --XXX remove in branches? + end + if match(opat, "%$") then -- reg$mem variants. + local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end + opat = match(opat, mrm >= 192 and "^[^%$]*" or "%$(.*)") + if opat == "" then return unknown(ctx) end end - local name, pat, act = match(opat, "^([a-z0-9 ]*)((.?).*)") - ctx.pos = pos + 1 - return map_act[act](ctx, name, pat) + if opat == "" then return unknown(ctx) end + local name, pat = match(opat, "^([a-z0-9 ]*)(.*)") + if pat == "" and patgrp then pat = patgrp end + return map_act[sub(pat, 1, 1)](ctx, name, pat) +end + +-- Get a pattern from an opcode map and dispatch to handler. +local function dispatchmap(ctx, opcmap) + local pos = ctx.pos + local opat = opcmap[byte(ctx.code, pos, pos)] + pos = pos + 1 + ctx.pos = pos + return dispatch(ctx, opat) end -- Map for action codes. The key is the first char after the name. @@ -514,20 +689,15 @@ map_act = { end, -- Operand size chars fall right through. - B = putpat, W = putpat, D = putpat, V = putpat, - F = putpat, G = putpat, + B = putpat, W = putpat, D = putpat, Q = putpat, + V = putpat, U = putpat, T = putpat, M = putpat, X = putpat, P = putpat, + F = putpat, G = putpat, -- Collect prefixes. [":"] = function(ctx, name, pat) ctx[pat == ":" and name or sub(pat, 2)] = name - end, - - -- Select alternate opcode name when prefixed with o16. - ["/"] = function(ctx, name, pat) - local wname, rpat = match(pat, "^/([a-z0-9 ]+)(.*)") - if ctx.o16 then name = wname; ctx.o16 = false end - return putpat(ctx, name, rpat) + if ctx.pos - ctx.start > 5 then return unknown(ctx) end -- Limit #prefixes. end, -- Chain to special handler specified by name. @@ -537,46 +707,60 @@ map_act = { -- Use named subtable for opcode group. ["!"] = function(ctx, name, pat) + local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end + return dispatch(ctx, map_opcgroup[name][((mrm-(mrm%8))/8)%8+1], sub(pat, 2)) + end, - local pos = ctx.pos - if pos > ctx.stop then return incomplete(ctx) end - local mrm = byte(ctx.code, pos, pos) - ctx.pos = pos+1 - ctx.mrm = mrm - - local opat = map_opcgroup[name][((mrm-(mrm%8))/8)%8+1] - if not opat then return unknown(ctx) end - local name, pat2 = match(opat, "^([a-z0-9 ]*)(.*)") - return putpat(ctx, name, pat2 ~= "" and pat2 or sub(pat, 2)) + -- o16,o32[,o64] variants. + sz = function(ctx, name, pat) + if ctx.o16 then ctx.o16 = false + else + pat = match(pat, ",(.*)") + if ctx.rexw then + local p = match(pat, ",(.*)") + if p then pat = p; ctx.rexw = false end + end + end + pat = match(pat, "^[^,]*") + return dispatch(ctx, pat) end, -- Two-byte opcode dispatch. opc2 = function(ctx, name, pat) - return opcdispatch(ctx, map_opc2) + return dispatchmap(ctx, map_opc2) + end, + + -- Three-byte opcode dispatch. + opc3 = function(ctx, name, pat) + return dispatchmap(ctx, map_opc3[pat]) end, - -- SSSE3 dispatch. - ssse3 = function(ctx, name, pat) - return opcdispatch(ctx, map_ssse3[pat]) + -- VMX/SVM dispatch. 
+ vm = function(ctx, name, pat) + return dispatch(ctx, map_opcvm[ctx.mrm]) end, -- Floating point opcode dispatch. fp = function(ctx, name, pat) - - local pos = ctx.pos - if pos > ctx.stop then return incomplete(ctx) end - local mrm = byte(ctx.code, pos, pos) - ctx.pos = pos+1 - ctx.mrm = mrm - + local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end local rm = mrm%8 local idx = pat*8 + ((mrm-rm)/8)%8 if mrm >= 192 then idx = idx + 64 end local opat = map_opcfp[idx] if type(opat) == "table" then opat = opat[rm+1] end - if not opat then return unknown(ctx) end - local name, pat2 = match(opat, "^([a-z0-9 ]*)(.*)") - return putpat(ctx, name, pat2) + return dispatch(ctx, opat) + end, + + -- REX prefix. + rex = function(ctx, name, pat) + if ctx.rex then return unknown(ctx) end -- Only 1 REX prefix allowed. + for p in gmatch(pat, ".") do ctx["rex"..p] = true end + ctx.rex = true + end, + + -- Special case for nop with REX prefix. + nop = function(ctx, name, pat) + return dispatch(ctx, ctx.rex and pat or "nop") end, } @@ -592,8 +776,8 @@ local function disass_block(ctx, ofs, len) ctx.stop = stop ctx.imm = nil ctx.mrm = false - ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false - while ctx.pos <= stop do opcdispatch(ctx, map_opc1) end + clearprefixes(ctx) + while ctx.pos <= stop do dispatchmap(ctx, ctx.map1) end if ctx.pos ~= ctx.start then incomplete(ctx) end end @@ -605,6 +789,18 @@ local function create_(code, addr, out) ctx.out = out or io.write ctx.symtab = {} ctx.disass = disass_block + ctx.hexdump = 16 + ctx.x64 = false + ctx.map1 = map_opc1_32 + ctx.aregs = map_regs.D + return ctx +end + +local function create64_(code, addr, out) + local ctx = create_(code, addr, out) + ctx.x64 = true + ctx.map1 = map_opc1_64 + ctx.aregs = map_regs.Q return ctx end @@ -613,10 +809,28 @@ local function disass_(code, addr, out) create_(code, addr, out):disass() end +local function disass64_(code, addr, out) + create64_(code, addr, out):disass() +end + +-- Return register name for RID. +local function regname_(r) + if r < 8 then return map_regs.D[r+1] end + return map_regs.X[r-7] +end + +local function regname64_(r) + if r < 16 then return map_regs.Q[r+1] end + return map_regs.X[r-15] +end -- Public module functions. module(...) create = create_ +create64 = create64_ disass = disass_ +disass64 = disass64_ +regname = regname_ +regname64 = regname64_ diff --git a/game/thirdparty/jit/dump.lua b/game/thirdparty/jit/dump.lua index e278bd5296..5f32eb80f4 100644 --- a/game/thirdparty/jit/dump.lua +++ b/game/thirdparty/jit/dump.lua @@ -1,264 +1,641 @@ ---------------------------------------------------------------------------- --- LuaJIT machine code dumper module. +-- LuaJIT compiler dump module. -- --- Copyright (C) 2005-2010 Mike Pall. All rights reserved. --- Released under the MIT/X license. See luajit.h for full copyright notice. +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- Released under the MIT/X license. See Copyright Notice in luajit.h ---------------------------------------------------------------------------- --- Activate this module to dump the machine code for all functions --- immediately after they have been compiled. The disassembler --- output is mixed with the bytecode listing. -- --- Try: luajit -j dump -e 'print "foo"' --- luajit -j dump=foo.dump foo.lua --- luajit -j off -j dump -e 'jit.compile(assert(loadfile"foo.lua")))' +-- This module can be used to debug the JIT compiler itself. 
It dumps the +-- code representations and structures used in various compiler stages. -- --- Default output is to stderr. To redirect output to a file, --- pass a filename as an argument or set the environment variable --- "LUAJIT_DUMPFILE". --- Note: The file is overwritten each time you run luajit. +-- Example usage: +-- +-- luajit -jdump -e "local x=0; for i=1,1e6 do x=x+i end; print(x)" +-- luajit -jdump=im -e "for i=1,1000 do for j=1,1000 do end end" | less -R +-- luajit -jdump=is myapp.lua | less -R +-- luajit -jdump=-b myapp.lua +-- luajit -jdump=+aH,myapp.html myapp.lua +-- luajit -jdump=ixT,myapp.dump myapp.lua +-- +-- The first argument specifies the dump mode. The second argument gives +-- the output file name. Default output is to stdout, unless the environment +-- variable LUAJIT_DUMPFILE is set. The file is overwritten every time the +-- module is started. +-- +-- Different features can be turned on or off with the dump mode. If the +-- mode starts with a '+', the following features are added to the default +-- set of features; a '-' removes them. Otherwise the features are replaced. +-- +-- The following dump features are available (* marks the default): +-- +-- * t Print a line for each started, ended or aborted trace (see also -jv). +-- * b Dump the traced bytecode. +-- * i Dump the IR (intermediate representation). +-- r Augment the IR with register/stack slots. +-- s Dump the snapshot map. +-- * m Dump the generated machine code. +-- x Print each taken trace exit. +-- X Print each taken trace exit and the contents of all registers. +-- +-- The output format can be set with the following characters: +-- +-- T Plain text output. +-- A ANSI-colored text output +-- H Colorized HTML + CSS output. +-- +-- The default output format is plain text. It's set to ANSI-colored text +-- if the COLORTERM variable is set. Note: this is independent of any output +-- redirection, which is actually considered a feature. +-- +-- You probably want to use less -R to enjoy viewing ANSI-colored text from +-- a pipe or a file. Add this to your ~/.bashrc: export LESS="-R" -- --- TODO: Find a way to be more selective on what to dump. ------------------------------------------------------------------------------ --- Priority for compiler pipeline. Must run after backend (negative) --- and should be even because we only catch successful compiles. -local PRIORITY = -98 - -- Cache some library functions and objects. local jit = require("jit") -assert(jit.version_num == 10106, "LuaJIT core/library version mismatch") +assert(jit.version_num == 20000, "LuaJIT core/library version mismatch") local jutil = require("jit.util") -local type, format, gsub = type, string.format, string.gsub -local bytecode, const = jutil.bytecode, jutil.const -local getinfo = debug.getinfo +local vmdef = require("jit.vmdef") +local funcinfo, funcbc = jutil.funcinfo, jutil.funcbc +local traceinfo, traceir, tracek = jutil.traceinfo, jutil.traceir, jutil.tracek +local tracemc, tracesnap = jutil.tracemc, jutil.tracesnap +local traceexitstub, ircalladdr = jutil.traceexitstub, jutil.ircalladdr +local bit = require("bit") +local band, shl, shr = bit.band, bit.lshift, bit.rshift +local sub, gsub, format = string.sub, string.gsub, string.format +local byte, char, rep = string.byte, string.char, string.rep +local type, tostring = type, tostring local stdout, stderr = io.stdout, io.stderr --- Load the right disassembler. 
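-- Usage sketch (illustrative only, assuming the jit/ directory is on
-- package.path so this file resolves as "jit.dump", and using an example
-- output file name): the dump features listed above can also be enabled
-- programmatically through the module's public on()/off() functions.
local dump = require("jit.dump")
dump.on("tbim", "myapp.dump")  -- trace events, bytecode, IR and machine code
-- ... run the code whose traces should be dumped ...
dump.off()                     -- detach the handlers and close the output file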
-local dis = require("jit.dis_"..jit.arch) -local discreate, disass_ = dis.create, dis.disass - --- Turn compilation off for the whole module. LuaJIT would do that anyway. -jit.off(true, true) - --- Separator line. -local sepline = "-------------------------------" - --- Map JSUB indices to names. --- CHECK: must match the order in ljit_x86.h. Regenerate with: --- grep '^ *JSUB_[^_].*,' ljit_x86.h | sed -e 's/^ *JSUB_/ "/' -e 's/,.*/",/' -local jsubnames = { - "STACKPTR", - "GATE_LJ", - "GATE_JL", - "GATE_JC", - "GROW_STACK", - "GROW_CI", - "GATE_JC_PATCH", - "GATE_JC_DEBUG", - "DEOPTIMIZE_CALLER", - "DEOPTIMIZE", - "DEOPTIMIZE_OPEN", - "HOOKINS", - "GCSTEP", - "STRING_SUB3", - "STRING_SUB2", - "HOOKCALL", - "HOOKRET", - "METACALL", - "METATAILCALL", - "BARRIERF", - "GETGLOBAL", - "GETTABLE_KSTR", - "GETTABLE_STR", - "BARRIERBACK", - "SETGLOBAL", - "SETTABLE_KSTR", - "SETTABLE_STR", - "GETTABLE_KNUM", - "GETTABLE_NUM", - "SETTABLE_KNUM", - "SETTABLE_NUM", - "LOG2_TWORD", - "CONCAT_STR2", +-- Load other modules on-demand. +local bcline, disass + +-- Active flag, output file handle and dump mode. +local active, out, dumpmode + +------------------------------------------------------------------------------ + +local symtab = {} +local nexitsym = 0 + +-- Fill symbol table with trace exit addresses. +local function fillsymtab(nexit) + local t = symtab + if nexitsym == 0 then + local ircall = vmdef.ircall + for i=0,#ircall do t[ircalladdr(i)] = ircall[i] end + end + if nexit > nexitsym then + for i=nexitsym,nexit-1 do + local addr = traceexitstub(i) + if addr == nil then nexit = 1000000; break end + t[addr] = tostring(i) + end + nexitsym = nexit + end + return t +end + +local function dumpwrite(s) + out:write(s) +end + +-- Disassemble machine code. +local function dump_mcode(tr) + local info = traceinfo(tr) + if not info then return end + local mcode, addr, loop = tracemc(tr) + if not mcode then return end + if not disass then disass = require("jit.dis_"..jit.arch) end + out:write("---- TRACE ", tr, " mcode ", #mcode, "\n") + local ctx = disass.create(mcode, addr, dumpwrite) + ctx.hexdump = 0 + ctx.symtab = fillsymtab(info.nexit) + if loop ~= 0 then + symtab[addr+loop] = "LOOP" + ctx:disass(0, loop) + out:write("->LOOP:\n") + ctx:disass(loop, #mcode-loop) + symtab[addr+loop] = nil + else + ctx:disass(0, #mcode) + end +end + +------------------------------------------------------------------------------ + +local irtype_text = { + [0] = "nil", + "fal", + "tru", + "lud", + "str", + "p32", + "thr", + "pro", + "fun", + "p64", + "cdt", + "tab", + "udt", + "flt", + "num", + "i8 ", + "u8 ", + "i16", + "u16", + "int", + "u32", + "i64", + "u64", + "sfp", +} + +local colortype_ansi = { + [0] = "%s", + "%s", + "%s", + "\027[36m%s\027[m", + "\027[32m%s\027[m", + "%s", + "\027[1m%s\027[m", + "%s", + "\027[1m%s\027[m", + "%s", + "\027[33m%s\027[m", + "\027[31m%s\027[m", + "\027[36m%s\027[m", + "\027[34m%s\027[m", + "\027[34m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", + "\027[35m%s\027[m", } --- Generate map from JSUB addresses to JSUB names. 
-local jsubmap = {} -do - local jsubmcode = jutil.jsubmcode - for pc=0,100000 do - local addr = jsubmcode(pc) - if not addr then break end - jsubmap[addr] = jsubnames[pc+1] or "JSUB#"..pc +local function colorize_text(s, t) + return s +end + +local function colorize_ansi(s, t) + return format(colortype_ansi[t], s) +end + +local irtype_ansi = setmetatable({}, + { __index = function(tab, t) + local s = colorize_ansi(irtype_text[t], t); tab[t] = s; return s; end }) + +local html_escape = { ["<"] = "<", [">"] = ">", ["&"] = "&", } + +local function colorize_html(s, t) + s = gsub(s, "[<>&]", html_escape) + return format('<span class="irt_%s">%s</span>', irtype_text[t], s) +end + +local irtype_html = setmetatable({}, + { __index = function(tab, t) + local s = colorize_html(irtype_text[t], t); tab[t] = s; return s; end }) + +local header_html = [[ +<style type="text/css"> +background { background: #ffffff; color: #000000; } +pre.ljdump { +font-size: 10pt; +background: #f0f4ff; +color: #000000; +border: 1px solid #bfcfff; +padding: 0.5em; +margin-left: 2em; +margin-right: 2em; +} +span.irt_str { color: #00a000; } +span.irt_thr, span.irt_fun { color: #404040; font-weight: bold; } +span.irt_tab { color: #c00000; } +span.irt_udt, span.irt_lud { color: #00c0c0; } +span.irt_num { color: #4040c0; } +span.irt_int, span.irt_i8, span.irt_u8, span.irt_i16, span.irt_u16 { color: #b040b0; } +</style> +]] + +local colorize, irtype + +-- Lookup tables to convert some literals into names. +local litname = { + ["SLOAD "] = setmetatable({}, { __index = function(t, mode) + local s = "" + if band(mode, 1) ~= 0 then s = s.."P" end + if band(mode, 2) ~= 0 then s = s.."F" end + if band(mode, 4) ~= 0 then s = s.."T" end + if band(mode, 8) ~= 0 then s = s.."C" end + if band(mode, 16) ~= 0 then s = s.."R" end + if band(mode, 32) ~= 0 then s = s.."I" end + t[mode] = s + return s + end}), + ["XLOAD "] = { [0] = "", "R", "V", "RV", "U", "RU", "VU", "RVU", }, + ["CONV "] = setmetatable({}, { __index = function(t, mode) + local s = irtype[band(mode, 31)] + s = irtype[band(shr(mode, 5), 31)].."."..s + if band(mode, 0x400) ~= 0 then s = s.." trunc" + elseif band(mode, 0x800) ~= 0 then s = s.." sext" end + local c = shr(mode, 14) + if c == 2 then s = s.." index" elseif c == 3 then s = s.." check" end + t[mode] = s + return s + end}), + ["FLOAD "] = vmdef.irfield, + ["FREF "] = vmdef.irfield, + ["FPMATH"] = vmdef.irfpm, +} + +local function ctlsub(c) + if c == "\n" then return "\\n" + elseif c == "\r" then return "\\r" + elseif c == "\t" then return "\\t" + elseif c == "\r" then return "\\r" + else return format("\\%03d", byte(c)) end end --- Pretty-print a constant. -local function conststr(func, idx) - local k = const(func, idx) - if k == nil then return "nil" - elseif k == true then return "true" - elseif k == false then return "false" - elseif type(k) == "string" then - if #k > 10 then return format('"%.10s"~', k) - else return '"'..k..'"' end - else return k.."" end +local function fmtfunc(func, pc) + local fi = funcinfo(func, pc) + if fi.loc then + return fi.loc + elseif fi.ffid then + return vmdef.ffnames[fi.ffid] + elseif fi.addr then + return format("C:%x", fi.addr) + else + return "(?)" + end end --- Pretty-print a bytecode instruction (one or two lines). 
-local function bytecodeout(out, func, pc) - local op, a, b, c = bytecode(func, pc) - if not op then - return true - elseif op == "JMP" then - out:write(format("\n--%04d-- JMP => %04d", pc, pc+1+b)) - elseif op == "FORLOOP" or op == "FORPREP" then - out:write(format("\n--%04d-- %-9s %3d => %04d", pc, op, a, pc+1+b)) +local function formatk(tr, idx) + local k, t, slot = tracek(tr, idx) + local tn = type(k) + local s + if tn == "number" then + if k == 2^52+2^51 then + s = "bias" + else + s = format("%+.14g", k) + end + elseif tn == "string" then + s = format(#k > 20 and '"%.20s"~' or '"%s"', gsub(k, "%c", ctlsub)) + elseif tn == "function" then + s = fmtfunc(k) + elseif tn == "table" then + s = format("{%p}", k) + elseif tn == "userdata" then + if t == 12 then + s = format("userdata:%p", k) + else + s = format("[%p]", k) + if s == "[0x00000000]" then s = "NULL" end + end + elseif t == 21 then -- int64_t + s = sub(tostring(k), 1, -3) + if sub(s, 1, 1) ~= "-" then s = "+"..s end else - out:write(format("\n--%04d-- %-9s %3d %4s %4s", - pc, op, a, b or "", c or "")) - if b and b < 0 then out:write(" ; ", conststr(func, b)) end - if c and c < 0 then out:write(" ; ", conststr(func, c)) end + s = tostring(k) -- For primitives. + end + s = colorize(format("%-4s", s), t) + if slot then + s = format("%s @%d", s, slot) end + return s end --- Dump machine code and mix it with the bytecode listing. -local function dumpfunc(func, out, deopt) - if not out then out = stderr end - local info = getinfo(func, "S") - - -- Don't bother checking for the right blocks to dump. - -- Dump the main block (if not deopt) and always all deopt blocks. - for block=deopt and 2 or 1,1000000 do - local addr, code, mfmiter = jutil.mcode(func, block) - if not addr then - if code then return code end -- Not compiled: return status. - break -- No more blocks to dump. +local function printsnap(tr, snap) + local n = 2 + for s=0,snap[1]-1 do + local sn = snap[n] + if shr(sn, 24) == s then + n = n + 1 + local ref = band(sn, 0xffff) - 0x8000 -- REF_BIAS + if ref < 0 then + out:write(formatk(tr, ref)) + elseif band(sn, 0x80000) ~= 0 then -- SNAP_SOFTFPNUM + out:write(colorize(format("%04d/%04d", ref, ref+1), 14)) + else + local m, ot, op1, op2 = traceir(tr, ref) + out:write(colorize(format("%04d", ref), band(ot, 31))) + end + out:write(band(sn, 0x10000) == 0 and " " or "|") -- SNAP_FRAME + else + out:write("---- ") end + end + out:write("]\n") +end + +-- Dump snapshots (not interleaved with IR). +local function dump_snap(tr) + out:write("---- TRACE ", tr, " snapshots\n") + for i=0,1000000000 do + local snap = tracesnap(tr, i) + if not snap then break end + out:write(format("#%-3d %04d [ ", i, snap[0])) + printsnap(tr, snap) + end +end - -- Print header. - out:write(sepline, " ", info.source, ":", info.linedefined) - if block ~= 1 then out:write(" DEOPT block ", block) end - - -- Create disassembler context. - local ctx = discreate(code, addr, function(s) out:write(s) end) - ctx.symtab = jsubmap - - -- Dump an mcode block. - local pc, ofs = 1, 0 - local len, isdeopt = mfmiter() - if isdeopt then pc = len; len = 0 - elseif block ~= 1 then break end -- Stop before next main block. - for t, m in mfmiter do - if t == "COMBINE" then - bytecodeout(out, func, pc) +-- Return a register name or stack slot for a rid/sp location. 
+local function ridsp_name(ridsp) + if not disass then disass = require("jit.dis_"..jit.arch) end + local rid = band(ridsp, 0xff) + if ridsp > 255 then return format("[%x]", shr(ridsp, 8)*4) end + if rid < 128 then return disass.regname(rid) end + return "" +end + +-- Recursively gather CALL* args and dump them. +local function dumpcallargs(tr, ins) + if ins < 0 then + out:write(formatk(tr, ins)) + else + local m, ot, op1, op2 = traceir(tr, ins) + local oidx = 6*shr(ot, 8) + local op = sub(vmdef.irnames, oidx+1, oidx+6) + if op == "CARG " then + dumpcallargs(tr, op1) + if op2 < 0 then + out:write(" ", formatk(tr, op2)) else - if len ~= 0 then - out:write("\n") - if len > 0 then - ctx:disass(ofs, len) - ofs = ofs + len - else - out:write(format("%08x ** deoptimized\n", addr+ofs)) - ofs = ofs - len - end - len = 0 + out:write(" ", format("%04d", op2)) + end + else + out:write(format("%04d", ins)) + end + end +end + +-- Dump IR and interleaved snapshots. +local function dump_ir(tr, dumpsnap, dumpreg) + local info = traceinfo(tr) + if not info then return end + local nins = info.nins + out:write("---- TRACE ", tr, " IR\n") + local irnames = vmdef.irnames + local snapref = 65536 + local snap, snapno + if dumpsnap then + snap = tracesnap(tr, 0) + snapref = snap[0] + snapno = 0 + end + for ins=1,nins do + if ins >= snapref then + if dumpreg then + out:write(format(".... SNAP #%-3d [ ", snapno)) + else + out:write(format(".... SNAP #%-3d [ ", snapno)) + end + printsnap(tr, snap) + snapno = snapno + 1 + snap = tracesnap(tr, snapno) + snapref = snap and snap[0] or 65536 + end + local m, ot, op1, op2, ridsp = traceir(tr, ins) + local oidx, t = 6*shr(ot, 8), band(ot, 31) + local op = sub(irnames, oidx+1, oidx+6) + if op == "LOOP " then + if dumpreg then + out:write(format("%04d ------------ LOOP ------------\n", ins)) + else + out:write(format("%04d ------ LOOP ------------\n", ins)) + end + elseif op ~= "NOP " and op ~= "CARG " and + (dumpreg or op ~= "RENAME") then + if dumpreg then + out:write(format("%04d %-5s ", ins, ridsp_name(ridsp))) + else + out:write(format("%04d ", ins)) + end + out:write(format("%s%s %s %s ", + band(ot, 128) == 0 and " " or ">", + band(ot, 64) == 0 and " " or "+", + irtype[t], op)) + local m1, m2 = band(m, 3), band(m, 3*4) + if sub(op, 1, 4) == "CALL" then + if m2 == 1*4 then -- op2 == IRMlit + out:write(format("%-10s (", vmdef.ircall[op2])) + elseif op2 < 0 then + out:write(format("[0x%x](", tonumber((tracek(tr, op2))))) + else + out:write(format("%04d (", op2)) end - if type(t) == "number" then - if m then - if isdeopt then - pc = t - 1 + if op1 ~= -1 then dumpcallargs(tr, op1) end + out:write(")") + elseif op == "CNEW " and op2 == -1 then + out:write(formatk(tr, op1)) + elseif m1 ~= 3 then -- op1 != IRMnone + if op1 < 0 then + out:write(formatk(tr, op1)) + else + out:write(format(m1 == 0 and "%04d" or "#%-3d", op1)) + end + if m2 ~= 3*4 then -- op2 != IRMnone + if m2 == 1*4 then -- op2 == IRMlit + local litn = litname[op] + if litn and litn[op2] then + out:write(" ", litn[op2]) + elseif op == "UREFO " or op == "UREFC " then + out:write(format(" #%-3d", shr(op2, 8))) else - bytecodeout(out, func, pc) - len = -t + out:write(format(" #%-3d", op2)) end + elseif op2 < 0 then + out:write(" ", formatk(tr, op2)) else - len = t - if bytecodeout(out, func, pc) then break end + out:write(format(" %04d", op2)) end end end - pc = pc + 1 + out:write("\n") end - if len and len ~= 0 then - out:write(sepline, " tail code\n") - ctx:disass(ofs, len) + end + if snap then + if dumpreg then 
+ out:write(format(".... SNAP #%-3d [ ", snapno)) + else + out:write(format(".... SNAP #%-3d [ ", snapno)) end + printsnap(tr, snap) end +end - -- Print footer. - out:write(sepline, "\n") - out:flush() +------------------------------------------------------------------------------ + +local recprefix = "" +local recdepth = 0 + +-- Format trace error message. +local function fmterr(err, info) + if type(err) == "number" then + if type(info) == "function" then info = fmtfunc(info) end + err = format(vmdef.traceerr[err], info) + end + return err end --- Dump the internal JIT subroutines. -local function dumpjsub_(out) - if not out then out = stderr end - local addr, code = jutil.jsubmcode() - - -- Create disassembler context. - local ctx = discreate(code, addr, function(s) out:write(s) end) - ctx.symtab = jsubmap - - -- Collect addresses and sort them. - local t = {} - for addr in pairs(jsubmap) do t[#t+1] = addr end - t[#t+1] = addr + #code - table.sort(t) - - -- Go through the addresses in ascending order. - local ofs = addr - for i=2,#t do - local next = t[i] - out:write("\n->", jsubmap[ofs], ":\n") -- Output label for JSUB. - ctx:disass(ofs-addr, next-ofs) -- Disassemble corresponding code block. - ofs = next +-- Dump trace states. +local function dump_trace(what, tr, func, pc, otr, oex) + if what == "stop" or (what == "abort" and dumpmode.a) then + if dumpmode.i then dump_ir(tr, dumpmode.s, dumpmode.r and what == "stop") + elseif dumpmode.s then dump_snap(tr) end + if dumpmode.m then dump_mcode(tr) end + end + if what == "start" then + if dumpmode.H then out:write('<pre class="ljdump">\n') end + out:write("---- TRACE ", tr, " ", what) + if otr then out:write(" ", otr, "/", oex) end + out:write(" ", fmtfunc(func, pc), "\n") + recprefix = "" + elseif what == "stop" or what == "abort" then + out:write("---- TRACE ", tr, " ", what) + recprefix = nil + if what == "abort" then + out:write(" ", fmtfunc(func, pc), " -- ", fmterr(otr, oex), "\n") + else + local link = traceinfo(tr).link + if link == tr then + link = "loop" + elseif link == 0 then + link = "interpreter" + end + out:write(" -> ", link, "\n") + end + if dumpmode.H then out:write("</pre>\n\n") else out:write("\n") end + else + out:write("---- TRACE ", what, "\n\n") end out:flush() end +-- Dump recorded bytecode. +local function dump_record(tr, func, pc, depth, callee) + if depth ~= recdepth then + recdepth = depth + recprefix = rep(" .", depth) + end + local line + if pc >= 0 then + line = bcline(func, pc, recprefix) + if dumpmode.H then line = gsub(line, "[<>&]", html_escape) end + else + line = "0000 "..recprefix.." FUNCC \n" + callee = func + end + if pc <= 0 then + out:write(sub(line, 1, -2), " ; ", fmtfunc(func), "\n") + else + out:write(line) + end + if pc >= 0 and band(funcbc(func, pc), 0xff) < 16 then -- ORDER BC + out:write(bcline(func, pc+1, recprefix)) -- Write JMP for cond. + end +end --- Active flag and output file handle. -local active, out +------------------------------------------------------------------------------ --- Dump handler for compiler pipeline. -local function h_dump(st) - local ok, err = pcall(dumpfunc, st.func, out, st.deopt) - if not ok then - stderr:write("\nERROR: jit.dump disabled: ", err, "\n") - jit.attach(h_dump) -- Better turn ourselves off after a failure. - if out and out ~= stdout then out:close() end - out = nil - active = nil +-- Dump taken trace exits. +local function dump_texit(tr, ex, ngpr, nfpr, ...) 
+ out:write("---- TRACE ", tr, " exit ", ex, "\n") + if dumpmode.X then + local regs = {...} + if jit.arch == "x64" then + for i=1,ngpr do + out:write(format(" %016x", regs[i])) + if i % 4 == 0 then out:write("\n") end + end + else + for i=1,ngpr do + out:write(format(" %08x", regs[i])) + if i % 8 == 0 then out:write("\n") end + end + end + for i=1,nfpr do + out:write(format(" %+17.14g", regs[ngpr+i])) + if i % 4 == 0 then out:write("\n") end + end end end --- Detach dump handler from compiler pipeline. +------------------------------------------------------------------------------ + +-- Detach dump handlers. local function dumpoff() if active then active = false - jit.attach(h_dump) - if out and out ~= stdout then out:close() end + jit.attach(dump_texit) + jit.attach(dump_record) + jit.attach(dump_trace) + if out and out ~= stdout and out ~= stderr then out:close() end out = nil end end --- Open the output file and attach dump handler to compiler pipeline. -local function dumpon(filename) +-- Open the output file and attach dump handlers. +local function dumpon(opt, outfile) if active then dumpoff() end - local outfile = filename or os.getenv("LUAJIT_DUMPFILE") - out = outfile and (outfile == "-" and stdout or assert(io.open(outfile, "w"))) - jit.attach(h_dump, PRIORITY) + + local colormode = os.getenv("COLORTERM") and "A" or "T" + if opt then + opt = gsub(opt, "[TAH]", function(mode) colormode = mode; return ""; end) + end + + local m = { t=true, b=true, i=true, m=true, } + if opt and opt ~= "" then + local o = sub(opt, 1, 1) + if o ~= "+" and o ~= "-" then m = {} end + for i=1,#opt do m[sub(opt, i, i)] = (o ~= "-") end + end + dumpmode = m + + if m.t or m.b or m.i or m.s or m.m then + jit.attach(dump_trace, "trace") + end + if m.b then + jit.attach(dump_record, "record") + if not bcline then bcline = require("jit.bc").line end + end + if m.x or m.X then + jit.attach(dump_texit, "texit") + end + + if not outfile then outfile = os.getenv("LUAJIT_DUMPFILE") end + if outfile then + out = outfile == "-" and stdout or assert(io.open(outfile, "w")) + else + out = stdout + end + + m[colormode] = true + if colormode == "A" then + colorize = colorize_ansi + irtype = irtype_ansi + elseif colormode == "H" then + colorize = colorize_html + irtype = irtype_html + out:write(header_html) + else + colorize = colorize_text + irtype = irtype_text + end + active = true end - -- Public module functions. module(...) -disass = disass_ -dump = dumpfunc -dumpjsub = dumpjsub_ on = dumpon off = dumpoff start = dumpon -- For -j command line option. diff --git a/game/thirdparty/jit/v.lua b/game/thirdparty/jit/v.lua new file mode 100644 index 0000000000..bf61f1253e --- /dev/null +++ b/game/thirdparty/jit/v.lua @@ -0,0 +1,162 @@ +---------------------------------------------------------------------------- +-- Verbose mode of the LuaJIT compiler. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- Released under the MIT/X license. See Copyright Notice in luajit.h +---------------------------------------------------------------------------- +-- +-- This module shows verbose information about the progress of the +-- JIT compiler. It prints one line for each generated trace. This module +-- is useful to see which code has been compiled or where the compiler +-- punts and falls back to the interpreter. +-- +-- Example usage: +-- +-- luajit -jv -e "for i=1,1000 do for j=1,1000 do end end" +-- luajit -jv=myapp.out myapp.lua +-- +-- Default output is to stderr. 
To redirect the output to a file, pass a +-- filename as an argument (use '-' for stdout) or set the environment +-- variable LUAJIT_VERBOSEFILE. The file is overwritten every time the +-- module is started. +-- +-- The output from the first example should look like this: +-- +-- [TRACE 1 (command line):1] +-- [TRACE 2 (1/3) (command line):1 -> 1] +-- +-- The first number in each line is the internal trace number. Next are +-- the file name ('(command line)') and the line number (':1') where the +-- trace has started. Side traces also show the parent trace number and +-- the exit number where they are attached to in parentheses ('(1/3)'). +-- An arrow at the end shows where the trace links to ('-> 1'), unless +-- it loops to itself. +-- +-- In this case the inner loop gets hot and is traced first, generating +-- a root trace. Then the last exit from the 1st trace gets hot, too, +-- and triggers generation of the 2nd trace. The side trace follows the +-- path along the outer loop and *around* the inner loop, back to its +-- start, and then links to the 1st trace. Yes, this may seem unusual, +-- if you know how traditional compilers work. Trace compilers are full +-- of surprises like this -- have fun! :-) +-- +-- Aborted traces are shown like this: +-- +-- [TRACE --- foo.lua:44 -- leaving loop in root trace at foo:lua:50] +-- +-- Don't worry -- trace aborts are quite common, even in programs which +-- can be fully compiled. The compiler may retry several times until it +-- finds a suitable trace. +-- +-- Of course this doesn't work with features that are not-yet-implemented +-- (NYI error messages). The VM simply falls back to the interpreter. This +-- may not matter at all if the particular trace is not very high up in +-- the CPU usage profile. Oh, and the interpreter is quite fast, too. +-- +-- Also check out the -jdump module, which prints all the gory details. +-- +------------------------------------------------------------------------------ + +-- Cache some library functions and objects. +local jit = require("jit") +assert(jit.version_num == 20000, "LuaJIT core/library version mismatch") +local jutil = require("jit.util") +local vmdef = require("jit.vmdef") +local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo +local type, format = type, string.format +local stdout, stderr = io.stdout, io.stderr + +-- Active flag and output file handle. +local active, out + +------------------------------------------------------------------------------ + +local startloc, startex + +local function fmtfunc(func, pc) + local fi = funcinfo(func, pc) + if fi.loc then + return fi.loc + elseif fi.ffid then + return vmdef.ffnames[fi.ffid] + elseif fi.addr then + return format("C:%x", fi.addr) + else + return "(?)" + end +end + +-- Format trace error message. +local function fmterr(err, info) + if type(err) == "number" then + if type(info) == "function" then info = fmtfunc(info) end + err = format(vmdef.traceerr[err], info) + end + return err +end + +-- Dump trace states. 
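-- Usage sketch (illustrative only, assuming this file resolves as "jit.v" on
-- package.path, with an example output file name): the verbose trace listing
-- described above can also be switched on from Lua code through the module's
-- public on()/off() functions; omitting the file name falls back to
-- LUAJIT_VERBOSEFILE or stderr.
local verbose = require("jit.v")
verbose.on("myapp.out")  -- one [TRACE ...] line per generated or aborted trace
-- ... run the code of interest ...
verbose.off()            -- detach the handler and close the output file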
+local function dump_trace(what, tr, func, pc, otr, oex) + if what == "start" then + startloc = fmtfunc(func, pc) + startex = otr and "("..otr.."/"..oex..") " or "" + else + if what == "abort" then + local loc = fmtfunc(func, pc) + if loc ~= startloc then + out:write(format("[TRACE --- %s%s -- %s at %s]\n", + startex, startloc, fmterr(otr, oex), loc)) + else + out:write(format("[TRACE --- %s%s -- %s]\n", + startex, startloc, fmterr(otr, oex))) + end + elseif what == "stop" then + local link = traceinfo(tr).link + if link == 0 then + out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n", + tr, startex, startloc)) + elseif link == tr then + out:write(format("[TRACE %3s %s%s]\n", tr, startex, startloc)) + else + out:write(format("[TRACE %3s %s%s -> %d]\n", + tr, startex, startloc, link)) + end + else + out:write(format("[TRACE %s]\n", what)) + end + out:flush() + end +end + +------------------------------------------------------------------------------ + +-- Detach dump handlers. +local function dumpoff() + if active then + active = false + jit.attach(dump_trace) + if out and out ~= stdout and out ~= stderr then out:close() end + out = nil + end +end + +-- Open the output file and attach dump handlers. +local function dumpon(outfile) + if active then dumpoff() end + if not outfile then outfile = os.getenv("LUAJIT_VERBOSEFILE") end + if outfile then + out = outfile == "-" and stdout or assert(io.open(outfile, "w")) + else + out = stderr + end + jit.attach(dump_trace, "trace") + active = true +end + +-- Public module functions. +module(...) + +on = dumpon +off = dumpoff +start = dumpon -- For -j command line option. + diff --git a/game/thirdparty/jit/vmdef.lua b/game/thirdparty/jit/vmdef.lua new file mode 100644 index 0000000000..d7e5225030 --- /dev/null +++ b/game/thirdparty/jit/vmdef.lua @@ -0,0 +1,284 @@ +-- This is a generated file. DO NOT EDIT! + +module(...) 
+ +bcnames = "ISLT ISGE ISLE ISGT ISEQV ISNEV ISEQS ISNES ISEQN ISNEN ISEQP ISNEP ISTC ISFC IST ISF MOV NOT UNM LEN ADDVN SUBVN MULVN DIVVN MODVN ADDNV SUBNV MULNV DIVNV MODNV ADDVV SUBVV MULVV DIVVV MODVV POW CAT KSTR KCDATAKSHORTKNUM KPRI KNIL UGET USETV USETS USETN USETP UCLO FNEW TNEW TDUP GGET GSET TGETV TGETS TGETB TSETV TSETS TSETB TSETM CALLM CALL CALLMTCALLT ITERC ITERN VARG ISNEXTRETM RET RET0 RET1 FORI JFORI FORL IFORL JFORL ITERL IITERLJITERLLOOP ILOOP JLOOP JMP FUNCF IFUNCFJFUNCFFUNCV IFUNCVJFUNCVFUNCC FUNCCW" + +irnames = "LT GE LE GT ULT UGE ULE UGT EQ NE ABC RETF NOP BASE HIOP LOOP USE PHI RENAMEKPRI KINT KGC KPTR KKPTR KNULL KNUM KINT64KSLOT BNOT BSWAP BAND BOR BXOR BSHL BSHR BSAR BROL BROR ADD SUB MUL DIV MOD POW NEG ABS ATAN2 LDEXP MIN MAX FPMATHADDOV SUBOV MULOV AREF HREFK HREF NEWREFUREFO UREFC FREF STRREFALOAD HLOAD ULOAD FLOAD XLOAD SLOAD VLOAD ASTOREHSTOREUSTOREFSTOREXSTORESNEW XSNEW TNEW TDUP CNEW CNEWI TBAR OBAR XBAR CONV TOBIT TOSTR STRTO CALLN CALLL CALLS CALLXSCARG " + +irfpm = { [0]="floor", "ceil", "trunc", "sqrt", "exp", "exp2", "log", "log2", "log10", "sin", "cos", "tan", "other", } + +irfield = { [0]="str.len", "func.env", "tab.meta", "tab.array", "tab.node", "tab.asize", "tab.hmask", "tab.nomm", "udata.meta", "udata.udtype", "udata.file", "cdata.typeid", "cdata.ptr", "cdata.int64", "cdata.int64hi", } + +ircall = { +[0]="lj_str_cmp", +"lj_str_new", +"lj_str_tonum", +"lj_str_fromint", +"lj_str_fromnum", +"lj_tab_new1", +"lj_tab_dup", +"lj_tab_newkey", +"lj_tab_len", +"lj_gc_step_jit", +"lj_gc_barrieruv", +"lj_mem_newgco", +"lj_math_random_step", +"lj_vm_modi", +"lj_carith_divi64", +"lj_carith_divu64", +"lj_carith_modi64", +"lj_carith_modu64", +"lj_carith_powi64", +"lj_carith_powu64", +"lj_cdata_setfin", +"strlen", +"memcpy", +"memset", +"sinh", +"cosh", +"tanh", +"fputc", +"fwrite", +"fflush", +} + +traceerr = { +[0]="error thrown or hook called during recording", +"trace too long", +"trace too deep", +"too many snapshots", +"blacklisted", +"NYI: bytecode %d", +"leaving loop in root trace", +"inner loop in root trace", +"loop unroll limit reached", +"bad argument type", +"call to JIT-disabled function", +"call unroll limit reached", +"down-recursion, restarting", +"NYI: C function %p", +"NYI: FastFunc %s", +"NYI: unsupported variant of FastFunc %s", +"NYI: return to lower frame", +"store with nil or NaN key", +"missing metamethod", +"looping index lookup", +"NYI: mixed sparse/dense table", +"symbol not in cache", +"NYI: unsupported C type conversion", +"NYI: unsupported C function type", +"guard would always fail", +"too many PHIs", +"persistent type instability", +"failed to allocate mcode memory", +"machine code too long", +"hit mcode limit (retrying)", +"too many spill slots", +"inconsistent register allocation", +"NYI: cannot assemble IR instruction %d", +"NYI: PHI shuffling too complex", +"NYI: register coalescing too complex", +} + +ffnames = { +[0]="Lua", +"C", +"assert", +"type", +"getmetatable", +"setmetatable", +"getfenv", +"setfenv", +"rawget", +"rawset", +"rawequal", +"unpack", +"select", +"tonumber", +"tostring", +"next", +"pairs", +"ipairs_aux", +"ipairs", +"error", +"pcall", +"xpcall", +"loadstring", +"loadfile", +"load", +"dofile", +"gcinfo", +"collectgarbage", +"newproxy", +"print", +"coroutine.status", +"coroutine.running", +"coroutine.create", +"coroutine.yield", +"coroutine.resume", +"coroutine.wrap_aux", +"coroutine.wrap", +"math.abs", +"math.floor", +"math.ceil", +"math.sqrt", +"math.log", +"math.log10", +"math.exp", +"math.sin", 
+"math.cos", +"math.tan", +"math.asin", +"math.acos", +"math.atan", +"math.sinh", +"math.cosh", +"math.tanh", +"math.frexp", +"math.modf", +"math.deg", +"math.rad", +"math.atan2", +"math.pow", +"math.fmod", +"math.ldexp", +"math.min", +"math.max", +"math.random", +"math.randomseed", +"bit.tobit", +"bit.bnot", +"bit.bswap", +"bit.lshift", +"bit.rshift", +"bit.arshift", +"bit.rol", +"bit.ror", +"bit.band", +"bit.bor", +"bit.bxor", +"bit.tohex", +"string.len", +"string.byte", +"string.char", +"string.sub", +"string.rep", +"string.reverse", +"string.lower", +"string.upper", +"string.dump", +"string.find", +"string.match", +"string.gmatch_aux", +"string.gmatch", +"string.gsub", +"string.format", +"table.foreachi", +"table.foreach", +"table.getn", +"table.maxn", +"table.insert", +"table.remove", +"table.concat", +"table.sort", +"io.method.close", +"io.method.read", +"io.method.write", +"io.method.flush", +"io.method.seek", +"io.method.setvbuf", +"io.method.lines", +"io.method.__gc", +"io.method.__tostring", +"io.open", +"io.popen", +"io.tmpfile", +"io.close", +"io.read", +"io.write", +"io.flush", +"io.input", +"io.output", +"io.lines_iter", +"io.lines", +"io.type", +"os.execute", +"os.remove", +"os.rename", +"os.tmpname", +"os.getenv", +"os.exit", +"os.clock", +"os.date", +"os.time", +"os.difftime", +"os.setlocale", +"debug.getregistry", +"debug.getmetatable", +"debug.setmetatable", +"debug.getfenv", +"debug.setfenv", +"debug.getinfo", +"debug.getlocal", +"debug.setlocal", +"debug.getupvalue", +"debug.setupvalue", +"debug.sethook", +"debug.gethook", +"debug.debug", +"debug.traceback", +"jit.on", +"jit.off", +"jit.flush", +"jit.status", +"jit.attach", +"jit.util.funcinfo", +"jit.util.funcbc", +"jit.util.funck", +"jit.util.funcuvname", +"jit.util.traceinfo", +"jit.util.traceir", +"jit.util.tracek", +"jit.util.tracesnap", +"jit.util.tracemc", +"jit.util.traceexitstub", +"jit.util.ircalladdr", +"jit.opt.start", +"ffi.meta.__index", +"ffi.meta.__newindex", +"ffi.meta.__eq", +"ffi.meta.__len", +"ffi.meta.__lt", +"ffi.meta.__le", +"ffi.meta.__concat", +"ffi.meta.__call", +"ffi.meta.__add", +"ffi.meta.__sub", +"ffi.meta.__mul", +"ffi.meta.__div", +"ffi.meta.__mod", +"ffi.meta.__pow", +"ffi.meta.__unm", +"ffi.meta.__tostring", +"ffi.clib.__index", +"ffi.clib.__newindex", +"ffi.clib.__gc", +"ffi.cdef", +"ffi.new", +"ffi.cast", +"ffi.typeof", +"ffi.istype", +"ffi.sizeof", +"ffi.alignof", +"ffi.offsetof", +"ffi.errno", +"ffi.string", +"ffi.copy", +"ffi.fill", +"ffi.abi", +"ffi.metatype", +"ffi.gc", +"ffi.load", +} + diff --git a/game/thirdparty/lanes.lua b/game/thirdparty/lanes.lua index 28b19cf046..87860273b4 100644 --- a/game/thirdparty/lanes.lua +++ b/game/thirdparty/lanes.lua @@ -39,7 +39,7 @@ THE SOFTWARE. ]]-- module( "lanes", package.seeall ) - +do return end --require "lua51-lanes" assert( type(lanes)=="table" ) diff --git a/premake4.lua b/premake4.lua index 17edb82a65..ae092cc0ac 100644 --- a/premake4.lua +++ b/premake4.lua @@ -52,7 +52,7 @@ configuration "Debug" configuration "Release" defines { "NDEBUG=1" } flags { "Optimize", "NoFramePointer" } - buildoptions { "-O3" } + buildoptions { "-O2" } targetdir "bin/Release" diff --git a/src/luajit2/dynasm/dasm_arm.h b/src/luajit2/dynasm/dasm_arm.h new file mode 100644 index 0000000000..87db7f000b --- /dev/null +++ b/src/luajit2/dynasm/dasm_arm.h @@ -0,0 +1,448 @@ +/* +** DynASM ARM encoding engine. +** Copyright (C) 2005-2011 Mike Pall. All rights reserved. +** Released under the MIT/X license. See dynasm.lua for full copyright notice. 
+*/ + +#include <stddef.h> +#include <stdarg.h> +#include <string.h> +#include <stdlib.h> + +#define DASM_ARCH "arm" + +#ifndef DASM_EXTERN +#define DASM_EXTERN(a,b,c,d) 0 +#endif + +/* Action definitions. */ +enum { + DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, + /* The following actions need a buffer position. */ + DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, + /* The following actions also have an argument. */ + DASM_REL_PC, DASM_LABEL_PC, + DASM_IMM, DASM_IMM12, DASM_IMM16, DASM_IMML8, DASM_IMML12, + DASM__MAX +}; + +/* Maximum number of section buffer positions for a single dasm_put() call. */ +#define DASM_MAXSECPOS 25 + +/* DynASM encoder status codes. Action list offset or number are or'ed in. */ +#define DASM_S_OK 0x00000000 +#define DASM_S_NOMEM 0x01000000 +#define DASM_S_PHASE 0x02000000 +#define DASM_S_MATCH_SEC 0x03000000 +#define DASM_S_RANGE_I 0x11000000 +#define DASM_S_RANGE_SEC 0x12000000 +#define DASM_S_RANGE_LG 0x13000000 +#define DASM_S_RANGE_PC 0x14000000 +#define DASM_S_RANGE_REL 0x15000000 +#define DASM_S_UNDEF_LG 0x21000000 +#define DASM_S_UNDEF_PC 0x22000000 + +/* Macros to convert positions (8 bit section + 24 bit index). */ +#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) +#define DASM_POS2BIAS(pos) ((pos)&0xff000000) +#define DASM_SEC2POS(sec) ((sec)<<24) +#define DASM_POS2SEC(pos) ((pos)>>24) +#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) + +/* Action list type. */ +typedef const unsigned int *dasm_ActList; + +/* Per-section structure. */ +typedef struct dasm_Section { + int *rbuf; /* Biased buffer pointer (negative section bias). */ + int *buf; /* True buffer pointer. */ + size_t bsize; /* Buffer size in bytes. */ + int pos; /* Biased buffer position. */ + int epos; /* End of biased buffer position - max single put. */ + int ofs; /* Byte offset into section. */ +} dasm_Section; + +/* Core structure holding the DynASM encoding state. */ +struct dasm_State { + size_t psize; /* Allocated size of this structure. */ + dasm_ActList actionlist; /* Current actionlist pointer. */ + int *lglabels; /* Local/global chain/pos ptrs. */ + size_t lgsize; + int *pclabels; /* PC label chains/pos ptrs. */ + size_t pcsize; + void **globals; /* Array of globals (bias -10). */ + dasm_Section *section; /* Pointer to active section. */ + size_t codesize; /* Total size of all code sections. */ + int maxsection; /* 0 <= sectionidx < maxsection. */ + int status; /* Status code. */ + dasm_Section sections[1]; /* All sections. Alloc-extended. */ +}; + +/* The size of the core structure depends on the max. number of sections. */ +#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) + + +/* Initialize DynASM state. */ +void dasm_init(Dst_DECL, int maxsection) +{ + dasm_State *D; + size_t psz = 0; + int i; + Dst_REF = NULL; + DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); + D = Dst_REF; + D->psize = psz; + D->lglabels = NULL; + D->lgsize = 0; + D->pclabels = NULL; + D->pcsize = 0; + D->globals = NULL; + D->maxsection = maxsection; + for (i = 0; i < maxsection; i++) { + D->sections[i].buf = NULL; /* Need this for pass3. */ + D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); + D->sections[i].bsize = 0; + D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ + } +} + +/* Free DynASM state. 
*/ +void dasm_free(Dst_DECL) +{ + dasm_State *D = Dst_REF; + int i; + for (i = 0; i < D->maxsection; i++) + if (D->sections[i].buf) + DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); + if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); + if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); + DASM_M_FREE(Dst, D, D->psize); +} + +/* Setup global label array. Must be called before dasm_setup(). */ +void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) +{ + dasm_State *D = Dst_REF; + D->globals = gl - 10; /* Negative bias to compensate for locals. */ + DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); +} + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +void dasm_growpc(Dst_DECL, unsigned int maxpc) +{ + dasm_State *D = Dst_REF; + size_t osz = D->pcsize; + DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); + memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); +} + +/* Setup encoder. */ +void dasm_setup(Dst_DECL, const void *actionlist) +{ + dasm_State *D = Dst_REF; + int i; + D->actionlist = (dasm_ActList)actionlist; + D->status = DASM_S_OK; + D->section = &D->sections[0]; + memset((void *)D->lglabels, 0, D->lgsize); + if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); + for (i = 0; i < D->maxsection; i++) { + D->sections[i].pos = DASM_SEC2POS(i); + D->sections[i].ofs = 0; + } +} + + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) { \ + D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) +#define CKPL(kind, st) \ + do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ + D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) +#else +#define CK(x, st) ((void)0) +#define CKPL(kind, st) ((void)0) +#endif + +static int dasm_imm12(unsigned int n) +{ + int i; + for (i = 0; i < 16; i++, n = (n << 2) | (n >> 30)) + if (n <= 255) return (int)(n + (i << 8)); + return -1; +} + +/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ +void dasm_put(Dst_DECL, int start, ...) +{ + va_list ap; + dasm_State *D = Dst_REF; + dasm_ActList p = D->actionlist + start; + dasm_Section *sec = D->section; + int pos = sec->pos, ofs = sec->ofs; + int *b; + + if (pos >= sec->epos) { + DASM_M_GROW(Dst, int, sec->buf, sec->bsize, + sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); + sec->rbuf = sec->buf - DASM_POS2BIAS(pos); + sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); + } + + b = sec->rbuf; + b[pos++] = start; + + va_start(ap, start); + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + if (action >= DASM__MAX) { + ofs += 4; + } else { + int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0; + switch (action) { + case DASM_STOP: goto stop; + case DASM_SECTION: + n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); + D->section = &D->sections[n]; goto stop; + case DASM_ESC: p++; ofs += 4; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; + case DASM_REL_LG: + n = (ins & 2047) - 10; pl = D->lglabels + n; + if (n >= 0) { CKPL(lg, LG); goto putrel; } /* Bkwd rel or global. */ + pl += 10; n = *pl; + if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ + goto linkrel; + case DASM_REL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putrel: + n = *pl; + if (n < 0) { /* Label exists. Get label pos and store it. */ + b[pos] = -n; + } else { + linkrel: + b[pos] = n; /* Else link to rel chain, anchored at label. 
*/ + *pl = pos; + } + pos++; + break; + case DASM_LABEL_LG: + pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; + case DASM_LABEL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putlabel: + n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; + } + *pl = -pos; /* Label exists now. */ + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_IMM: + case DASM_IMM16: +#ifdef DASM_CHECKS + CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); + if ((ins & 0x8000)) + CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); + else + CK((n>>((ins>>5)&31)) == 0, RANGE_I); +#endif + b[pos++] = n; + break; + case DASM_IMML8: + case DASM_IMML12: + CK(n >= 0 ? ((n>>((ins>>5)&31)) == 0) : + (((-n)>>((ins>>5)&31)) == 0), RANGE_I); + b[pos++] = n; + break; + case DASM_IMM12: + CK(dasm_imm12((unsigned int)n) != -1, RANGE_I); + b[pos++] = n; + break; + } + } + } +stop: + va_end(ap); + sec->pos = pos; + sec->ofs = ofs; +} +#undef CK + +/* Pass 2: Link sections, shrink aligns, fix label offsets. */ +int dasm_link(Dst_DECL, size_t *szp) +{ + dasm_State *D = Dst_REF; + int secnum; + int ofs = 0; + +#ifdef DASM_CHECKS + *szp = 0; + if (D->status != DASM_S_OK) return D->status; + { + int pc; + for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) + if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; + } +#endif + + { /* Handle globals not defined in this translation unit. */ + int idx; + for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { + int n = D->lglabels[idx]; + /* Undefined label: Collapse rel chain and replace with marker (< 0). */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } + } + } + + /* Combine all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->rbuf; + int pos = DASM_SEC2POS(secnum); + int lastpos = sec->pos; + + while (pos != lastpos) { + dasm_ActList p = D->actionlist + b[pos++]; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: p++; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; + case DASM_REL_LG: case DASM_REL_PC: pos++; break; + case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; + case DASM_IMM: case DASM_IMM12: case DASM_IMM16: + case DASM_IMML8: case DASM_IMML12: pos++; break; + } + } + stop: (void)0; + } + ofs += sec->ofs; /* Next section starts right after current section. */ + } + + D->codesize = ofs; /* Total size of all code sections */ + *szp = ofs; + return DASM_S_OK; +} + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) +#else +#define CK(x, st) ((void)0) +#endif + +/* Pass 3: Encode sections. */ +int dasm_encode(Dst_DECL, void *buffer) +{ + dasm_State *D = Dst_REF; + char *base = (char *)buffer; + unsigned int *cp = (unsigned int *)buffer; + int secnum; + + /* Encode all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->buf; + int *endb = sec->rbuf + sec->pos; + + while (b != endb) { + dasm_ActList p = D->actionlist + *b++; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + int n = (action >= DASM_ALIGN && action < DASM__MAX) ? 
*b++ : 0; + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: *cp++ = *p++; break; + case DASM_REL_EXT: + n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins&2047), !(ins&2048)); + goto patchrel; + case DASM_ALIGN: + ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0xe1a00000; + break; + case DASM_REL_LG: + CK(n >= 0, UNDEF_LG); + case DASM_REL_PC: + CK(n >= 0, UNDEF_PC); + n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base) - 4; + patchrel: + if ((ins & 0x800) == 0) { + CK((n & 3) == 0 && ((n+0x02000000) >> 26) == 0, RANGE_REL); + cp[-1] |= ((n >> 2) & 0x00ffffff); + } else if ((ins & 0x1000)) { + CK((n & 3) == 0 && -256 <= n && n <= 256, RANGE_REL); + goto patchimml8; + } else { + CK((n & 3) == 0 && -4096 <= n && n <= 4096, RANGE_REL); + goto patchimml12; + } + break; + case DASM_LABEL_LG: + ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); + break; + case DASM_LABEL_PC: break; + case DASM_IMM: + cp[-1] |= ((n>>((ins>>10)&31)) & ((1<<((ins>>5)&31))-1)) << (ins&31); + break; + case DASM_IMM12: + cp[-1] |= dasm_imm12((unsigned int)n); + break; + case DASM_IMM16: + cp[-1] |= ((n & 0xf000) << 4) | (n & 0x0fff); + break; + case DASM_IMML8: patchimml8: + cp[-1] |= n >= 0 ? (0x00800000 | (n & 0x0f) | ((n & 0xf0) << 4)) : + ((-n & 0x0f) | ((-n & 0xf0) << 4)); + break; + case DASM_IMML12: patchimml12: + cp[-1] |= n >= 0 ? (0x00800000 | n) : (-n); + break; + default: *cp++ = ins; break; + } + } + stop: (void)0; + } + } + + if (base + D->codesize != (char *)cp) /* Check for phase errors. */ + return DASM_S_PHASE; + return DASM_S_OK; +} +#undef CK + +/* Get PC label offset. */ +int dasm_getpclabel(Dst_DECL, unsigned int pc) +{ + dasm_State *D = Dst_REF; + if (pc*sizeof(int) < D->pcsize) { + int pos = D->pclabels[pc]; + if (pos < 0) return *DASM_POS2PTR(D, -pos); + if (pos > 0) return -1; /* Undefined. */ + } + return -2; /* Unused or out of range. */ +} + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. */ +int dasm_checkstep(Dst_DECL, int secmatch) +{ + dasm_State *D = Dst_REF; + if (D->status == DASM_S_OK) { + int i; + for (i = 1; i <= 9; i++) { + if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } + D->lglabels[i] = 0; + } + } + if (D->status == DASM_S_OK && secmatch >= 0 && + D->section != &D->sections[secmatch]) + D->status = DASM_S_MATCH_SEC|(D->section-D->sections); + return D->status; +} +#endif + diff --git a/src/luajit2/dynasm/dasm_arm.lua b/src/luajit2/dynasm/dasm_arm.lua new file mode 100644 index 0000000000..b8a595b09e --- /dev/null +++ b/src/luajit2/dynasm/dasm_arm.lua @@ -0,0 +1,949 @@ +------------------------------------------------------------------------------ +-- DynASM ARM module. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ + +-- Module information: +local _info = { + arch = "arm", + description = "DynASM ARM module", + version = "1.3.0", + vernum = 10300, + release = "2011-05-05", + author = "Mike Pall", + license = "MIT", +} + +-- Exported glue functions for the arch-specific module. +local _M = { _info = _info } + +-- Cache library functions. 
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs +local assert, setmetatable, rawget = assert, setmetatable, rawget +local _s = string +local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char +local match, gmatch, gsub = _s.match, _s.gmatch, _s.gsub +local concat, sort, insert = table.concat, table.sort, table.insert + +-- Inherited tables and callbacks. +local g_opt, g_arch +local wline, werror, wfatal, wwarn + +-- Action name list. +-- CHECK: Keep this in sync with the C code! +local action_names = { + "STOP", "SECTION", "ESC", "REL_EXT", + "ALIGN", "REL_LG", "LABEL_LG", + "REL_PC", "LABEL_PC", "IMM", "IMM12", "IMM16", "IMML8", "IMML12", +} + +-- Maximum number of section buffer positions for dasm_put(). +-- CHECK: Keep this in sync with the C code! +local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. + +-- Action name -> action number. +local map_action = {} +for n,name in ipairs(action_names) do + map_action[name] = n-1 +end + +-- Action list buffer. +local actlist = {} + +-- Argument list for next dasm_put(). Start with offset 0 into action list. +local actargs = { 0 } + +-- Current number of section buffer positions for dasm_put(). +local secpos = 1 + +------------------------------------------------------------------------------ + +-- Return 8 digit hex number. +local function tohex(x) + return sub(format("%08x", x), -8) -- Avoid 64 bit portability problem in Lua. +end + +-- Dump action names and numbers. +local function dumpactions(out) + out:write("DynASM encoding engine action codes:\n") + for n,name in ipairs(action_names) do + local num = map_action[name] + out:write(format(" %-10s %02X %d\n", name, num, num)) + end + out:write("\n") +end + +-- Write action list buffer as a huge static C array. +local function writeactions(out, name) + local nn = #actlist + if nn == 0 then nn = 1; actlist[0] = map_action.STOP end + out:write("static const unsigned int ", name, "[", nn, "] = {\n") + for i = 1,nn-1 do + assert(out:write("0x", tohex(actlist[i]), ",\n")) + end + assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) +end + +------------------------------------------------------------------------------ + +-- Add word to action list. +local function wputxw(n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[#actlist+1] = n +end + +-- Add action to list with optional arg. Advance buffer pos, too. +local function waction(action, val, a, num) + local w = assert(map_action[action], "bad action name `"..action.."'") + wputxw(w * 0x10000 + (val or 0)) + if a then actargs[#actargs+1] = a end + if a or num then secpos = secpos + (num or 1) end +end + +-- Flush action list (intervening C code or buffer pos overflow). +local function wflush(term) + if #actlist == actargs[1] then return end -- Nothing to flush. + if not term then waction("STOP") end -- Terminate action list. + wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) + actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). + secpos = 1 -- The actionlist offset occupies a buffer position, too. +end + +-- Put escaped word. +local function wputw(n) + if n <= 0x000fffff then waction("ESC") end + wputxw(n) +end + +-- Reserve position for word. +local function wpos() + local pos = #actlist+1 + actlist[pos] = "" + return pos +end + +-- Store word to reserved position. 
+local function wputpos(pos, n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + if n <= 0x000fffff then + insert(actlist, pos+1, n) + n = map_action.ESC * 0x10000 + end + actlist[pos] = n +end + +------------------------------------------------------------------------------ + +-- Global label name -> global label number. With auto assignment on 1st use. +local next_global = 20 +local map_global = setmetatable({}, { __index = function(t, name) + if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end + local n = next_global + if n > 2047 then werror("too many global labels") end + next_global = n + 1 + t[name] = n + return n +end}) + +-- Dump global labels. +local function dumpglobals(out, lvl) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("Global labels:\n") + for i=20,next_global-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write global label enum. +local function writeglobals(out, prefix) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("enum {\n") + for i=20,next_global-1 do + out:write(" ", prefix, t[i], ",\n") + end + out:write(" ", prefix, "_MAX\n};\n") +end + +-- Write global label names. +local function writeglobalnames(out, name) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=20,next_global-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Extern label name -> extern label number. With auto assignment on 1st use. +local next_extern = 0 +local map_extern_ = {} +local map_extern = setmetatable({}, { __index = function(t, name) + -- No restrictions on the name for now. + local n = next_extern + if n > 2047 then werror("too many extern labels") end + next_extern = n + 1 + t[name] = n + map_extern_[n] = name + return n +end}) + +-- Dump extern labels. +local function dumpexterns(out, lvl) + out:write("Extern labels:\n") + for i=0,next_extern-1 do + out:write(format(" %s\n", map_extern_[i])) + end + out:write("\n") +end + +-- Write extern label names. +local function writeexternnames(out, name) + out:write("static const char *const ", name, "[] = {\n") + for i=0,next_extern-1 do + out:write(" \"", map_extern_[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Arch-specific maps. + +-- Ext. register name -> int. name. +local map_archdef = { sp = "r13", lr = "r14", pc = "r15", } + +-- Int. register name -> ext. name. +local map_reg_rev = { r13 = "sp", r14 = "lr", r15 = "pc", } + +local map_type = {} -- Type name -> { ctype, reg } +local ctypenum = 0 -- Type number (for Dt... macros). + +-- Reverse defines for registers. +function _M.revdef(s) + return map_reg_rev[s] or s +end + +local map_shift = { lsl = 0, lsr = 1, asr = 2, ror = 3, } + +local map_cond = { + eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7, + hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, + hs = 2, lo = 3, +} + +------------------------------------------------------------------------------ + +-- Template strings for ARM instructions. +local map_op = { + -- Basic data processing instructions. 
+ and_3 = "e0000000DNPs", + eor_3 = "e0200000DNPs", + sub_3 = "e0400000DNPs", + rsb_3 = "e0600000DNPs", + add_3 = "e0800000DNPs", + adc_3 = "e0a00000DNPs", + sbc_3 = "e0c00000DNPs", + rsc_3 = "e0e00000DNPs", + tst_2 = "e1100000NP", + teq_2 = "e1300000NP", + cmp_2 = "e1500000NP", + cmn_2 = "e1700000NP", + orr_3 = "e1800000DNPs", + mov_2 = "e1a00000DPs", + bic_3 = "e1c00000DNPs", + mvn_2 = "e1e00000DPs", + + and_4 = "e0000000DNMps", + eor_4 = "e0200000DNMps", + sub_4 = "e0400000DNMps", + rsb_4 = "e0600000DNMps", + add_4 = "e0800000DNMps", + adc_4 = "e0a00000DNMps", + sbc_4 = "e0c00000DNMps", + rsc_4 = "e0e00000DNMps", + tst_3 = "e1100000NMp", + teq_3 = "e1300000NMp", + cmp_3 = "e1500000NMp", + cmn_3 = "e1700000NMp", + orr_4 = "e1800000DNMps", + mov_3 = "e1a00000DMps", + bic_4 = "e1c00000DNMps", + mvn_3 = "e1e00000DMps", + + lsl_3 = "e1a00000DMws", + lsr_3 = "e1a00020DMws", + asr_3 = "e1a00040DMws", + ror_3 = "e1a00060DMws", + rrx_2 = "e1a00060DMs", + + -- Multiply and multiply-accumulate. + mul_3 = "e0000090NMSs", + mla_4 = "e0200090NMSDs", + umaal_4 = "e0400090DNMSs", -- v6 + mls_4 = "e0600090DNMSs", -- v6T2 + umull_4 = "e0800090DNMSs", + umlal_4 = "e0a00090DNMSs", + smull_4 = "e0c00090DNMSs", + smlal_4 = "e0e00090DNMSs", + + -- Halfword multiply and multiply-accumulate. + smlabb_4 = "e1000080NMSD", -- v5TE + smlatb_4 = "e10000a0NMSD", -- v5TE + smlabt_4 = "e10000c0NMSD", -- v5TE + smlatt_4 = "e10000e0NMSD", -- v5TE + smlawb_4 = "e1200080NMSD", -- v5TE + smulwb_3 = "e12000a0NMS", -- v5TE + smlawt_4 = "e12000c0NMSD", -- v5TE + smulwt_3 = "e12000e0NMS", -- v5TE + smlalbb_4 = "e1400080NMSD", -- v5TE + smlaltb_4 = "e14000a0NMSD", -- v5TE + smlalbt_4 = "e14000c0NMSD", -- v5TE + smlaltt_4 = "e14000e0NMSD", -- v5TE + smulbb_3 = "e1600080NMS", -- v5TE + smultb_3 = "e16000a0NMS", -- v5TE + smulbt_3 = "e16000c0NMS", -- v5TE + smultt_3 = "e16000e0NMS", -- v5TE + + -- Miscellaneous data processing instructions. + clz_2 = "e16f0f10DM", -- v5T + rev_2 = "e6bf0f30DM", -- v6 + rev16_2 = "e6bf0fb0DM", -- v6 + revsh_2 = "e6ff0fb0DM", -- v6 + sel_3 = "e6800fb0DNM", -- v6 + usad8_3 = "e780f010NMS", -- v6 + usada8_4 = "e7800010NMSD", -- v6 + rbit_2 = "e6ff0f30DM", -- v6T2 + movw_2 = "e3000000DW", -- v6T2 + movt_2 = "e3400000DW", -- v6T2 + -- Note: the X encodes width-1, not width. + sbfx_4 = "e7a00050DMvX", -- v6T2 + ubfx_4 = "e7e00050DMvX", -- v6T2 + -- Note: the X encodes the msb field, not the width. + bfc_3 = "e7c0001fDvX", -- v6T2 + bfi_4 = "e7c00010DMvX", -- v6T2 + + -- Packing and unpacking instructions. + pkhbt_3 = "e6800010DNM", pkhbt_4 = "e6800010DNMv", -- v6 + pkhtb_3 = "e6800050DNM", pkhtb_4 = "e6800050DNMv", -- v6 + sxtab_3 = "e6a00070DNM", sxtab_4 = "e6a00070DNMv", -- v6 + sxtab16_3 = "e6800070DNM", sxtab16_4 = "e6800070DNMv", -- v6 + sxtah_3 = "e6b00070DNM", sxtah_4 = "e6b00070DNMv", -- v6 + sxtb_2 = "e6af0070DM", sxtb_3 = "e6af0070DMv", -- v6 + sxtb16_2 = "e68f0070DM", sxtb16_3 = "e68f0070DMv", -- v6 + sxth_2 = "e6bf0070DM", sxth_3 = "e6bf0070DMv", -- v6 + uxtab_3 = "e6e00070DNM", uxtab_4 = "e6e00070DNMv", -- v6 + uxtab16_3 = "e6c00070DNM", uxtab16_4 = "e6c00070DNMv", -- v6 + uxtah_3 = "e6f00070DNM", uxtah_4 = "e6f00070DNMv", -- v6 + uxtb_2 = "e6ef0070DM", uxtb_3 = "e6ef0070DMv", -- v6 + uxtb16_2 = "e6cf0070DM", uxtb16_3 = "e6cf0070DMv", -- v6 + uxth_2 = "e6ff0070DM", uxth_3 = "e6ff0070DMv", -- v6 + + -- Saturating instructions. 
+ qadd_3 = "e1000050DMN", -- v5TE + qsub_3 = "e1200050DMN", -- v5TE + qdadd_3 = "e1400050DMN", -- v5TE + qdsub_3 = "e1600050DMN", -- v5TE + -- Note: the X for ssat* encodes sat_imm-1, not sat_imm. + ssat_3 = "e6a00010DXM", ssat_4 = "e6a00010DXMp", -- v6 + usat_3 = "e6e00010DXM", usat_4 = "e6e00010DXMp", -- v6 + ssat16_3 = "e6a00f30DXM", -- v6 + usat16_3 = "e6e00f30DXM", -- v6 + + -- Parallel addition and subtraction. + sadd16_3 = "e6100f10DNM", -- v6 + sasx_3 = "e6100f30DNM", -- v6 + ssax_3 = "e6100f50DNM", -- v6 + ssub16_3 = "e6100f70DNM", -- v6 + sadd8_3 = "e6100f90DNM", -- v6 + ssub8_3 = "e6100ff0DNM", -- v6 + qadd16_3 = "e6200f10DNM", -- v6 + qasx_3 = "e6200f30DNM", -- v6 + qsax_3 = "e6200f50DNM", -- v6 + qsub16_3 = "e6200f70DNM", -- v6 + qadd8_3 = "e6200f90DNM", -- v6 + qsub8_3 = "e6200ff0DNM", -- v6 + shadd16_3 = "e6300f10DNM", -- v6 + shasx_3 = "e6300f30DNM", -- v6 + shsax_3 = "e6300f50DNM", -- v6 + shsub16_3 = "e6300f70DNM", -- v6 + shadd8_3 = "e6300f90DNM", -- v6 + shsub8_3 = "e6300ff0DNM", -- v6 + uadd16_3 = "e6500f10DNM", -- v6 + uasx_3 = "e6500f30DNM", -- v6 + usax_3 = "e6500f50DNM", -- v6 + usub16_3 = "e6500f70DNM", -- v6 + uadd8_3 = "e6500f90DNM", -- v6 + usub8_3 = "e6500ff0DNM", -- v6 + uqadd16_3 = "e6600f10DNM", -- v6 + uqasx_3 = "e6600f30DNM", -- v6 + uqsax_3 = "e6600f50DNM", -- v6 + uqsub16_3 = "e6600f70DNM", -- v6 + uqadd8_3 = "e6600f90DNM", -- v6 + uqsub8_3 = "e6600ff0DNM", -- v6 + uhadd16_3 = "e6700f10DNM", -- v6 + uhasx_3 = "e6700f30DNM", -- v6 + uhsax_3 = "e6700f50DNM", -- v6 + uhsub16_3 = "e6700f70DNM", -- v6 + uhadd8_3 = "e6700f90DNM", -- v6 + uhsub8_3 = "e6700ff0DNM", -- v6 + + -- Load/store instructions. + str_2 = "e4000000DL", str_3 = "e4000000DL", str_4 = "e4000000DL", + strb_2 = "e4400000DL", strb_3 = "e4400000DL", strb_4 = "e4400000DL", + ldr_2 = "e4100000DL", ldr_3 = "e4100000DL", ldr_4 = "e4100000DL", + ldrb_2 = "e4500000DL", ldrb_3 = "e4500000DL", ldrb_4 = "e4500000DL", + strh_2 = "e00000b0DL", strh_3 = "e00000b0DL", + ldrh_2 = "e01000b0DL", ldrh_3 = "e01000b0DL", + ldrd_2 = "e00000d0DL", ldrd_3 = "e00000d0DL", -- v5TE + ldrsb_2 = "e01000d0DL", ldrsb_3 = "e01000d0DL", + strd_2 = "e00000f0DL", strd_3 = "e00000f0DL", -- v5TE + ldrsh_2 = "e01000f0DL", ldrsh_3 = "e01000f0DL", + + ldm_2 = "e8900000nR", ldmia_2 = "e8900000nR", ldmfd_2 = "e8900000nR", + ldmda_2 = "e8100000nR", ldmfa_2 = "e8100000nR", + ldmdb_2 = "e9100000nR", ldmea_2 = "e9100000nR", + ldmib_2 = "e9900000nR", ldmed_2 = "e9900000nR", + stm_2 = "e8800000nR", stmia_2 = "e8800000nR", stmfd_2 = "e8800000nR", + stmda_2 = "e8000000nR", stmfa_2 = "e8000000nR", + stmdb_2 = "e9000000nR", stmea_2 = "e9000000nR", + stmib_2 = "e9800000nR", stmed_2 = "e9800000nR", + pop_1 = "e8bd0000R", push_1 = "e92d0000R", + + -- Branch instructions. + b_1 = "ea000000B", + bl_1 = "eb000000B", + blx_1 = "e12fff30C", + bx_1 = "e12fff10M", + + -- Miscellaneous instructions. + nop_0 = "e1a00000", + mrs_1 = "e10f0000D", + bkpt_1 = "e1200070K", -- v5T + svc_1 = "ef000000T", swi_1 = "ef000000T", + ud_0 = "e7f001f0", + + -- NYI: Advanced SIMD and VFP instructions. + + -- NYI instructions, since I have no need for them right now: + -- swp, swpb, strex, ldrex, strexd, ldrexd, strexb, ldrexb, strexh, ldrexh + -- msr, nopv6, yield, wfe, wfi, sev, dbg, bxj, smc, srs, rfe + -- cps, setend, pli, pld, pldw, clrex, dsb, dmb, isb + -- stc, ldc, mcr, mcr2, mrc, mrc2, mcrr, mcrr2, mrrc, mrrc2, cdp, cdp2 +} + +-- Add mnemonics for "s" variants. 
+do + local t = {} + for k,v in pairs(map_op) do + if sub(v, -1) == "s" then + local v2 = sub(v, 1, 2)..char(byte(v, 3)+1)..sub(v, 4, -2) + t[sub(k, 1, -3).."s"..sub(k, -2)] = v2 + end + end + for k,v in pairs(t) do + map_op[k] = v + end +end + +------------------------------------------------------------------------------ + +local function parse_gpr(expr) + local tname, ovreg = match(expr, "^([%w_]+):(r1?[0-9])$") + local tp = map_type[tname or expr] + if tp then + local reg = ovreg or tp.reg + if not reg then + werror("type `"..(tname or expr).."' needs a register override") + end + expr = reg + end + local r = match(expr, "^r(1?[0-9])$") + if r then + r = tonumber(r) + if r <= 15 then return r, tp end + end + werror("bad register name `"..expr.."'") +end + +local function parse_gpr_pm(expr) + local pm, expr2 = match(expr, "^([+-]?)(.*)$") + return parse_gpr(expr2), (pm == "-") +end + +local function parse_reglist(reglist) + reglist = match(reglist, "^{%s*([^}]*)}$") + if not reglist then werror("register list expected") end + local rr = 0 + for p in gmatch(reglist..",", "%s*([^,]*),") do + local rbit = 2^parse_gpr(gsub(p, "%s+$", "")) + if ((rr - (rr % rbit)) / rbit) % 2 ~= 0 then + werror("duplicate register `"..p.."'") + end + rr = rr + rbit + end + return rr +end + +local function parse_imm(imm, bits, shift, scale, signed) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = tonumber(imm) + if n then + if n % 2^scale == 0 then + n = n / 2^scale + if signed then + if n >= 0 then + if n < 2^(bits-1) then return n*2^shift end + else + if n >= -(2^(bits-1))-1 then return (n+2^bits)*2^shift end + end + else + if n >= 0 and n <= 2^bits-1 then return n*2^shift end + end + end + werror("out of range immediate `"..imm.."'") + else + waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) + return 0 + end +end + +local function parse_imm12(imm) + local n = tonumber(imm) + if n then + local m = n + for i=0,-15,-1 do + if m >= 0 and m <= 255 and n % 1 == 0 then return m + (i%16) * 256 end + local t = m % 4 + m = (m - t) / 4 + t * 2^30 + end + werror("out of range immediate `"..imm.."'") + else + waction("IMM12", 0, imm) + return 0 + end +end + +local function parse_imm16(imm) + imm = match(imm, "^#(.*)$") + if not imm then werror("expected immediate operand") end + local n = tonumber(imm) + if n then + if n >= 0 and n <= 65535 and n % 1 == 0 then + local t = n % 4096 + return (n - t) * 16 + t + end + werror("out of range immediate `"..imm.."'") + else + waction("IMM16", 32*16, imm) + return 0 + end +end + +local function parse_imm_load(imm, ext) + local n = tonumber(imm) + if n then + if ext then + if n >= -255 and n <= 255 then + local up = 0x00800000 + if n < 0 then n = -n; up = 0 end + return (n-(n%16))*16+(n%16) + up + end + else + if n >= -4095 and n <= 4095 then + if n >= 0 then return n+0x00800000 end + return -n + end + end + werror("out of range immediate `"..imm.."'") + else + waction(ext and "IMML8" or "IMML12", 32768 + 32*(ext and 8 or 12), imm) + return 0 + end +end + +local function parse_shift(shift, gprok) + if shift == "rrx" then + return 3 * 32 + else + local s, s2 = match(shift, "^(%S+)%s*(.*)$") + s = map_shift[s] + if not s then werror("expected shift operand") end + if sub(s2, 1, 1) == "#" then + return parse_imm(s2, 5, 7, 0, false) + s * 32 + else + if not gprok then werror("expected immediate shift operand") end + return parse_gpr(s2) * 256 + s * 32 + 16 + end + end +end + +local function parse_label(label, 
def) + local prefix = sub(label, 1, 2) + -- =>label (pc label reference) + if prefix == "=>" then + return "PC", 0, sub(label, 3) + end + -- ->name (global label reference) + if prefix == "->" then + return "LG", map_global[sub(label, 3)] + end + if def then + -- [1-9] (local label definition) + if match(label, "^[1-9]$") then + return "LG", 10+tonumber(label) + end + else + -- [<>][1-9] (local label reference) + local dir, lnum = match(label, "^([<>])([1-9])$") + if dir then -- Fwd: 1-9, Bkwd: 11-19. + return "LG", lnum + (dir == ">" and 0 or 10) + end + -- extern label (extern label reference) + local extname = match(label, "^extern%s+(%S+)$") + if extname then + return "EXT", map_extern[extname] + end + end + werror("bad label `"..label.."'") +end + +local function parse_load(params, nparams, n, op) + local oplo = op % 256 + local ext, ldrd = (oplo ~= 0), (oplo == 208) + local d + if (ldrd or oplo == 240) then + d = ((op - (op % 4096)) / 4096) % 16 + if d % 2 ~= 0 then werror("odd destination register") end + end + local pn = params[n] + local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$") + local p2 = params[n+1] + if not p1 then + if not p2 then + if match(pn, "^[<>=%-]") or match(pn, "^extern%s+") then + local mode, n, s = parse_label(pn, false) + waction("REL_"..mode, n + (ext and 0x1800 or 0x0800), s, 1) + return op + 15 * 65536 + 0x01000000 + (ext and 0x00400000 or 0) + end + local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local d, tp = parse_gpr(reg) + if tp then + waction(ext and "IMML8" or "IMML12", 32768 + 32*(ext and 8 or 12), + format(tp.ctypefmt, tailr)) + return op + d * 65536 + 0x01000000 + (ext and 0x00400000 or 0) + end + end + end + werror("expected address operand") + end + if wb == "!" then op = op + 0x00200000 end + if p2 then + if wb == "!" then werror("bad use of '!'") end + local p3 = params[n+2] + op = op + parse_gpr(p1) * 65536 + local imm = match(p2, "^#(.*)$") + if imm then + local m = parse_imm_load(imm, ext) + if p3 then werror("too many parameters") end + op = op + m + (ext and 0x00400000 or 0) + else + local m, neg = parse_gpr_pm(p2) + if ldrd and (m == d or m-1 == d) then werror("register conflict") end + op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000) + if p3 then op = op + parse_shift(p3) end + end + else + local p1a, p2 = match(p1, "^([^,%s]*)%s*(.*)$") + op = op + parse_gpr(p1a) * 65536 + 0x01000000 + if p2 ~= "" then + local imm = match(p2, "^,%s*#(.*)$") + if imm then + local m = parse_imm_load(imm, ext) + op = op + m + (ext and 0x00400000 or 0) + else + local p2a, p3 = match(p2, "^,%s*([^,%s]*)%s*,?%s*(.*)$") + local m, neg = parse_gpr_pm(p2a) + if ldrd and (m == d or m-1 == d) then werror("register conflict") end + op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000) + if p3 ~= "" then + if ext then werror("too many parameters") end + op = op + parse_shift(p3) + end + end + else + if wb == "!" then werror("bad use of '!'") end + op = op + (ext and 0x00c00000 or 0x00800000) + end + end + return op +end + +------------------------------------------------------------------------------ + +-- Handle opcodes defined with template strings. +map_op[".template__"] = function(params, template, nparams) + if not params then return sub(template, 9) end + local op = tonumber(sub(template, 1, 8), 16) + local n = 1 + + -- Limit number of section buffer positions used by a single dasm_put(). + -- A single opcode needs a maximum of 3 positions. 
+ if secpos+3 > maxsecpos then wflush() end + local pos = wpos() + + -- Process each character. + for p in gmatch(sub(template, 9), ".") do + if p == "D" then + op = op + parse_gpr(params[n]) * 4096; n = n + 1 + elseif p == "N" then + op = op + parse_gpr(params[n]) * 65536; n = n + 1 + elseif p == "S" then + op = op + parse_gpr(params[n]) * 256; n = n + 1 + elseif p == "M" then + op = op + parse_gpr(params[n]); n = n + 1 + elseif p == "P" then + local imm = match(params[n], "^#(.*)$") + if imm then + op = op + parse_imm12(imm) + 0x02000000 + else + op = op + parse_gpr(params[n]) + end + n = n + 1 + elseif p == "p" then + op = op + parse_shift(params[n], true); n = n + 1 + elseif p == "L" then + op = parse_load(params, nparams, n, op) + elseif p == "B" then + local mode, n, s = parse_label(params[n], false) + waction("REL_"..mode, n, s, 1) + elseif p == "C" then -- blx gpr vs. blx label. + local p = params[n] + if match(p, "^([%w_]+):(r1?[0-9])$") or match(p, "^r(1?[0-9])$") then + op = op + parse_gpr(p) + else + if op < 0xe0000000 then werror("unconditional instruction") end + local mode, n, s = parse_label(p, false) + waction("REL_"..mode, n, s, 1) + op = 0xfa000000 + end + elseif p == "n" then + local r, wb = match(params[n], "^([^!]*)(!?)$") + op = op + parse_gpr(r) * 65536 + (wb == "!" and 0x00200000 or 0) + n = n + 1 + elseif p == "R" then + op = op + parse_reglist(params[n]); n = n + 1 + elseif p == "W" then + op = op + parse_imm16(params[n]); n = n + 1 + elseif p == "v" then + op = op + parse_imm(params[n], 5, 7, 0, false); n = n + 1 + elseif p == "w" then + local imm = match(params[n], "^#(.*)$") + if imm then + op = op + parse_imm(params[n], 5, 7, 0, false); n = n + 1 + else + op = op + parse_gpr(params[n]) * 256 + 16 + end + elseif p == "X" then + op = op + parse_imm(params[n], 5, 16, 0, false); n = n + 1 + elseif p == "K" then + local imm = tonumber(match(params[n], "^#(.*)$")); n = n + 1 + if not imm or imm % 1 ~= 0 or imm < 0 or imm > 0xffff then + werror("bad immediate operand") + end + local t = imm % 16 + op = op + (imm - t) * 16 + t + elseif p == "T" then + op = op + parse_imm(params[n], 24, 0, 0, false); n = n + 1 + elseif p == "s" then + -- Ignored. + else + assert(false) + end + end + wputpos(pos, op) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode to mark the position where the action list is to be emitted. +map_op[".actionlist_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeactions(out, name) end) +end + +-- Pseudo-opcode to mark the position where the global enum is to be emitted. +map_op[".globals_1"] = function(params) + if not params then return "prefix" end + local prefix = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobals(out, prefix) end) +end + +-- Pseudo-opcode to mark the position where the global names are to be emitted. +map_op[".globalnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobalnames(out, name) end) +end + +-- Pseudo-opcode to mark the position where the extern names are to be emitted. +map_op[".externnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. 
+ wline(function(out) writeexternnames(out, name) end) +end + +------------------------------------------------------------------------------ + +-- Label pseudo-opcode (converted from trailing colon form). +map_op[".label_1"] = function(params) + if not params then return "[1-9] | ->global | =>pcexpr" end + if secpos+1 > maxsecpos then wflush() end + local mode, n, s = parse_label(params[1], true) + if mode == "EXT" then werror("bad label definition") end + waction("LABEL_"..mode, n, s, 1) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcodes for data storage. +map_op[".long_*"] = function(params) + if not params then return "imm..." end + for _,p in ipairs(params) do + local n = tonumber(p) + if not n then werror("bad immediate `"..p.."'") end + if n < 0 then n = n + 2^32 end + wputw(n) + if secpos+2 > maxsecpos then wflush() end + end +end + +-- Alignment pseudo-opcode. +map_op[".align_1"] = function(params) + if not params then return "numpow2" end + if secpos+1 > maxsecpos then wflush() end + local align = tonumber(params[1]) + if align then + local x = align + -- Must be a power of 2 in the range (2 ... 256). + for i=1,8 do + x = x / 2 + if x == 1 then + waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. + return + end + end + end + werror("bad alignment") +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode for (primitive) type definitions (map to C types). +map_op[".type_3"] = function(params, nparams) + if not params then + return nparams == 2 and "name, ctype" or "name, ctype, reg" + end + local name, ctype, reg = params[1], params[2], params[3] + if not match(name, "^[%a_][%w_]*$") then + werror("bad type name `"..name.."'") + end + local tp = map_type[name] + if tp then + werror("duplicate type `"..name.."'") + end + -- Add #type to defines. A bit unclean to put it in map_archdef. + map_archdef["#"..name] = "sizeof("..ctype..")" + -- Add new type and emit shortcut define. + local num = ctypenum + 1 + map_type[name] = { + ctype = ctype, + ctypefmt = format("Dt%X(%%s)", num), + reg = reg, + } + wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) + ctypenum = num +end +map_op[".type_2"] = map_op[".type_3"] + +-- Dump type definitions. +local function dumptypes(out, lvl) + local t = {} + for name in pairs(map_type) do t[#t+1] = name end + sort(t) + out:write("Type definitions:\n") + for _,name in ipairs(t) do + local tp = map_type[name] + local reg = tp.reg or "" + out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Set the current section. +function _M.section(num) + waction("SECTION", num) + wflush(true) -- SECTION is a terminal action. +end + +------------------------------------------------------------------------------ + +-- Dump architecture description. +function _M.dumparch(out) + out:write(format("DynASM %s version %s, released %s\n\n", + _info.arch, _info.version, _info.release)) + dumpactions(out) +end + +-- Dump all user defined elements. +function _M.dumpdef(out, lvl) + dumptypes(out, lvl) + dumpglobals(out, lvl) + dumpexterns(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Pass callbacks from/to the DynASM core. +function _M.passcb(wl, we, wf, ww) + wline, werror, wfatal, wwarn = wl, we, wf, ww + return wflush +end + +-- Setup the arch-specific module. 
+function _M.setup(arch, opt) + g_arch, g_opt = arch, opt +end + +-- Merge the core maps and the arch-specific maps. +function _M.mergemaps(map_coreop, map_def) + setmetatable(map_op, { __index = function(t, k) + local v = map_coreop[k] + if v then return v end + local cc = sub(k, -4, -3) + local cv = map_cond[cc] + if cv then + local v = rawget(t, sub(k, 1, -5)..sub(k, -2)) + if type(v) == "string" then return format("%x%s", cv, sub(v, 2)) end + end + end }) + setmetatable(map_def, { __index = map_archdef }) + return map_op, map_def +end + +return _M + +------------------------------------------------------------------------------ + diff --git a/src/luajit2/dynasm/dasm_ppc.h b/src/luajit2/dynasm/dasm_ppc.h new file mode 100644 index 0000000000..e928ffedd3 --- /dev/null +++ b/src/luajit2/dynasm/dasm_ppc.h @@ -0,0 +1,408 @@ +/* +** DynASM PPC encoding engine. +** Copyright (C) 2005-2011 Mike Pall. All rights reserved. +** Released under the MIT/X license. See dynasm.lua for full copyright notice. +*/ + +#include <stddef.h> +#include <stdarg.h> +#include <string.h> +#include <stdlib.h> + +#define DASM_ARCH "ppc" + +#ifndef DASM_EXTERN +#define DASM_EXTERN(a,b,c,d) 0 +#endif + +/* Action definitions. */ +enum { + DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT, + /* The following actions need a buffer position. */ + DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG, + /* The following actions also have an argument. */ + DASM_REL_PC, DASM_LABEL_PC, DASM_IMM, + DASM__MAX +}; + +/* Maximum number of section buffer positions for a single dasm_put() call. */ +#define DASM_MAXSECPOS 25 + +/* DynASM encoder status codes. Action list offset or number are or'ed in. */ +#define DASM_S_OK 0x00000000 +#define DASM_S_NOMEM 0x01000000 +#define DASM_S_PHASE 0x02000000 +#define DASM_S_MATCH_SEC 0x03000000 +#define DASM_S_RANGE_I 0x11000000 +#define DASM_S_RANGE_SEC 0x12000000 +#define DASM_S_RANGE_LG 0x13000000 +#define DASM_S_RANGE_PC 0x14000000 +#define DASM_S_RANGE_REL 0x15000000 +#define DASM_S_UNDEF_LG 0x21000000 +#define DASM_S_UNDEF_PC 0x22000000 + +/* Macros to convert positions (8 bit section + 24 bit index). */ +#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) +#define DASM_POS2BIAS(pos) ((pos)&0xff000000) +#define DASM_SEC2POS(sec) ((sec)<<24) +#define DASM_POS2SEC(pos) ((pos)>>24) +#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) + +/* Action list type. */ +typedef const unsigned int *dasm_ActList; + +/* Per-section structure. */ +typedef struct dasm_Section { + int *rbuf; /* Biased buffer pointer (negative section bias). */ + int *buf; /* True buffer pointer. */ + size_t bsize; /* Buffer size in bytes. */ + int pos; /* Biased buffer position. */ + int epos; /* End of biased buffer position - max single put. */ + int ofs; /* Byte offset into section. */ +} dasm_Section; + +/* Core structure holding the DynASM encoding state. */ +struct dasm_State { + size_t psize; /* Allocated size of this structure. */ + dasm_ActList actionlist; /* Current actionlist pointer. */ + int *lglabels; /* Local/global chain/pos ptrs. */ + size_t lgsize; + int *pclabels; /* PC label chains/pos ptrs. */ + size_t pcsize; + void **globals; /* Array of globals (bias -10). */ + dasm_Section *section; /* Pointer to active section. */ + size_t codesize; /* Total size of all code sections. */ + int maxsection; /* 0 <= sectionidx < maxsection. */ + int status; /* Status code. */ + dasm_Section sections[1]; /* All sections. Alloc-extended. */ +}; + +/* The size of the core structure depends on the max. 
number of sections. */ +#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) + + +/* Initialize DynASM state. */ +void dasm_init(Dst_DECL, int maxsection) +{ + dasm_State *D; + size_t psz = 0; + int i; + Dst_REF = NULL; + DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); + D = Dst_REF; + D->psize = psz; + D->lglabels = NULL; + D->lgsize = 0; + D->pclabels = NULL; + D->pcsize = 0; + D->globals = NULL; + D->maxsection = maxsection; + for (i = 0; i < maxsection; i++) { + D->sections[i].buf = NULL; /* Need this for pass3. */ + D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); + D->sections[i].bsize = 0; + D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ + } +} + +/* Free DynASM state. */ +void dasm_free(Dst_DECL) +{ + dasm_State *D = Dst_REF; + int i; + for (i = 0; i < D->maxsection; i++) + if (D->sections[i].buf) + DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); + if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); + if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); + DASM_M_FREE(Dst, D, D->psize); +} + +/* Setup global label array. Must be called before dasm_setup(). */ +void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) +{ + dasm_State *D = Dst_REF; + D->globals = gl - 10; /* Negative bias to compensate for locals. */ + DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); +} + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +void dasm_growpc(Dst_DECL, unsigned int maxpc) +{ + dasm_State *D = Dst_REF; + size_t osz = D->pcsize; + DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); + memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); +} + +/* Setup encoder. */ +void dasm_setup(Dst_DECL, const void *actionlist) +{ + dasm_State *D = Dst_REF; + int i; + D->actionlist = (dasm_ActList)actionlist; + D->status = DASM_S_OK; + D->section = &D->sections[0]; + memset((void *)D->lglabels, 0, D->lgsize); + if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); + for (i = 0; i < D->maxsection; i++) { + D->sections[i].pos = DASM_SEC2POS(i); + D->sections[i].ofs = 0; + } +} + + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) { \ + D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0) +#define CKPL(kind, st) \ + do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ + D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0) +#else +#define CK(x, st) ((void)0) +#define CKPL(kind, st) ((void)0) +#endif + +/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ +void dasm_put(Dst_DECL, int start, ...) +{ + va_list ap; + dasm_State *D = Dst_REF; + dasm_ActList p = D->actionlist + start; + dasm_Section *sec = D->section; + int pos = sec->pos, ofs = sec->ofs; + int *b; + + if (pos >= sec->epos) { + DASM_M_GROW(Dst, int, sec->buf, sec->bsize, + sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); + sec->rbuf = sec->buf - DASM_POS2BIAS(pos); + sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); + } + + b = sec->rbuf; + b[pos++] = start; + + va_start(ap, start); + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + if (action >= DASM__MAX) { + ofs += 4; + } else { + int *pl, n = action >= DASM_REL_PC ? 
va_arg(ap, int) : 0; + switch (action) { + case DASM_STOP: goto stop; + case DASM_SECTION: + n = (ins & 255); CK(n < D->maxsection, RANGE_SEC); + D->section = &D->sections[n]; goto stop; + case DASM_ESC: p++; ofs += 4; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break; + case DASM_REL_LG: + n = (ins & 2047) - 10; pl = D->lglabels + n; + if (n >= 0) { CKPL(lg, LG); goto putrel; } /* Bkwd rel or global. */ + pl += 10; n = *pl; + if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ + goto linkrel; + case DASM_REL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putrel: + n = *pl; + if (n < 0) { /* Label exists. Get label pos and store it. */ + b[pos] = -n; + } else { + linkrel: + b[pos] = n; /* Else link to rel chain, anchored at label. */ + *pl = pos; + } + pos++; + break; + case DASM_LABEL_LG: + pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel; + case DASM_LABEL_PC: + pl = D->pclabels + n; CKPL(pc, PC); + putlabel: + n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; + } + *pl = -pos; /* Label exists now. */ + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_IMM: +#ifdef DASM_CHECKS + CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I); + if (ins & 0x8000) + CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I); + else + CK((n>>((ins>>5)&31)) == 0, RANGE_I); +#endif + b[pos++] = n; + break; + } + } + } +stop: + va_end(ap); + sec->pos = pos; + sec->ofs = ofs; +} +#undef CK + +/* Pass 2: Link sections, shrink aligns, fix label offsets. */ +int dasm_link(Dst_DECL, size_t *szp) +{ + dasm_State *D = Dst_REF; + int secnum; + int ofs = 0; + +#ifdef DASM_CHECKS + *szp = 0; + if (D->status != DASM_S_OK) return D->status; + { + int pc; + for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) + if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; + } +#endif + + { /* Handle globals not defined in this translation unit. */ + int idx; + for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) { + int n = D->lglabels[idx]; + /* Undefined label: Collapse rel chain and replace with marker (< 0). */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } + } + } + + /* Combine all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->rbuf; + int pos = DASM_SEC2POS(secnum); + int lastpos = sec->pos; + + while (pos != lastpos) { + dasm_ActList p = D->actionlist + b[pos++]; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: p++; break; + case DASM_REL_EXT: break; + case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break; + case DASM_REL_LG: case DASM_REL_PC: pos++; break; + case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break; + case DASM_IMM: pos++; break; + } + } + stop: (void)0; + } + ofs += sec->ofs; /* Next section starts right after current section. */ + } + + D->codesize = ofs; /* Total size of all code sections */ + *szp = ofs; + return DASM_S_OK; +} + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0) +#else +#define CK(x, st) ((void)0) +#endif + +/* Pass 3: Encode sections. 
*/ +int dasm_encode(Dst_DECL, void *buffer) +{ + dasm_State *D = Dst_REF; + char *base = (char *)buffer; + unsigned int *cp = (unsigned int *)buffer; + int secnum; + + /* Encode all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->buf; + int *endb = sec->rbuf + sec->pos; + + while (b != endb) { + dasm_ActList p = D->actionlist + *b++; + while (1) { + unsigned int ins = *p++; + unsigned int action = (ins >> 16); + int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0; + switch (action) { + case DASM_STOP: case DASM_SECTION: goto stop; + case DASM_ESC: *cp++ = *p++; break; + case DASM_REL_EXT: + n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1); + goto patchrel; + case DASM_ALIGN: + ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000; + break; + case DASM_REL_LG: + CK(n >= 0, UNDEF_LG); + case DASM_REL_PC: + CK(n >= 0, UNDEF_PC); + n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base); + patchrel: + CK((n & 3) == 0 && + (((n+4) + ((ins & 2048) ? 0x00008000 : 0x02000000)) >> + ((ins & 2048) ? 16 : 26)) == 0, RANGE_REL); + cp[-1] |= ((n+4) & ((ins & 2048) ? 0x0000fffc: 0x03fffffc)); + break; + case DASM_LABEL_LG: + ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n); + break; + case DASM_LABEL_PC: break; + case DASM_IMM: + cp[-1] |= ((n>>((ins>>10)&31)) & ((1<<((ins>>5)&31))-1)) << (ins&31); + break; + default: *cp++ = ins; break; + } + } + stop: (void)0; + } + } + + if (base + D->codesize != (char *)cp) /* Check for phase errors. */ + return DASM_S_PHASE; + return DASM_S_OK; +} +#undef CK + +/* Get PC label offset. */ +int dasm_getpclabel(Dst_DECL, unsigned int pc) +{ + dasm_State *D = Dst_REF; + if (pc*sizeof(int) < D->pcsize) { + int pos = D->pclabels[pc]; + if (pos < 0) return *DASM_POS2PTR(D, -pos); + if (pos > 0) return -1; /* Undefined. */ + } + return -2; /* Unused or out of range. */ +} + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. */ +int dasm_checkstep(Dst_DECL, int secmatch) +{ + dasm_State *D = Dst_REF; + if (D->status == DASM_S_OK) { + int i; + for (i = 1; i <= 9; i++) { + if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; } + D->lglabels[i] = 0; + } + } + if (D->status == DASM_S_OK && secmatch >= 0 && + D->section != &D->sections[secmatch]) + D->status = DASM_S_MATCH_SEC|(D->section-D->sections); + return D->status; +} +#endif + diff --git a/src/luajit2/dynasm/dasm_ppc.lua b/src/luajit2/dynasm/dasm_ppc.lua new file mode 100644 index 0000000000..bb6de01e2b --- /dev/null +++ b/src/luajit2/dynasm/dasm_ppc.lua @@ -0,0 +1,1225 @@ +------------------------------------------------------------------------------ +-- DynASM PPC module. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ + +-- Module information: +local _info = { + arch = "ppc", + description = "DynASM PPC module", + version = "1.3.0", + vernum = 10300, + release = "2011-05-05", + author = "Mike Pall", + license = "MIT", +} + +-- Exported glue functions for the arch-specific module. +local _M = { _info = _info } + +-- Cache library functions. 
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs +local assert, setmetatable = assert, setmetatable +local _s = string +local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char +local match, gmatch = _s.match, _s.gmatch +local concat, sort = table.concat, table.sort + +-- Inherited tables and callbacks. +local g_opt, g_arch +local wline, werror, wfatal, wwarn + +-- Action name list. +-- CHECK: Keep this in sync with the C code! +local action_names = { + "STOP", "SECTION", "ESC", "REL_EXT", + "ALIGN", "REL_LG", "LABEL_LG", + "REL_PC", "LABEL_PC", "IMM", +} + +-- Maximum number of section buffer positions for dasm_put(). +-- CHECK: Keep this in sync with the C code! +local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. + +-- Action name -> action number. +local map_action = {} +for n,name in ipairs(action_names) do + map_action[name] = n-1 +end + +-- Action list buffer. +local actlist = {} + +-- Argument list for next dasm_put(). Start with offset 0 into action list. +local actargs = { 0 } + +-- Current number of section buffer positions for dasm_put(). +local secpos = 1 + +------------------------------------------------------------------------------ + +-- Return 8 digit hex number. +local function tohex(x) + return sub(format("%08x", x), -8) -- Avoid 64 bit portability problem in Lua. +end + +-- Dump action names and numbers. +local function dumpactions(out) + out:write("DynASM encoding engine action codes:\n") + for n,name in ipairs(action_names) do + local num = map_action[name] + out:write(format(" %-10s %02X %d\n", name, num, num)) + end + out:write("\n") +end + +-- Write action list buffer as a huge static C array. +local function writeactions(out, name) + local nn = #actlist + if nn == 0 then nn = 1; actlist[0] = map_action.STOP end + out:write("static const unsigned int ", name, "[", nn, "] = {\n") + for i = 1,nn-1 do + assert(out:write("0x", tohex(actlist[i]), ",\n")) + end + assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n")) +end + +------------------------------------------------------------------------------ + +-- Add word to action list. +local function wputxw(n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[#actlist+1] = n +end + +-- Add action to list with optional arg. Advance buffer pos, too. +local function waction(action, val, a, num) + local w = assert(map_action[action], "bad action name `"..action.."'") + wputxw(w * 0x10000 + (val or 0)) + if a then actargs[#actargs+1] = a end + if a or num then secpos = secpos + (num or 1) end +end + +-- Flush action list (intervening C code or buffer pos overflow). +local function wflush(term) + if #actlist == actargs[1] then return end -- Nothing to flush. + if not term then waction("STOP") end -- Terminate action list. + wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true) + actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). + secpos = 1 -- The actionlist offset occupies a buffer position, too. +end + +-- Put escaped word. +local function wputw(n) + if n <= 0xffffff then waction("ESC") end + wputxw(n) +end + +-- Reserve position for word. +local function wpos() + local pos = #actlist+1 + actlist[pos] = "" + return pos +end + +-- Store word to reserved position. 
+local function wputpos(pos, n) + assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range") + actlist[pos] = n +end + +------------------------------------------------------------------------------ + +-- Global label name -> global label number. With auto assignment on 1st use. +local next_global = 20 +local map_global = setmetatable({}, { __index = function(t, name) + if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end + local n = next_global + if n > 2047 then werror("too many global labels") end + next_global = n + 1 + t[name] = n + return n +end}) + +-- Dump global labels. +local function dumpglobals(out, lvl) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("Global labels:\n") + for i=20,next_global-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write global label enum. +local function writeglobals(out, prefix) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("enum {\n") + for i=20,next_global-1 do + out:write(" ", prefix, t[i], ",\n") + end + out:write(" ", prefix, "_MAX\n};\n") +end + +-- Write global label names. +local function writeglobalnames(out, name) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=20,next_global-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Extern label name -> extern label number. With auto assignment on 1st use. +local next_extern = 0 +local map_extern_ = {} +local map_extern = setmetatable({}, { __index = function(t, name) + -- No restrictions on the name for now. + local n = next_extern + if n > 2047 then werror("too many extern labels") end + next_extern = n + 1 + t[name] = n + map_extern_[n] = name + return n +end}) + +-- Dump extern labels. +local function dumpexterns(out, lvl) + out:write("Extern labels:\n") + for i=0,next_extern-1 do + out:write(format(" %s\n", map_extern_[i])) + end + out:write("\n") +end + +-- Write extern label names. +local function writeexternnames(out, name) + out:write("static const char *const ", name, "[] = {\n") + for i=0,next_extern-1 do + out:write(" \"", map_extern_[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Arch-specific maps. +local map_archdef = { sp = "r1" } -- Ext. register name -> int. name. + +local map_type = {} -- Type name -> { ctype, reg } +local ctypenum = 0 -- Type number (for Dt... macros). + +-- Reverse defines for registers. +function _M.revdef(s) + if s == "r1" then return "sp" end + return s +end + +local map_cond = { + lt = 0, gt = 1, eq = 2, so = 3, + ge = 4, le = 5, ne = 6, ns = 7, +} + +------------------------------------------------------------------------------ + +-- Template strings for PPC instructions. 
+local map_op = { + tdi_3 = "08000000ARI", + twi_3 = "0c000000ARI", + mulli_3 = "1c000000RRI", + subfic_3 = "20000000RRI", + cmplwi_3 = "28000000XRU", + cmplwi_2 = "28000000-RU", + cmpldi_3 = "28200000XRU", + cmpldi_2 = "28200000-RU", + cmpwi_3 = "2c000000XRI", + cmpwi_2 = "2c000000-RI", + cmpdi_3 = "2c200000XRI", + cmpdi_2 = "2c200000-RI", + addic_3 = "30000000RRI", + ["addic._3"] = "34000000RRI", + addi_3 = "38000000RR0I", + li_2 = "38000000RI", + la_2 = "38000000RD", + addis_3 = "3c000000RR0I", + lis_2 = "3c000000RI", + lus_2 = "3c000000RU", + bc_3 = "40000000AAK", + bcl_3 = "40000001AAK", + bdnz_1 = "42000000K", + bdz_1 = "42400000K", + sc_0 = "44000000", + b_1 = "48000000J", + bl_1 = "48000001J", + rlwimi_5 = "50000000RR~AAA.", + rlwinm_5 = "54000000RR~AAA.", + rlwnm_5 = "5c000000RR~RAA.", + ori_3 = "60000000RR~U", + nop_0 = "60000000", + oris_3 = "64000000RR~U", + xori_3 = "68000000RR~U", + xoris_3 = "6c000000RR~U", + ["andi._3"] = "70000000RR~U", + ["andis._3"] = "74000000RR~U", + lwz_2 = "80000000RD", + lwzu_2 = "84000000RD", + lbz_2 = "88000000RD", + lbzu_2 = "8c000000RD", + stw_2 = "90000000RD", + stwu_2 = "94000000RD", + stb_2 = "98000000RD", + stbu_2 = "9c000000RD", + lhz_2 = "a0000000RD", + lhzu_2 = "a4000000RD", + lha_2 = "a8000000RD", + lhau_2 = "ac000000RD", + sth_2 = "b0000000RD", + sthu_2 = "b4000000RD", + lmw_2 = "b8000000RD", + stmw_2 = "bc000000RD", + lfs_2 = "c0000000FD", + lfsu_2 = "c4000000FD", + lfd_2 = "c8000000FD", + lfdu_2 = "cc000000FD", + stfs_2 = "d0000000FD", + stfsu_2 = "d4000000FD", + stfd_2 = "d8000000FD", + stfdu_2 = "dc000000FD", + ld_2 = "e8000000RD", -- NYI: displacement must be divisible by 4. + ldu_2 = "e8000001RD", + lwa_2 = "e8000002RD", + std_2 = "f8000000RD", + stdu_2 = "f8000001RD", + + -- Primary opcode 19: + mcrf_2 = "4c000000XX", + isync_0 = "4c00012c", + crnor_3 = "4c000042CCC", + crnot_2 = "4c000042CC=", + crandc_3 = "4c000102CCC", + crxor_3 = "4c000182CCC", + crclr_1 = "4c000182C==", + crnand_3 = "4c0001c2CCC", + crand_3 = "4c000202CCC", + creqv_3 = "4c000242CCC", + crset_1 = "4c000242C==", + crorc_3 = "4c000342CCC", + cror_3 = "4c000382CCC", + crmove_2 = "4c000382CC=", + bclr_2 = "4c000020AA", + bclrl_2 = "4c000021AA", + bcctr_2 = "4c000420AA", + bcctrl_2 = "4c000421AA", + blr_0 = "4e800020", + blrl_0 = "4e800021", + bctr_0 = "4e800420", + bctrl_0 = "4e800421", + + -- Primary opcode 31: + cmpw_3 = "7c000000XRR", + cmpw_2 = "7c000000-RR", + cmpd_3 = "7c200000XRR", + cmpd_2 = "7c200000-RR", + tw_3 = "7c000008ARR", + subfc_3 = "7c000010RRR.", + subc_3 = "7c000010RRR~.", + mulhdu_3 = "7c000012RRR.", + addc_3 = "7c000014RRR.", + mulhwu_3 = "7c000016RRR.", + isel_4 = "7c00001eRRRC", + isellt_3 = "7c00001eRRR", + iselgt_3 = "7c00005eRRR", + iseleq_3 = "7c00009eRRR", + mfcr_1 = "7c000026R", + -- NYI: mtcrf, mtocrf, mfocrf + lwarx_3 = "7c000028RR0R", + ldx_3 = "7c00002aRR0R", + lwzx_3 = "7c00002eRR0R", + slw_3 = "7c000030RR~R.", + cntlzw_2 = "7c000034RR~", + sld_3 = "7c000036RR~R.", + and_3 = "7c000038RR~R.", + cmplw_3 = "7c000040XRR", + cmplw_2 = "7c000040-RR", + cmpld_3 = "7c200040XRR", + cmpld_2 = "7c200040-RR", + subf_3 = "7c000050RRR.", + sub_3 = "7c000050RRR~.", + ldux_3 = "7c00006aRR0R", + dcbst_2 = "7c00006c-RR", + lwzux_3 = "7c00006eRR0R", + cntlzd_2 = "7c000074RR~", + andc_3 = "7c000078RR~R.", + td_3 = "7c000088ARR", + mulhd_3 = "7c000092RRR.", + mulhw_3 = "7c000096RRR.", + ldarx_3 = "7c0000a8RR0R", + dcbf_2 = "7c0000ac-RR", + lbzx_3 = "7c0000aeRR0R", + neg_2 = "7c0000d0RR.", + lbzux_3 = "7c0000eeRR0R", + popcntb_2 = "7c0000f4RR~", + 
not_2 = "7c0000f8RR~%.", + nor_3 = "7c0000f8RR~R.", + subfe_3 = "7c000110RRR.", + sube_3 = "7c000110RRR~.", + adde_3 = "7c000114RRR.", + stdx_3 = "7c00012aRR0R", + stwcx_3 = "7c00012cRR0R.", + stwx_3 = "7c00012eRR0R", + prtyw_2 = "7c000134RR~", + stdux_3 = "7c00016aRR0R", + stwux_3 = "7c00016eRR0R", + prtyd_2 = "7c000174RR~", + subfze_2 = "7c000190RR.", + addze_2 = "7c000194RR.", + stdcx_3 = "7c0001acRR0R.", + stbx_3 = "7c0001aeRR0R", + subfme_2 = "7c0001d0RR.", + mulld_3 = "7c0001d2RRR.", + addme_2 = "7c0001d4RR.", + mullw_3 = "7c0001d6RRR.", + dcbtst_2 = "7c0001ec-RR", + stbux_3 = "7c0001eeRR0R", + add_3 = "7c000214RRR.", + dcbt_2 = "7c00022c-RR", + lhzx_3 = "7c00022eRR0R", + eqv_3 = "7c000238RR~R.", + eciwx_3 = "7c00026cRR0R", + lhzux_3 = "7c00026eRR0R", + xor_3 = "7c000278RR~R.", + mfspefscr_1 = "7c0082a6R", + mfxer_1 = "7c0102a6R", + mflr_1 = "7c0802a6R", + mfctr_1 = "7c0902a6R", + lwax_3 = "7c0002aaRR0R", + lhax_3 = "7c0002aeRR0R", + mftb_1 = "7c0c42e6R", + mftbu_1 = "7c0d42e6R", + lwaux_3 = "7c0002eaRR0R", + lhaux_3 = "7c0002eeRR0R", + sthx_3 = "7c00032eRR0R", + orc_3 = "7c000338RR~R.", + ecowx_3 = "7c00036cRR0R", + sthux_3 = "7c00036eRR0R", + or_3 = "7c000378RR~R.", + mr_2 = "7c000378RR~%.", + divdu_3 = "7c000392RRR.", + divwu_3 = "7c000396RRR.", + mtspefscr_1 = "7c0083a6R", + mtxer_1 = "7c0103a6R", + mtlr_1 = "7c0803a6R", + mtctr_1 = "7c0903a6R", + dcbi_2 = "7c0003ac-RR", + nand_3 = "7c0003b8RR~R.", + divd_3 = "7c0003d2RRR.", + divw_3 = "7c0003d6RRR.", + cmpb_3 = "7c0003f8RR~R.", + mcrxr_1 = "7c000400X", + subfco_3 = "7c000410RRR.", + subco_3 = "7c000410RRR~.", + addco_3 = "7c000414RRR.", + ldbrx_3 = "7c000428RR0R", + lswx_3 = "7c00042aRR0R", + lwbrx_3 = "7c00042cRR0R", + lfsx_3 = "7c00042eFR0R", + srw_3 = "7c000430RR~R.", + srd_3 = "7c000436RR~R.", + subfo_3 = "7c000450RRR.", + subo_3 = "7c000450RRR~.", + lfsux_3 = "7c00046eFR0R", + lswi_3 = "7c0004aaRR0A", + sync_0 = "7c0004ac", + lwsync_0 = "7c2004ac", + ptesync_0 = "7c4004ac", + lfdx_3 = "7c0004aeFR0R", + nego_2 = "7c0004d0RR.", + lfdux_3 = "7c0004eeFR0R", + subfeo_3 = "7c000510RRR.", + subeo_3 = "7c000510RRR~.", + addeo_3 = "7c000514RRR.", + stdbrx_3 = "7c000528RR0R", + stswx_3 = "7c00052aRR0R", + stwbrx_3 = "7c00052cRR0R", + stfsx_3 = "7c00052eFR0R", + stfsux_3 = "7c00056eFR0R", + subfzeo_2 = "7c000590RR.", + addzeo_2 = "7c000594RR.", + stswi_3 = "7c0005aaRR0A", + stfdx_3 = "7c0005aeFR0R", + subfmeo_2 = "7c0005d0RR.", + mulldo_3 = "7c0005d2RRR.", + addmeo_2 = "7c0005d4RR.", + mullwo_3 = "7c0005d6RRR.", + dcba_2 = "7c0005ec-RR", + stfdux_3 = "7c0005eeFR0R", + addo_3 = "7c000614RRR.", + lhbrx_3 = "7c00062cRR0R", + sraw_3 = "7c000630RR~R.", + srad_3 = "7c000634RR~R.", + srawi_3 = "7c000670RR~A.", + eieio_0 = "7c0006ac", + lfiwax_3 = "7c0006aeFR0R", + sthbrx_3 = "7c00072cRR0R", + extsh_2 = "7c000734RR~.", + extsb_2 = "7c000774RR~.", + divduo_3 = "7c000792RRR.", + divwou_3 = "7c000796RRR.", + icbi_2 = "7c0007ac-RR", + stfiwx_3 = "7c0007aeFR0R", + extsw_2 = "7c0007b4RR~.", + divdo_3 = "7c0007d2RRR.", + divwo_3 = "7c0007d6RRR.", + dcbz_2 = "7c0007ec-RR", + + -- Primary opcode 59: + fdivs_3 = "ec000024FFF.", + fsubs_3 = "ec000028FFF.", + fadds_3 = "ec00002aFFF.", + fsqrts_2 = "ec00002cF-F.", + fres_2 = "ec000030F-F.", + fmuls_3 = "ec000032FF-F.", + frsqrtes_2 = "ec000034F-F.", + fmsubs_4 = "ec000038FFFF~.", + fmadds_4 = "ec00003aFFFF~.", + fnmsubs_4 = "ec00003cFFFF~.", + fnmadds_4 = "ec00003eFFFF~.", + + -- Primary opcode 63: + fdiv_3 = "fc000024FFF.", + fsub_3 = "fc000028FFF.", + fadd_3 = "fc00002aFFF.", + fsqrt_2 = "fc00002cF-F.", 
+ fsel_4 = "fc00002eFFFF~.", + fre_2 = "fc000030F-F.", + fmul_3 = "fc000032FF-F.", + frsqrte_2 = "fc000034F-F.", + fmsub_4 = "fc000038FFFF~.", + fmadd_4 = "fc00003aFFFF~.", + fnmsub_4 = "fc00003cFFFF~.", + fnmadd_4 = "fc00003eFFFF~.", + fcmpu_3 = "fc000000XFF", + fcpsgn_3 = "fc000010FFF.", + fcmpo_3 = "fc000040XFF", + mtfsb1_1 = "fc00004cA", + fneg_2 = "fc000050F-F.", + mcrfs_2 = "fc000080XX", + mtfsb0_1 = "fc00008cA", + fmr_2 = "fc000090F-F.", + frsp_2 = "fc000018F-F.", + fctiw_2 = "fc00001cF-F.", + fctiwz_2 = "fc00001eF-F.", + mtfsfi_2 = "fc00010cAA", -- NYI: upshift. + fnabs_2 = "fc000110F-F.", + fabs_2 = "fc000210F-F.", + frin_2 = "fc000310F-F.", + friz_2 = "fc000350F-F.", + frip_2 = "fc000390F-F.", + frim_2 = "fc0003d0F-F.", + mffs_1 = "fc00048eF.", + mtfsf_1 = "fc00058eF.", + fctid_2 = "fc00065cF-F.", + fctidz_2 = "fc00065eF-F.", + fcfid_2 = "fc00069cF-F.", + + -- Primary opcode 4, SPE APU extension: + evaddw_3 = "10000200RRR", + evaddiw_3 = "10000202RAR~", + evsubw_3 = "10000204RRR~", + evsubiw_3 = "10000206RAR~", + evabs_2 = "10000208RR", + evneg_2 = "10000209RR", + evextsb_2 = "1000020aRR", + evextsh_2 = "1000020bRR", + evrndw_2 = "1000020cRR", + evcntlzw_2 = "1000020dRR", + evcntlsw_2 = "1000020eRR", + brinc_3 = "1000020fRRR", + evand_3 = "10000211RRR", + evandc_3 = "10000212RRR", + evxor_3 = "10000216RRR", + evor_3 = "10000217RRR", + evmr_2 = "10000217RR=", + evnor_3 = "10000218RRR", + evnot_2 = "10000218RR=", + eveqv_3 = "10000219RRR", + evorc_3 = "1000021bRRR", + evnand_3 = "1000021eRRR", + evsrwu_3 = "10000220RRR", + evsrws_3 = "10000221RRR", + evsrwiu_3 = "10000222RRA", + evsrwis_3 = "10000223RRA", + evslw_3 = "10000224RRR", + evslwi_3 = "10000226RRA", + evrlw_3 = "10000228RRR", + evsplati_2 = "10000229RS", + evrlwi_3 = "1000022aRRA", + evsplatfi_2 = "1000022bRS", + evmergehi_3 = "1000022cRRR", + evmergelo_3 = "1000022dRRR", + evcmpgtu_3 = "10000230XRR", + evcmpgtu_2 = "10000230-RR", + evcmpgts_3 = "10000231XRR", + evcmpgts_2 = "10000231-RR", + evcmpltu_3 = "10000232XRR", + evcmpltu_2 = "10000232-RR", + evcmplts_3 = "10000233XRR", + evcmplts_2 = "10000233-RR", + evcmpeq_3 = "10000234XRR", + evcmpeq_2 = "10000234-RR", + evsel_4 = "10000278RRRW", + evsel_3 = "10000278RRR", + evfsadd_3 = "10000280RRR", + evfssub_3 = "10000281RRR", + evfsabs_2 = "10000284RR", + evfsnabs_2 = "10000285RR", + evfsneg_2 = "10000286RR", + evfsmul_3 = "10000288RRR", + evfsdiv_3 = "10000289RRR", + evfscmpgt_3 = "1000028cXRR", + evfscmpgt_2 = "1000028c-RR", + evfscmplt_3 = "1000028dXRR", + evfscmplt_2 = "1000028d-RR", + evfscmpeq_3 = "1000028eXRR", + evfscmpeq_2 = "1000028e-RR", + evfscfui_2 = "10000290R-R", + evfscfsi_2 = "10000291R-R", + evfscfuf_2 = "10000292R-R", + evfscfsf_2 = "10000293R-R", + evfsctui_2 = "10000294R-R", + evfsctsi_2 = "10000295R-R", + evfsctuf_2 = "10000296R-R", + evfsctsf_2 = "10000297R-R", + evfsctuiz_2 = "10000298R-R", + evfsctsiz_2 = "1000029aR-R", + evfststgt_3 = "1000029cXRR", + evfststgt_2 = "1000029c-RR", + evfststlt_3 = "1000029dXRR", + evfststlt_2 = "1000029d-RR", + evfststeq_3 = "1000029eXRR", + evfststeq_2 = "1000029e-RR", + efsadd_3 = "100002c0RRR", + efssub_3 = "100002c1RRR", + efsabs_2 = "100002c4RR", + efsnabs_2 = "100002c5RR", + efsneg_2 = "100002c6RR", + efsmul_3 = "100002c8RRR", + efsdiv_3 = "100002c9RRR", + efscmpgt_3 = "100002ccXRR", + efscmpgt_2 = "100002cc-RR", + efscmplt_3 = "100002cdXRR", + efscmplt_2 = "100002cd-RR", + efscmpeq_3 = "100002ceXRR", + efscmpeq_2 = "100002ce-RR", + efscfd_2 = "100002cfR-R", + efscfui_2 = "100002d0R-R", + efscfsi_2 = 
"100002d1R-R", + efscfuf_2 = "100002d2R-R", + efscfsf_2 = "100002d3R-R", + efsctui_2 = "100002d4R-R", + efsctsi_2 = "100002d5R-R", + efsctuf_2 = "100002d6R-R", + efsctsf_2 = "100002d7R-R", + efsctuiz_2 = "100002d8R-R", + efsctsiz_2 = "100002daR-R", + efststgt_3 = "100002dcXRR", + efststgt_2 = "100002dc-RR", + efststlt_3 = "100002ddXRR", + efststlt_2 = "100002dd-RR", + efststeq_3 = "100002deXRR", + efststeq_2 = "100002de-RR", + efdadd_3 = "100002e0RRR", + efdsub_3 = "100002e1RRR", + efdcfuid_2 = "100002e2R-R", + efdcfsid_2 = "100002e3R-R", + efdabs_2 = "100002e4RR", + efdnabs_2 = "100002e5RR", + efdneg_2 = "100002e6RR", + efdmul_3 = "100002e8RRR", + efddiv_3 = "100002e9RRR", + efdctuidz_2 = "100002eaR-R", + efdctsidz_2 = "100002ebR-R", + efdcmpgt_3 = "100002ecXRR", + efdcmpgt_2 = "100002ec-RR", + efdcmplt_3 = "100002edXRR", + efdcmplt_2 = "100002ed-RR", + efdcmpeq_3 = "100002eeXRR", + efdcmpeq_2 = "100002ee-RR", + efdcfs_2 = "100002efR-R", + efdcfui_2 = "100002f0R-R", + efdcfsi_2 = "100002f1R-R", + efdcfuf_2 = "100002f2R-R", + efdcfsf_2 = "100002f3R-R", + efdctui_2 = "100002f4R-R", + efdctsi_2 = "100002f5R-R", + efdctuf_2 = "100002f6R-R", + efdctsf_2 = "100002f7R-R", + efdctuiz_2 = "100002f8R-R", + efdctsiz_2 = "100002faR-R", + efdtstgt_3 = "100002fcXRR", + efdtstgt_2 = "100002fc-RR", + efdtstlt_3 = "100002fdXRR", + efdtstlt_2 = "100002fd-RR", + efdtsteq_3 = "100002feXRR", + efdtsteq_2 = "100002fe-RR", + evlddx_3 = "10000300RR0R", + evldd_2 = "10000301R8", + evldwx_3 = "10000302RR0R", + evldw_2 = "10000303R8", + evldhx_3 = "10000304RR0R", + evldh_2 = "10000305R8", + evlwhex_3 = "10000310RR0R", + evlwhe_2 = "10000311R4", + evlwhoux_3 = "10000314RR0R", + evlwhou_2 = "10000315R4", + evlwhosx_3 = "10000316RR0R", + evlwhos_2 = "10000317R4", + evstddx_3 = "10000320RR0R", + evstdd_2 = "10000321R8", + evstdwx_3 = "10000322RR0R", + evstdw_2 = "10000323R8", + evstdhx_3 = "10000324RR0R", + evstdh_2 = "10000325R8", + evstwhex_3 = "10000330RR0R", + evstwhe_2 = "10000331R4", + evstwhox_3 = "10000334RR0R", + evstwho_2 = "10000335R4", + evstwwex_3 = "10000338RR0R", + evstwwe_2 = "10000339R4", + evstwwox_3 = "1000033cRR0R", + evstwwo_2 = "1000033dR4", + evmhessf_3 = "10000403RRR", + evmhossf_3 = "10000407RRR", + evmheumi_3 = "10000408RRR", + evmhesmi_3 = "10000409RRR", + evmhesmf_3 = "1000040bRRR", + evmhoumi_3 = "1000040cRRR", + evmhosmi_3 = "1000040dRRR", + evmhosmf_3 = "1000040fRRR", + evmhessfa_3 = "10000423RRR", + evmhossfa_3 = "10000427RRR", + evmheumia_3 = "10000428RRR", + evmhesmia_3 = "10000429RRR", + evmhesmfa_3 = "1000042bRRR", + evmhoumia_3 = "1000042cRRR", + evmhosmia_3 = "1000042dRRR", + evmhosmfa_3 = "1000042fRRR", + evmwhssf_3 = "10000447RRR", + evmwlumi_3 = "10000448RRR", + evmwhumi_3 = "1000044cRRR", + evmwhsmi_3 = "1000044dRRR", + evmwhsmf_3 = "1000044fRRR", + evmwssf_3 = "10000453RRR", + evmwumi_3 = "10000458RRR", + evmwsmi_3 = "10000459RRR", + evmwsmf_3 = "1000045bRRR", + evmwhssfa_3 = "10000467RRR", + evmwlumia_3 = "10000468RRR", + evmwhumia_3 = "1000046cRRR", + evmwhsmia_3 = "1000046dRRR", + evmwhsmfa_3 = "1000046fRRR", + evmwssfa_3 = "10000473RRR", + evmwumia_3 = "10000478RRR", + evmwsmia_3 = "10000479RRR", + evmwsmfa_3 = "1000047bRRR", + evmra_2 = "100004c4RR", + evdivws_3 = "100004c6RRR", + evdivwu_3 = "100004c7RRR", + evmwssfaa_3 = "10000553RRR", + evmwumiaa_3 = "10000558RRR", + evmwsmiaa_3 = "10000559RRR", + evmwsmfaa_3 = "1000055bRRR", + evmwssfan_3 = "100005d3RRR", + evmwumian_3 = "100005d8RRR", + evmwsmian_3 = "100005d9RRR", + evmwsmfan_3 = "100005dbRRR", + evmergehilo_3 = 
"1000022eRRR", + evmergelohi_3 = "1000022fRRR", + evlhhesplatx_3 = "10000308RR0R", + evlhhesplat_2 = "10000309R2", + evlhhousplatx_3 = "1000030cRR0R", + evlhhousplat_2 = "1000030dR2", + evlhhossplatx_3 = "1000030eRR0R", + evlhhossplat_2 = "1000030fR2", + evlwwsplatx_3 = "10000318RR0R", + evlwwsplat_2 = "10000319R4", + evlwhsplatx_3 = "1000031cRR0R", + evlwhsplat_2 = "1000031dR4", + evaddusiaaw_2 = "100004c0RR", + evaddssiaaw_2 = "100004c1RR", + evsubfusiaaw_2 = "100004c2RR", + evsubfssiaaw_2 = "100004c3RR", + evaddumiaaw_2 = "100004c8RR", + evaddsmiaaw_2 = "100004c9RR", + evsubfumiaaw_2 = "100004caRR", + evsubfsmiaaw_2 = "100004cbRR", + evmheusiaaw_3 = "10000500RRR", + evmhessiaaw_3 = "10000501RRR", + evmhessfaaw_3 = "10000503RRR", + evmhousiaaw_3 = "10000504RRR", + evmhossiaaw_3 = "10000505RRR", + evmhossfaaw_3 = "10000507RRR", + evmheumiaaw_3 = "10000508RRR", + evmhesmiaaw_3 = "10000509RRR", + evmhesmfaaw_3 = "1000050bRRR", + evmhoumiaaw_3 = "1000050cRRR", + evmhosmiaaw_3 = "1000050dRRR", + evmhosmfaaw_3 = "1000050fRRR", + evmhegumiaa_3 = "10000528RRR", + evmhegsmiaa_3 = "10000529RRR", + evmhegsmfaa_3 = "1000052bRRR", + evmhogumiaa_3 = "1000052cRRR", + evmhogsmiaa_3 = "1000052dRRR", + evmhogsmfaa_3 = "1000052fRRR", + evmwlusiaaw_3 = "10000540RRR", + evmwlssiaaw_3 = "10000541RRR", + evmwlumiaaw_3 = "10000548RRR", + evmwlsmiaaw_3 = "10000549RRR", + evmheusianw_3 = "10000580RRR", + evmhessianw_3 = "10000581RRR", + evmhessfanw_3 = "10000583RRR", + evmhousianw_3 = "10000584RRR", + evmhossianw_3 = "10000585RRR", + evmhossfanw_3 = "10000587RRR", + evmheumianw_3 = "10000588RRR", + evmhesmianw_3 = "10000589RRR", + evmhesmfanw_3 = "1000058bRRR", + evmhoumianw_3 = "1000058cRRR", + evmhosmianw_3 = "1000058dRRR", + evmhosmfanw_3 = "1000058fRRR", + evmhegumian_3 = "100005a8RRR", + evmhegsmian_3 = "100005a9RRR", + evmhegsmfan_3 = "100005abRRR", + evmhogumian_3 = "100005acRRR", + evmhogsmian_3 = "100005adRRR", + evmhogsmfan_3 = "100005afRRR", + evmwlusianw_3 = "100005c0RRR", + evmwlssianw_3 = "100005c1RRR", + evmwlumianw_3 = "100005c8RRR", + evmwlsmianw_3 = "100005c9RRR", + + -- NYI: some 64 bit PowerPC and Book E instructions: + -- rldicl, rldicr, rldic, rldimi, rldcl, rldcr, sradi, 64 bit ext. add/sub, + -- extended addressing branches, cache management, loads and stores +} + +-- Add mnemonics for "." variants. +do + local t = {} + for k,v in pairs(map_op) do + if sub(v, -1) == "." then + local v2 = sub(v, 1, 7)..char(byte(v, 8)+1)..sub(v, 9, -2) + t[sub(k, 1, -3).."."..sub(k, -2)] = v2 + end + end + for k,v in pairs(t) do + map_op[k] = v + end +end + +-- Add more branch mnemonics. 
+for cond,c in pairs(map_cond) do + local b1 = "b"..cond + local c1 = (c%4)*0x00010000 + (c < 4 and 0x01000000 or 0) + -- bX[l] + map_op[b1.."_1"] = tohex(0x40800000 + c1).."K" + map_op[b1.."l_1"] = tohex(0x40800001 + c1).."K" + map_op[b1.."_2"] = tohex(0x40800000 + c1).."-XK" + map_op[b1.."l_2"] = tohex(0x40800001 + c1).."-XK" + -- bXlr[l] + map_op[b1.."lr_0"] = tohex(0x4c800020 + c1) + map_op[b1.."lrl_0"] = tohex(0x4c800021 + c1) + map_op[b1.."ctr_0"] = tohex(0x4c800420 + c1) + map_op[b1.."ctrl_0"] = tohex(0x4c800421 + c1) + -- bXctr[l] + map_op[b1.."lr_1"] = tohex(0x4c800020 + c1).."-X" + map_op[b1.."lrl_1"] = tohex(0x4c800021 + c1).."-X" + map_op[b1.."ctr_1"] = tohex(0x4c800420 + c1).."-X" + map_op[b1.."ctrl_1"] = tohex(0x4c800421 + c1).."-X" +end + +------------------------------------------------------------------------------ + +local function parse_gpr(expr) + local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$") + local tp = map_type[tname or expr] + if tp then + local reg = ovreg or tp.reg + if not reg then + werror("type `"..(tname or expr).."' needs a register override") + end + expr = reg + end + local r = match(expr, "^r([1-3]?[0-9])$") + if r then + r = tonumber(r) + if r <= 31 then return r, tp end + end + werror("bad register name `"..expr.."'") +end + +local function parse_fpr(expr) + local r = match(expr, "^f([1-3]?[0-9])$") + if r then + r = tonumber(r) + if r <= 31 then return r end + end + werror("bad register name `"..expr.."'") +end + +local function parse_cr(expr) + local r = match(expr, "^cr([0-7])$") + if r then return tonumber(r) end + werror("bad condition register name `"..expr.."'") +end + +local function parse_cond(expr) + local r, cond = match(expr, "^4%*cr([0-7])%+(%w%w)$") + if r then + r = tonumber(r) + local c = map_cond[cond] + if c and c < 4 then return r*4+c end + end + werror("bad condition bit name `"..expr.."'") +end + +local function parse_imm(imm, bits, shift, scale, signed) + local n = tonumber(imm) + if n then + if n % 2^scale == 0 then + n = n / 2^scale + if signed then + if n >= 0 then + if n < 2^(bits-1) then return n*2^shift end + else + if n >= -(2^(bits-1))-1 then return (n+2^bits)*2^shift end + end + else + if n >= 0 and n <= 2^bits-1 then return n*2^shift end + end + end + werror("out of range immediate `"..imm.."'") + elseif match(imm, "^r([1-3]?[0-9])$") or + match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then + werror("expected immediate operand, got register") + else + waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm) + return 0 + end +end + +local function parse_disp(disp) + local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$") + if imm then + local r = parse_gpr(reg) + if r == 0 then werror("cannot use r0 in displacement") end + return r*65536 + parse_imm(imm, 16, 0, 0, true) + end + local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local r, tp = parse_gpr(reg) + if r == 0 then werror("cannot use r0 in displacement") end + if tp then + waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr)) + return r*65536 + end + end + werror("bad displacement `"..disp.."'") +end + +local function parse_u5disp(disp, scale) + local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$") + if imm then + local r = parse_gpr(reg) + if r == 0 then werror("cannot use r0 in displacement") end + return r*65536 + parse_imm(imm, 5, 11, scale, false) + end + local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$") + if reg and tailr ~= "" then + local r, tp = parse_gpr(reg) + if r == 0 then werror("cannot use r0 in 
displacement") end + if tp then + waction("IMM", scale*1024+5*32+11, format(tp.ctypefmt, tailr)) + return r*65536 + end + end + werror("bad displacement `"..disp.."'") +end + +local function parse_label(label, def) + local prefix = sub(label, 1, 2) + -- =>label (pc label reference) + if prefix == "=>" then + return "PC", 0, sub(label, 3) + end + -- ->name (global label reference) + if prefix == "->" then + return "LG", map_global[sub(label, 3)] + end + if def then + -- [1-9] (local label definition) + if match(label, "^[1-9]$") then + return "LG", 10+tonumber(label) + end + else + -- [<>][1-9] (local label reference) + local dir, lnum = match(label, "^([<>])([1-9])$") + if dir then -- Fwd: 1-9, Bkwd: 11-19. + return "LG", lnum + (dir == ">" and 0 or 10) + end + -- extern label (extern label reference) + local extname = match(label, "^extern%s+(%S+)$") + if extname then + return "EXT", map_extern[extname] + end + end + werror("bad label `"..label.."'") +end + +------------------------------------------------------------------------------ + +-- Handle opcodes defined with template strings. +map_op[".template__"] = function(params, template, nparams) + if not params then return sub(template, 9) end + local op = tonumber(sub(template, 1, 8), 16) + local n, rs = 1, 26 + + -- Limit number of section buffer positions used by a single dasm_put(). + -- A single opcode needs a maximum of 3 positions (rlwinm). + if secpos+3 > maxsecpos then wflush() end + local pos = wpos() + + -- Process each character. + for p in gmatch(sub(template, 9), ".") do + if p == "R" then + rs = rs - 5; op = op + parse_gpr(params[n]) * 2^rs; n = n + 1 + elseif p == "F" then + rs = rs - 5; op = op + parse_fpr(params[n]) * 2^rs; n = n + 1 + elseif p == "A" then + rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, false); n = n + 1 + elseif p == "S" then + rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, true); n = n + 1 + elseif p == "I" then + op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1 + elseif p == "U" then + op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1 + elseif p == "D" then + op = op + parse_disp(params[n]); n = n + 1 + elseif p == "2" then + op = op + parse_u5disp(params[n], 1); n = n + 1 + elseif p == "4" then + op = op + parse_u5disp(params[n], 2); n = n + 1 + elseif p == "8" then + op = op + parse_u5disp(params[n], 3); n = n + 1 + elseif p == "C" then + rs = rs - 5; op = op + parse_cond(params[n]) * 2^rs; n = n + 1 + elseif p == "X" then + rs = rs - 5; op = op + parse_cr(params[n]) * 2^(rs+2); n = n + 1 + elseif p == "W" then + op = op + parse_cr(params[n]); n = n + 1 + elseif p == "J" or p == "K" then + local mode, n, s = parse_label(params[n], false) + if p == "K" then n = n + 2048 end + waction("REL_"..mode, n, s, 1) + n = n + 1 + elseif p == "0" then + local mm = 2^rs + local t = op % mm + if ((op - t) / mm) % 32 == 0 then werror("cannot use r0") end + elseif p == "=" or p == "%" then + local mm = 2^(rs + (p == "%" and 5 or 0)) + local t = ((op - op % mm) / mm) % 32 + rs = rs - 5 + op = op + t * 2^rs + elseif p == "~" then + local mm = 2^rs + local t1l = op % mm + local t1h = (op - t1l) / mm + local t2l = t1h % 32 + local t2h = (t1h - t2l) / 32 + local t3l = t2h % 32 + op = ((t2h - t3l + t2l)*32 + t3l)*mm + t1l + elseif p == "-" then + rs = rs - 5 + elseif p == "." then + -- Ignored. 
+ else + assert(false) + end + end + wputpos(pos, op) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode to mark the position where the action list is to be emitted. +map_op[".actionlist_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeactions(out, name) end) +end + +-- Pseudo-opcode to mark the position where the global enum is to be emitted. +map_op[".globals_1"] = function(params) + if not params then return "prefix" end + local prefix = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobals(out, prefix) end) +end + +-- Pseudo-opcode to mark the position where the global names are to be emitted. +map_op[".globalnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobalnames(out, name) end) +end + +-- Pseudo-opcode to mark the position where the extern names are to be emitted. +map_op[".externnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeexternnames(out, name) end) +end + +------------------------------------------------------------------------------ + +-- Label pseudo-opcode (converted from trailing colon form). +map_op[".label_1"] = function(params) + if not params then return "[1-9] | ->global | =>pcexpr" end + if secpos+1 > maxsecpos then wflush() end + local mode, n, s = parse_label(params[1], true) + if mode == "EXT" then werror("bad label definition") end + waction("LABEL_"..mode, n, s, 1) +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcodes for data storage. +map_op[".long_*"] = function(params) + if not params then return "imm..." end + for _,p in ipairs(params) do + local n = tonumber(p) + if not n then werror("bad immediate `"..p.."'") end + if n < 0 then n = n + 2^32 end + wputw(n) + if secpos+2 > maxsecpos then wflush() end + end +end + +-- Alignment pseudo-opcode. +map_op[".align_1"] = function(params) + if not params then return "numpow2" end + if secpos+1 > maxsecpos then wflush() end + local align = tonumber(params[1]) + if align then + local x = align + -- Must be a power of 2 in the range (2 ... 256). + for i=1,8 do + x = x / 2 + if x == 1 then + waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1. + return + end + end + end + werror("bad alignment") +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcode for (primitive) type definitions (map to C types). +map_op[".type_3"] = function(params, nparams) + if not params then + return nparams == 2 and "name, ctype" or "name, ctype, reg" + end + local name, ctype, reg = params[1], params[2], params[3] + if not match(name, "^[%a_][%w_]*$") then + werror("bad type name `"..name.."'") + end + local tp = map_type[name] + if tp then + werror("duplicate type `"..name.."'") + end + -- Add #type to defines. A bit unclean to put it in map_archdef. + map_archdef["#"..name] = "sizeof("..ctype..")" + -- Add new type and emit shortcut define. 
+ local num = ctypenum + 1 + map_type[name] = { + ctype = ctype, + ctypefmt = format("Dt%X(%%s)", num), + reg = reg, + } + wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) + ctypenum = num +end +map_op[".type_2"] = map_op[".type_3"] + +-- Dump type definitions. +local function dumptypes(out, lvl) + local t = {} + for name in pairs(map_type) do t[#t+1] = name end + sort(t) + out:write("Type definitions:\n") + for _,name in ipairs(t) do + local tp = map_type[name] + local reg = tp.reg or "" + out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Set the current section. +function _M.section(num) + waction("SECTION", num) + wflush(true) -- SECTION is a terminal action. +end + +------------------------------------------------------------------------------ + +-- Dump architecture description. +function _M.dumparch(out) + out:write(format("DynASM %s version %s, released %s\n\n", + _info.arch, _info.version, _info.release)) + dumpactions(out) +end + +-- Dump all user defined elements. +function _M.dumpdef(out, lvl) + dumptypes(out, lvl) + dumpglobals(out, lvl) + dumpexterns(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Pass callbacks from/to the DynASM core. +function _M.passcb(wl, we, wf, ww) + wline, werror, wfatal, wwarn = wl, we, wf, ww + return wflush +end + +-- Setup the arch-specific module. +function _M.setup(arch, opt) + g_arch, g_opt = arch, opt +end + +-- Merge the core maps and the arch-specific maps. +function _M.mergemaps(map_coreop, map_def) + setmetatable(map_op, { __index = map_coreop }) + setmetatable(map_def, { __index = map_archdef }) + return map_op, map_def +end + +return _M + +------------------------------------------------------------------------------ + diff --git a/src/luajit2/dynasm/dasm_proto.h b/src/luajit2/dynasm/dasm_proto.h new file mode 100644 index 0000000000..dc9ed51067 --- /dev/null +++ b/src/luajit2/dynasm/dasm_proto.h @@ -0,0 +1,83 @@ +/* +** DynASM encoding engine prototypes. +** Copyright (C) 2005-2011 Mike Pall. All rights reserved. +** Released under the MIT/X license. See dynasm.lua for full copyright notice. +*/ + +#ifndef _DASM_PROTO_H +#define _DASM_PROTO_H + +#include <stddef.h> +#include <stdarg.h> + +#define DASM_IDENT "DynASM 1.3.0" +#define DASM_VERSION 10300 /* 1.3.0 */ + +#ifndef Dst_DECL +#define Dst_DECL dasm_State **Dst +#endif + +#ifndef Dst_REF +#define Dst_REF (*Dst) +#endif + +#ifndef DASM_FDEF +#define DASM_FDEF extern +#endif + +#ifndef DASM_M_GROW +#define DASM_M_GROW(ctx, t, p, sz, need) \ + do { \ + size_t _sz = (sz), _need = (need); \ + if (_sz < _need) { \ + if (_sz < 16) _sz = 16; \ + while (_sz < _need) _sz += _sz; \ + (p) = (t *)realloc((p), _sz); \ + if ((p) == NULL) exit(1); \ + (sz) = _sz; \ + } \ + } while(0) +#endif + +#ifndef DASM_M_FREE +#define DASM_M_FREE(ctx, p, sz) free(p) +#endif + +/* Internal DynASM encoder state. */ +typedef struct dasm_State dasm_State; + + +/* Initialize and free DynASM state. */ +DASM_FDEF void dasm_init(Dst_DECL, int maxsection); +DASM_FDEF void dasm_free(Dst_DECL); + +/* Setup global array. Must be called before dasm_setup(). */ +DASM_FDEF void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl); + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +DASM_FDEF void dasm_growpc(Dst_DECL, unsigned int maxpc); + +/* Setup encoder. 
*/ +DASM_FDEF void dasm_setup(Dst_DECL, const void *actionlist); + +/* Feed encoder with actions. Calls are generated by pre-processor. */ +DASM_FDEF void dasm_put(Dst_DECL, int start, ...); + +/* Link sections and return the resulting size. */ +DASM_FDEF int dasm_link(Dst_DECL, size_t *szp); + +/* Encode sections into buffer. */ +DASM_FDEF int dasm_encode(Dst_DECL, void *buffer); + +/* Get PC label offset. */ +DASM_FDEF int dasm_getpclabel(Dst_DECL, unsigned int pc); + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. */ +DASM_FDEF int dasm_checkstep(Dst_DECL, int secmatch); +#else +#define dasm_checkstep(a, b) 0 +#endif + + +#endif /* _DASM_PROTO_H */ diff --git a/src/luajit2/dynasm/dasm_x64.lua b/src/luajit2/dynasm/dasm_x64.lua new file mode 100644 index 0000000000..73e01e919a --- /dev/null +++ b/src/luajit2/dynasm/dasm_x64.lua @@ -0,0 +1,12 @@ +------------------------------------------------------------------------------ +-- DynASM x64 module. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ +-- This module just sets 64 bit mode for the combined x86/x64 module. +-- All the interesting stuff is there. +------------------------------------------------------------------------------ + +x64 = true -- Using a global is an ugly, but effective solution. +return require("dasm_x86") diff --git a/src/luajit2/dynasm/dasm_x86.h b/src/luajit2/dynasm/dasm_x86.h new file mode 100644 index 0000000000..23e213cb6e --- /dev/null +++ b/src/luajit2/dynasm/dasm_x86.h @@ -0,0 +1,470 @@ +/* +** DynASM x86 encoding engine. +** Copyright (C) 2005-2011 Mike Pall. All rights reserved. +** Released under the MIT/X license. See dynasm.lua for full copyright notice. +*/ + +#include <stddef.h> +#include <stdarg.h> +#include <string.h> +#include <stdlib.h> + +#define DASM_ARCH "x86" + +#ifndef DASM_EXTERN +#define DASM_EXTERN(a,b,c,d) 0 +#endif + +/* Action definitions. DASM_STOP must be 255. */ +enum { + DASM_DISP = 233, + DASM_IMM_S, DASM_IMM_B, DASM_IMM_W, DASM_IMM_D, DASM_IMM_WB, DASM_IMM_DB, + DASM_VREG, DASM_SPACE, DASM_SETLABEL, DASM_REL_A, DASM_REL_LG, DASM_REL_PC, + DASM_IMM_LG, DASM_IMM_PC, DASM_LABEL_LG, DASM_LABEL_PC, DASM_ALIGN, + DASM_EXTERN, DASM_ESC, DASM_MARK, DASM_SECTION, DASM_STOP +}; + +/* Maximum number of section buffer positions for a single dasm_put() call. */ +#define DASM_MAXSECPOS 25 + +/* DynASM encoder status codes. Action list offset or number are or'ed in. */ +#define DASM_S_OK 0x00000000 +#define DASM_S_NOMEM 0x01000000 +#define DASM_S_PHASE 0x02000000 +#define DASM_S_MATCH_SEC 0x03000000 +#define DASM_S_RANGE_I 0x11000000 +#define DASM_S_RANGE_SEC 0x12000000 +#define DASM_S_RANGE_LG 0x13000000 +#define DASM_S_RANGE_PC 0x14000000 +#define DASM_S_RANGE_VREG 0x15000000 +#define DASM_S_UNDEF_L 0x21000000 +#define DASM_S_UNDEF_PC 0x22000000 + +/* Macros to convert positions (8 bit section + 24 bit index). */ +#define DASM_POS2IDX(pos) ((pos)&0x00ffffff) +#define DASM_POS2BIAS(pos) ((pos)&0xff000000) +#define DASM_SEC2POS(sec) ((sec)<<24) +#define DASM_POS2SEC(pos) ((pos)>>24) +#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos)) + +/* Action list type. */ +typedef const unsigned char *dasm_ActList; + +/* Per-section structure. */ +typedef struct dasm_Section { + int *rbuf; /* Biased buffer pointer (negative section bias). */ + int *buf; /* True buffer pointer. 
*/ + size_t bsize; /* Buffer size in bytes. */ + int pos; /* Biased buffer position. */ + int epos; /* End of biased buffer position - max single put. */ + int ofs; /* Byte offset into section. */ +} dasm_Section; + +/* Core structure holding the DynASM encoding state. */ +struct dasm_State { + size_t psize; /* Allocated size of this structure. */ + dasm_ActList actionlist; /* Current actionlist pointer. */ + int *lglabels; /* Local/global chain/pos ptrs. */ + size_t lgsize; + int *pclabels; /* PC label chains/pos ptrs. */ + size_t pcsize; + void **globals; /* Array of globals (bias -10). */ + dasm_Section *section; /* Pointer to active section. */ + size_t codesize; /* Total size of all code sections. */ + int maxsection; /* 0 <= sectionidx < maxsection. */ + int status; /* Status code. */ + dasm_Section sections[1]; /* All sections. Alloc-extended. */ +}; + +/* The size of the core structure depends on the max. number of sections. */ +#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section)) + + +/* Initialize DynASM state. */ +void dasm_init(Dst_DECL, int maxsection) +{ + dasm_State *D; + size_t psz = 0; + int i; + Dst_REF = NULL; + DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection)); + D = Dst_REF; + D->psize = psz; + D->lglabels = NULL; + D->lgsize = 0; + D->pclabels = NULL; + D->pcsize = 0; + D->globals = NULL; + D->maxsection = maxsection; + for (i = 0; i < maxsection; i++) { + D->sections[i].buf = NULL; /* Need this for pass3. */ + D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i); + D->sections[i].bsize = 0; + D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */ + } +} + +/* Free DynASM state. */ +void dasm_free(Dst_DECL) +{ + dasm_State *D = Dst_REF; + int i; + for (i = 0; i < D->maxsection; i++) + if (D->sections[i].buf) + DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize); + if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize); + if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize); + DASM_M_FREE(Dst, D, D->psize); +} + +/* Setup global label array. Must be called before dasm_setup(). */ +void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl) +{ + dasm_State *D = Dst_REF; + D->globals = gl - 10; /* Negative bias to compensate for locals. */ + DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int)); +} + +/* Grow PC label array. Can be called after dasm_setup(), too. */ +void dasm_growpc(Dst_DECL, unsigned int maxpc) +{ + dasm_State *D = Dst_REF; + size_t osz = D->pcsize; + DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int)); + memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz); +} + +/* Setup encoder. 
*/ +void dasm_setup(Dst_DECL, const void *actionlist) +{ + dasm_State *D = Dst_REF; + int i; + D->actionlist = (dasm_ActList)actionlist; + D->status = DASM_S_OK; + D->section = &D->sections[0]; + memset((void *)D->lglabels, 0, D->lgsize); + if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize); + for (i = 0; i < D->maxsection; i++) { + D->sections[i].pos = DASM_SEC2POS(i); + D->sections[i].ofs = 0; + } +} + + +#ifdef DASM_CHECKS +#define CK(x, st) \ + do { if (!(x)) { \ + D->status = DASM_S_##st|(int)(p-D->actionlist-1); return; } } while (0) +#define CKPL(kind, st) \ + do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \ + D->status=DASM_S_RANGE_##st|(int)(p-D->actionlist-1); return; } } while (0) +#else +#define CK(x, st) ((void)0) +#define CKPL(kind, st) ((void)0) +#endif + +/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */ +void dasm_put(Dst_DECL, int start, ...) +{ + va_list ap; + dasm_State *D = Dst_REF; + dasm_ActList p = D->actionlist + start; + dasm_Section *sec = D->section; + int pos = sec->pos, ofs = sec->ofs, mrm = 4; + int *b; + + if (pos >= sec->epos) { + DASM_M_GROW(Dst, int, sec->buf, sec->bsize, + sec->bsize + 2*DASM_MAXSECPOS*sizeof(int)); + sec->rbuf = sec->buf - DASM_POS2BIAS(pos); + sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos); + } + + b = sec->rbuf; + b[pos++] = start; + + va_start(ap, start); + while (1) { + int action = *p++; + if (action < DASM_DISP) { + ofs++; + } else if (action <= DASM_REL_A) { + int n = va_arg(ap, int); + b[pos++] = n; + switch (action) { + case DASM_DISP: + if (n == 0) { if ((mrm&7) == 4) mrm = p[-2]; if ((mrm&7) != 5) break; } + case DASM_IMM_DB: if (((n+128)&-256) == 0) goto ob; + case DASM_REL_A: /* Assumes ptrdiff_t is int. !x64 */ + case DASM_IMM_D: ofs += 4; break; + case DASM_IMM_S: CK(((n+128)&-256) == 0, RANGE_I); goto ob; + case DASM_IMM_B: CK((n&-256) == 0, RANGE_I); ob: ofs++; break; + case DASM_IMM_WB: if (((n+128)&-256) == 0) goto ob; + case DASM_IMM_W: CK((n&-65536) == 0, RANGE_I); ofs += 2; break; + case DASM_SPACE: p++; ofs += n; break; + case DASM_SETLABEL: b[pos-2] = -0x40000000; break; /* Neg. label ofs. */ + case DASM_VREG: CK((n&-8) == 0 && (n != 4 || (*p&1) == 0), RANGE_VREG); + if (*p++ == 1 && *p == DASM_DISP) mrm = n; continue; + } + mrm = 4; + } else { + int *pl, n; + switch (action) { + case DASM_REL_LG: + case DASM_IMM_LG: + n = *p++; pl = D->lglabels + n; + if (n <= 246) { CKPL(lg, LG); goto putrel; } /* Bkwd rel or global. */ + pl -= 246; n = *pl; + if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */ + goto linkrel; + case DASM_REL_PC: + case DASM_IMM_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC); + putrel: + n = *pl; + if (n < 0) { /* Label exists. Get label pos and store it. */ + b[pos] = -n; + } else { + linkrel: + b[pos] = n; /* Else link to rel chain, anchored at label. */ + *pl = pos; + } + pos++; + ofs += 4; /* Maximum offset needed. */ + if (action == DASM_REL_LG || action == DASM_REL_PC) + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_LABEL_LG: pl = D->lglabels + *p++; CKPL(lg, LG); goto putlabel; + case DASM_LABEL_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC); + putlabel: + n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; } + *pl = -pos; /* Label exists now. */ + b[pos++] = ofs; /* Store pass1 offset estimate. 
*/ + break; + case DASM_ALIGN: + ofs += *p++; /* Maximum alignment needed (arg is 2**n-1). */ + b[pos++] = ofs; /* Store pass1 offset estimate. */ + break; + case DASM_EXTERN: p += 2; ofs += 4; break; + case DASM_ESC: p++; ofs++; break; + case DASM_MARK: mrm = p[-2]; break; + case DASM_SECTION: + n = *p; CK(n < D->maxsection, RANGE_SEC); D->section = &D->sections[n]; + case DASM_STOP: goto stop; + } + } + } +stop: + va_end(ap); + sec->pos = pos; + sec->ofs = ofs; +} +#undef CK + +/* Pass 2: Link sections, shrink branches/aligns, fix label offsets. */ +int dasm_link(Dst_DECL, size_t *szp) +{ + dasm_State *D = Dst_REF; + int secnum; + int ofs = 0; + +#ifdef DASM_CHECKS + *szp = 0; + if (D->status != DASM_S_OK) return D->status; + { + int pc; + for (pc = 0; pc*sizeof(int) < D->pcsize; pc++) + if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc; + } +#endif + + { /* Handle globals not defined in this translation unit. */ + int idx; + for (idx = 10; idx*sizeof(int) < D->lgsize; idx++) { + int n = D->lglabels[idx]; + /* Undefined label: Collapse rel chain and replace with marker (< 0). */ + while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; } + } + } + + /* Combine all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->rbuf; + int pos = DASM_SEC2POS(secnum); + int lastpos = sec->pos; + + while (pos != lastpos) { + dasm_ActList p = D->actionlist + b[pos++]; + while (1) { + int op, action = *p++; + switch (action) { + case DASM_REL_LG: p++; op = p[-3]; goto rel_pc; + case DASM_REL_PC: op = p[-2]; rel_pc: { + int shrink = op == 0xe9 ? 3 : ((op&0xf0) == 0x80 ? 4 : 0); + if (shrink) { /* Shrinkable branch opcode? */ + int lofs, lpos = b[pos]; + if (lpos < 0) goto noshrink; /* Ext global? */ + lofs = *DASM_POS2PTR(D, lpos); + if (lpos > pos) { /* Fwd label: add cumulative section offsets. */ + int i; + for (i = secnum; i < DASM_POS2SEC(lpos); i++) + lofs += D->sections[i].ofs; + } else { + lofs -= ofs; /* Bkwd label: unfix offset. */ + } + lofs -= b[pos+1]; /* Short branch ok? */ + if (lofs >= -128-shrink && lofs <= 127) ofs -= shrink; /* Yes. */ + else { noshrink: shrink = 0; } /* No, cannot shrink op. */ + } + b[pos+1] = shrink; + pos += 2; + break; + } + case DASM_SPACE: case DASM_IMM_LG: case DASM_VREG: p++; + case DASM_DISP: case DASM_IMM_S: case DASM_IMM_B: case DASM_IMM_W: + case DASM_IMM_D: case DASM_IMM_WB: case DASM_IMM_DB: + case DASM_SETLABEL: case DASM_REL_A: case DASM_IMM_PC: pos++; break; + case DASM_LABEL_LG: p++; + case DASM_LABEL_PC: b[pos++] += ofs; break; /* Fix label offset. */ + case DASM_ALIGN: ofs -= (b[pos++]+ofs)&*p++; break; /* Adjust ofs. */ + case DASM_EXTERN: p += 2; break; + case DASM_ESC: p++; break; + case DASM_MARK: break; + case DASM_SECTION: case DASM_STOP: goto stop; + } + } + stop: (void)0; + } + ofs += sec->ofs; /* Next section starts right after current section. */ + } + + D->codesize = ofs; /* Total size of all code sections */ + *szp = ofs; + return DASM_S_OK; +} + +#define dasmb(x) *cp++ = (unsigned char)(x) +#ifndef DASM_ALIGNED_WRITES +#define dasmw(x) \ + do { *((unsigned short *)cp) = (unsigned short)(x); cp+=2; } while (0) +#define dasmd(x) \ + do { *((unsigned int *)cp) = (unsigned int)(x); cp+=4; } while (0) +#else +#define dasmw(x) do { dasmb(x); dasmb((x)>>8); } while (0) +#define dasmd(x) do { dasmw(x); dasmw((x)>>16); } while (0) +#endif + +/* Pass 3: Encode sections. 
*/ +int dasm_encode(Dst_DECL, void *buffer) +{ + dasm_State *D = Dst_REF; + unsigned char *base = (unsigned char *)buffer; + unsigned char *cp = base; + int secnum; + + /* Encode all code sections. No support for data sections (yet). */ + for (secnum = 0; secnum < D->maxsection; secnum++) { + dasm_Section *sec = D->sections + secnum; + int *b = sec->buf; + int *endb = sec->rbuf + sec->pos; + + while (b != endb) { + dasm_ActList p = D->actionlist + *b++; + unsigned char *mark = NULL; + while (1) { + int action = *p++; + int n = (action >= DASM_DISP && action <= DASM_ALIGN) ? *b++ : 0; + switch (action) { + case DASM_DISP: if (!mark) mark = cp; { + unsigned char *mm = mark; + if (*p != DASM_IMM_DB && *p != DASM_IMM_WB) mark = NULL; + if (n == 0) { int mrm = mm[-1]&7; if (mrm == 4) mrm = mm[0]&7; + if (mrm != 5) { mm[-1] -= 0x80; break; } } + if (((n+128) & -256) != 0) goto wd; else mm[-1] -= 0x40; + } + case DASM_IMM_S: case DASM_IMM_B: wb: dasmb(n); break; + case DASM_IMM_DB: if (((n+128)&-256) == 0) { + db: if (!mark) mark = cp; mark[-2] += 2; mark = NULL; goto wb; + } else mark = NULL; + case DASM_IMM_D: wd: dasmd(n); break; + case DASM_IMM_WB: if (((n+128)&-256) == 0) goto db; else mark = NULL; + case DASM_IMM_W: dasmw(n); break; + case DASM_VREG: { int t = *p++; if (t >= 2) n<<=3; cp[-1] |= n; break; } + case DASM_REL_LG: p++; if (n >= 0) goto rel_pc; + b++; n = (int)(ptrdiff_t)D->globals[-n]; + case DASM_REL_A: rel_a: n -= (int)(ptrdiff_t)(cp+4); goto wd; /* !x64 */ + case DASM_REL_PC: rel_pc: { + int shrink = *b++; + int *pb = DASM_POS2PTR(D, n); if (*pb < 0) { n = pb[1]; goto rel_a; } + n = *pb - ((int)(cp-base) + 4-shrink); + if (shrink == 0) goto wd; + if (shrink == 4) { cp--; cp[-1] = *cp-0x10; } else cp[-1] = 0xeb; + goto wb; + } + case DASM_IMM_LG: + p++; if (n < 0) { n = (int)(ptrdiff_t)D->globals[-n]; goto wd; } + case DASM_IMM_PC: { + int *pb = DASM_POS2PTR(D, n); + n = *pb < 0 ? pb[1] : (*pb + (int)(ptrdiff_t)base); + goto wd; + } + case DASM_LABEL_LG: { + int idx = *p++; + if (idx >= 10) + D->globals[idx] = (void *)(base + (*p == DASM_SETLABEL ? *b : n)); + break; + } + case DASM_LABEL_PC: case DASM_SETLABEL: break; + case DASM_SPACE: { int fill = *p++; while (n--) *cp++ = fill; break; } + case DASM_ALIGN: + n = *p++; + while (((cp-base) & n)) *cp++ = 0x90; /* nop */ + break; + case DASM_EXTERN: n = DASM_EXTERN(Dst, cp, p[1], *p); p += 2; goto wd; + case DASM_MARK: mark = cp; break; + case DASM_ESC: action = *p++; + default: *cp++ = action; break; + case DASM_SECTION: case DASM_STOP: goto stop; + } + } + stop: (void)0; + } + } + + if (base + D->codesize != cp) /* Check for phase errors. */ + return DASM_S_PHASE; + return DASM_S_OK; +} + +/* Get PC label offset. */ +int dasm_getpclabel(Dst_DECL, unsigned int pc) +{ + dasm_State *D = Dst_REF; + if (pc*sizeof(int) < D->pcsize) { + int pos = D->pclabels[pc]; + if (pos < 0) return *DASM_POS2PTR(D, -pos); + if (pos > 0) return -1; /* Undefined. */ + } + return -2; /* Unused or out of range. */ +} + +#ifdef DASM_CHECKS +/* Optional sanity checker to call between isolated encoding steps. 
*/ +int dasm_checkstep(Dst_DECL, int secmatch) +{ + dasm_State *D = Dst_REF; + if (D->status == DASM_S_OK) { + int i; + for (i = 1; i <= 9; i++) { + if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_L|i; break; } + D->lglabels[i] = 0; + } + } + if (D->status == DASM_S_OK && secmatch >= 0 && + D->section != &D->sections[secmatch]) + D->status = DASM_S_MATCH_SEC|(int)(D->section-D->sections); + return D->status; +} +#endif + diff --git a/src/luajit2/dynasm/dasm_x86.lua b/src/luajit2/dynasm/dasm_x86.lua new file mode 100644 index 0000000000..5571f93e68 --- /dev/null +++ b/src/luajit2/dynasm/dasm_x86.lua @@ -0,0 +1,1931 @@ +------------------------------------------------------------------------------ +-- DynASM x86/x64 module. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- See dynasm.lua for full copyright notice. +------------------------------------------------------------------------------ + +local x64 = x64 + +-- Module information: +local _info = { + arch = x64 and "x64" or "x86", + description = "DynASM x86/x64 module", + version = "1.3.0", + vernum = 10300, + release = "2011-05-05", + author = "Mike Pall", + license = "MIT", +} + +-- Exported glue functions for the arch-specific module. +local _M = { _info = _info } + +-- Cache library functions. +local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs +local assert, unpack, setmetatable = assert, unpack, setmetatable +local _s = string +local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char +local find, match, gmatch, gsub = _s.find, _s.match, _s.gmatch, _s.gsub +local concat, sort = table.concat, table.sort + +-- Inherited tables and callbacks. +local g_opt, g_arch +local wline, werror, wfatal, wwarn + +-- Action name list. +-- CHECK: Keep this in sync with the C code! +local action_names = { + -- int arg, 1 buffer pos: + "DISP", "IMM_S", "IMM_B", "IMM_W", "IMM_D", "IMM_WB", "IMM_DB", + -- action arg (1 byte), int arg, 1 buffer pos (reg/num): + "VREG", "SPACE", -- !x64: VREG support NYI. + -- ptrdiff_t arg, 1 buffer pos (address): !x64 + "SETLABEL", "REL_A", + -- action arg (1 byte) or int arg, 2 buffer pos (link, offset): + "REL_LG", "REL_PC", + -- action arg (1 byte) or int arg, 1 buffer pos (link): + "IMM_LG", "IMM_PC", + -- action arg (1 byte) or int arg, 1 buffer pos (offset): + "LABEL_LG", "LABEL_PC", + -- action arg (1 byte), 1 buffer pos (offset): + "ALIGN", + -- action args (2 bytes), no buffer pos. + "EXTERN", + -- action arg (1 byte), no buffer pos. + "ESC", + -- no action arg, no buffer pos. + "MARK", + -- action arg (1 byte), no buffer pos, terminal action: + "SECTION", + -- no args, no buffer pos, terminal action: + "STOP" +} + +-- Maximum number of section buffer positions for dasm_put(). +-- CHECK: Keep this in sync with the C code! +local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines. + +-- Action name -> action number (dynamically generated below). +local map_action = {} +-- First action number. Everything below does not need to be escaped. +local actfirst = 256-#action_names + +-- Action list buffer and string (only used to remove dupes). +local actlist = {} +local actstr = "" + +-- Argument list for next dasm_put(). Start with offset 0 into action list. +local actargs = { 0 } + +-- Current number of section buffer positions for dasm_put(). +local secpos = 1 + +------------------------------------------------------------------------------ + +-- Compute action numbers for action names. 
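+-- Illustrative note (editor's sketch): with the 23 action names above,
+-- actfirst is 256-23 = 233, so "DISP" maps to 233 and "STOP" to 255,
+-- matching the DASM_DISP = 233 ... DASM_STOP = 255 enum in dasm_x86.h.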
+for n,name in ipairs(action_names) do + local num = actfirst + n - 1 + map_action[name] = num +end + +-- Dump action names and numbers. +local function dumpactions(out) + out:write("DynASM encoding engine action codes:\n") + for n,name in ipairs(action_names) do + local num = map_action[name] + out:write(format(" %-10s %02X %d\n", name, num, num)) + end + out:write("\n") +end + +-- Write action list buffer as a huge static C array. +local function writeactions(out, name) + local nn = #actlist + local last = actlist[nn] or 255 + actlist[nn] = nil -- Remove last byte. + if nn == 0 then nn = 1 end + out:write("static const unsigned char ", name, "[", nn, "] = {\n") + local s = " " + for n,b in ipairs(actlist) do + s = s..b.."," + if #s >= 75 then + assert(out:write(s, "\n")) + s = " " + end + end + out:write(s, last, "\n};\n\n") -- Add last byte back. +end + +------------------------------------------------------------------------------ + +-- Add byte to action list. +local function wputxb(n) + assert(n >= 0 and n <= 255 and n % 1 == 0, "byte out of range") + actlist[#actlist+1] = n +end + +-- Add action to list with optional arg. Advance buffer pos, too. +local function waction(action, a, num) + wputxb(assert(map_action[action], "bad action name `"..action.."'")) + if a then actargs[#actargs+1] = a end + if a or num then secpos = secpos + (num or 1) end +end + +-- Add call to embedded DynASM C code. +local function wcall(func, args) + wline(format("dasm_%s(Dst, %s);", func, concat(args, ", ")), true) +end + +-- Delete duplicate action list chunks. A tad slow, but so what. +local function dedupechunk(offset) + local al, as = actlist, actstr + local chunk = char(unpack(al, offset+1, #al)) + local orig = find(as, chunk, 1, true) + if orig then + actargs[1] = orig-1 -- Replace with original offset. + for i=offset+1,#al do al[i] = nil end -- Kill dupe. + else + actstr = as..chunk + end +end + +-- Flush action list (intervening C code or buffer pos overflow). +local function wflush(term) + local offset = actargs[1] + if #actlist == offset then return end -- Nothing to flush. + if not term then waction("STOP") end -- Terminate action list. + dedupechunk(offset) + wcall("put", actargs) -- Add call to dasm_put(). + actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put(). + secpos = 1 -- The actionlist offset occupies a buffer position, too. +end + +-- Put escaped byte. +local function wputb(n) + if n >= actfirst then waction("ESC") end -- Need to escape byte. + wputxb(n) +end + +------------------------------------------------------------------------------ + +-- Global label name -> global label number. With auto assignment on 1st use. +local next_global = 10 +local map_global = setmetatable({}, { __index = function(t, name) + if not match(name, "^[%a_][%w_@]*$") then werror("bad global label") end + local n = next_global + if n > 246 then werror("too many global labels") end + next_global = n + 1 + t[name] = n + return n +end}) + +-- Dump global labels. +local function dumpglobals(out, lvl) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("Global labels:\n") + for i=10,next_global-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write global label enum. 
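+-- Illustrative note (editor's sketch; the label names below are hypothetical):
+-- for a source using ->vm_enter and ->vm_exit@8 with a prefix of "GLOB_",
+-- the function below would emit roughly:
+--   enum { GLOB_vm_enter, GLOB_vm_exit, GLOB__MAX };
+-- (any "@..." suffix is stripped from the enum names).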
+local function writeglobals(out, prefix) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("enum {\n") + for i=10,next_global-1 do + out:write(" ", prefix, gsub(t[i], "@.*", ""), ",\n") + end + out:write(" ", prefix, "_MAX\n};\n") +end + +-- Write global label names. +local function writeglobalnames(out, name) + local t = {} + for name, n in pairs(map_global) do t[n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=10,next_global-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Extern label name -> extern label number. With auto assignment on 1st use. +local next_extern = -1 +local map_extern = setmetatable({}, { __index = function(t, name) + -- No restrictions on the name for now. + local n = next_extern + if n < -256 then werror("too many extern labels") end + next_extern = n - 1 + t[name] = n + return n +end}) + +-- Dump extern labels. +local function dumpexterns(out, lvl) + local t = {} + for name, n in pairs(map_extern) do t[-n] = name end + out:write("Extern labels:\n") + for i=1,-next_extern-1 do + out:write(format(" %s\n", t[i])) + end + out:write("\n") +end + +-- Write extern label names. +local function writeexternnames(out, name) + local t = {} + for name, n in pairs(map_extern) do t[-n] = name end + out:write("static const char *const ", name, "[] = {\n") + for i=1,-next_extern-1 do + out:write(" \"", t[i], "\",\n") + end + out:write(" (const char *)0\n};\n") +end + +------------------------------------------------------------------------------ + +-- Arch-specific maps. +local map_archdef = {} -- Ext. register name -> int. name. +local map_reg_rev = {} -- Int. register name -> ext. name. +local map_reg_num = {} -- Int. register name -> register number. +local map_reg_opsize = {} -- Int. register name -> operand size. +local map_reg_valid_base = {} -- Int. register name -> valid base register? +local map_reg_valid_index = {} -- Int. register name -> valid index register? +local map_reg_needrex = {} -- Int. register name -> need rex vs. no rex. +local reg_list = {} -- Canonical list of int. register names. + +local map_type = {} -- Type name -> { ctype, reg } +local ctypenum = 0 -- Type number (for _PTx macros). + +local addrsize = x64 and "q" or "d" -- Size for address operands. + +-- Helper functions to fill register maps. 
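+-- Illustrative note (editor's sketch): registers get internal "@<size><num>"
+-- names, e.g. "eax" becomes "@d0" (dword register 0) and "xmm5" becomes
+-- "@o5"; the class names Rd/Rq/xmm map to size-only placeholders like "@d"
+-- with register number -1 (a variable register to be filled in via VREG).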
+local function mkrmap(sz, cl, names) + local cname = format("@%s", sz) + reg_list[#reg_list+1] = cname + map_archdef[cl] = cname + map_reg_rev[cname] = cl + map_reg_num[cname] = -1 + map_reg_opsize[cname] = sz + if sz == addrsize or sz == "d" then + map_reg_valid_base[cname] = true + map_reg_valid_index[cname] = true + end + if names then + for n,name in ipairs(names) do + local iname = format("@%s%x", sz, n-1) + reg_list[#reg_list+1] = iname + map_archdef[name] = iname + map_reg_rev[iname] = name + map_reg_num[iname] = n-1 + map_reg_opsize[iname] = sz + if sz == "b" and n > 4 then map_reg_needrex[iname] = false end + if sz == addrsize or sz == "d" then + map_reg_valid_base[iname] = true + map_reg_valid_index[iname] = true + end + end + end + for i=0,(x64 and sz ~= "f") and 15 or 7 do + local needrex = sz == "b" and i > 3 + local iname = format("@%s%x%s", sz, i, needrex and "R" or "") + if needrex then map_reg_needrex[iname] = true end + local name + if sz == "o" then name = format("xmm%d", i) + elseif sz == "f" then name = format("st%d", i) + else name = format("r%d%s", i, sz == addrsize and "" or sz) end + map_archdef[name] = iname + if not map_reg_rev[iname] then + reg_list[#reg_list+1] = iname + map_reg_rev[iname] = name + map_reg_num[iname] = i + map_reg_opsize[iname] = sz + if sz == addrsize or sz == "d" then + map_reg_valid_base[iname] = true + map_reg_valid_index[iname] = true + end + end + end + reg_list[#reg_list+1] = "" +end + +-- Integer registers (qword, dword, word and byte sized). +if x64 then + mkrmap("q", "Rq", {"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi"}) +end +mkrmap("d", "Rd", {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"}) +mkrmap("w", "Rw", {"ax", "cx", "dx", "bx", "sp", "bp", "si", "di"}) +mkrmap("b", "Rb", {"al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"}) +map_reg_valid_index[map_archdef.esp] = false +if x64 then map_reg_valid_index[map_archdef.rsp] = false end +map_archdef["Ra"] = "@"..addrsize + +-- FP registers (internally tword sized, but use "f" as operand size). +mkrmap("f", "Rf") + +-- SSE registers (oword sized, but qword and dword accessible). +mkrmap("o", "xmm") + +-- Operand size prefixes to codes. +local map_opsize = { + byte = "b", word = "w", dword = "d", qword = "q", oword = "o", tword = "t", + aword = addrsize, +} + +-- Operand size code to number. +local map_opsizenum = { + b = 1, w = 2, d = 4, q = 8, o = 16, t = 10, +} + +-- Operand size code to name. +local map_opsizename = { + b = "byte", w = "word", d = "dword", q = "qword", o = "oword", t = "tword", + f = "fpword", +} + +-- Valid index register scale factors. +local map_xsc = { + ["1"] = 0, ["2"] = 1, ["4"] = 2, ["8"] = 3, +} + +-- Condition codes. +local map_cc = { + o = 0, no = 1, b = 2, nb = 3, e = 4, ne = 5, be = 6, nbe = 7, + s = 8, ns = 9, p = 10, np = 11, l = 12, nl = 13, le = 14, nle = 15, + c = 2, nae = 2, nc = 3, ae = 3, z = 4, nz = 5, na = 6, a = 7, + pe = 10, po = 11, nge = 12, ge = 13, ng = 14, g = 15, +} + + +-- Reverse defines for registers. 
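+-- Illustrative note (editor's sketch): revdef below simply maps internal
+-- names back for diagnostics, e.g. _M.revdef("mov @d0, @d2") -> "mov eax, edx".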
+function _M.revdef(s) + return gsub(s, "@%w+", map_reg_rev) +end + +-- Dump register names and numbers +local function dumpregs(out) + out:write("Register names, sizes and internal numbers:\n") + for _,reg in ipairs(reg_list) do + if reg == "" then + out:write("\n") + else + local name = map_reg_rev[reg] + local num = map_reg_num[reg] + local opsize = map_opsizename[map_reg_opsize[reg]] + out:write(format(" %-5s %-8s %s\n", name, opsize, + num < 0 and "(variable)" or num)) + end + end +end + +------------------------------------------------------------------------------ + +-- Put action for label arg (IMM_LG, IMM_PC, REL_LG, REL_PC). +local function wputlabel(aprefix, imm, num) + if type(imm) == "number" then + if imm < 0 then + waction("EXTERN") + wputxb(aprefix == "IMM_" and 0 or 1) + imm = -imm-1 + else + waction(aprefix.."LG", nil, num); + end + wputxb(imm) + else + waction(aprefix.."PC", imm, num) + end +end + +-- Put signed byte or arg. +local function wputsbarg(n) + if type(n) == "number" then + if n < -128 or n > 127 then + werror("signed immediate byte out of range") + end + if n < 0 then n = n + 256 end + wputb(n) + else waction("IMM_S", n) end +end + +-- Put unsigned byte or arg. +local function wputbarg(n) + if type(n) == "number" then + if n < 0 or n > 255 then + werror("unsigned immediate byte out of range") + end + wputb(n) + else waction("IMM_B", n) end +end + +-- Put unsigned word or arg. +local function wputwarg(n) + if type(n) == "number" then + if n < 0 or n > 65535 then + werror("unsigned immediate word out of range") + end + local r = n%256; n = (n-r)/256; wputb(r); wputb(n); + else waction("IMM_W", n) end +end + +-- Put signed or unsigned dword or arg. +local function wputdarg(n) + local tn = type(n) + if tn == "number" then + if n < 0 then n = n + 4294967296 end + local r = n%256; n = (n-r)/256; wputb(r); + r = n%256; n = (n-r)/256; wputb(r); + r = n%256; n = (n-r)/256; wputb(r); wputb(n); + elseif tn == "table" then + wputlabel("IMM_", n[1], 1) + else + waction("IMM_D", n) + end +end + +-- Put operand-size dependent number or arg (defaults to dword). +local function wputszarg(sz, n) + if not sz or sz == "d" or sz == "q" then wputdarg(n) + elseif sz == "w" then wputwarg(n) + elseif sz == "b" then wputbarg(n) + elseif sz == "s" then wputsbarg(n) + else werror("bad operand size") end +end + +-- Put multi-byte opcode with operand-size dependent modifications. +local function wputop(sz, op, rex) + local r + if rex ~= 0 and not x64 then werror("bad operand size") end + if sz == "w" then wputb(102) end + -- Needs >32 bit numbers, but only for crc32 eax, word [ebx] + if op >= 4294967296 then r = op%4294967296 wputb((op-r)/4294967296) op = r end + if op >= 16777216 then r = op % 16777216 wputb((op-r) / 16777216) op = r end + if op >= 65536 then + if rex ~= 0 then + local opc3 = op - op % 256 + if opc3 == 0x0f3a00 or opc3 == 0x0f3800 then + wputb(64 + rex % 16); rex = 0 + end + end + r = op % 65536 wputb((op-r) / 65536) op = r + end + if op >= 256 then + r = op % 256 + local b = (op-r) / 256 + if b == 15 and rex ~= 0 then wputb(64 + rex % 16); rex = 0 end + wputb(b) + op = r + end + if rex ~= 0 then wputb(64 + rex % 16) end + if sz == "b" then op = op - 1 end + wputb(op) +end + +-- Put ModRM or SIB formatted byte. +local function wputmodrm(m, s, rm, vs, vrm) + assert(m < 4 and s < 16 and rm < 16, "bad modrm operands") + wputb(64*m + 8*(s%8) + (rm%8)) +end + +-- Put ModRM/SIB plus optional displacement. 
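+-- Illustrative note (editor's sketch): an operand like [ebx+8] with a small
+-- numeric displacement takes the m == 1 path below and encodes as a ModRM
+-- byte with mod=01, rm=ebx plus one signed displacement byte; [esp+...] or
+-- any indexed operand additionally forces a SIB byte.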
+local function wputmrmsib(t, imark, s, vsreg) + local vreg, vxreg + local reg, xreg = t.reg, t.xreg + if reg and reg < 0 then reg = 0; vreg = t.vreg end + if xreg and xreg < 0 then xreg = 0; vxreg = t.vxreg end + if s < 0 then s = 0 end + + -- Register mode. + if sub(t.mode, 1, 1) == "r" then + wputmodrm(3, s, reg) + if vsreg then waction("VREG", vsreg); wputxb(2) end + if vreg then waction("VREG", vreg); wputxb(0) end + return + end + + local disp = t.disp + local tdisp = type(disp) + -- No base register? + if not reg then + local riprel = false + if xreg then + -- Indexed mode with index register only. + -- [xreg*xsc+disp] -> (0, s, esp) (xsc, xreg, ebp) + wputmodrm(0, s, 4) + if imark == "I" then waction("MARK") end + if vsreg then waction("VREG", vsreg); wputxb(2) end + wputmodrm(t.xsc, xreg, 5) + if vxreg then waction("VREG", vxreg); wputxb(3) end + else + -- Pure 32 bit displacement. + if x64 and tdisp ~= "table" then + wputmodrm(0, s, 4) -- [disp] -> (0, s, esp) (0, esp, ebp) + if imark == "I" then waction("MARK") end + wputmodrm(0, 4, 5) + else + riprel = x64 + wputmodrm(0, s, 5) -- [disp|rip-label] -> (0, s, ebp) + if imark == "I" then waction("MARK") end + end + if vsreg then waction("VREG", vsreg); wputxb(2) end + end + if riprel then -- Emit rip-relative displacement. + if match("UWSiI", imark) then + werror("NYI: rip-relative displacement followed by immediate") + end + -- The previous byte in the action buffer cannot be 0xe9 or 0x80-0x8f. + wputlabel("REL_", disp[1], 2) + else + wputdarg(disp) + end + return + end + + local m + if tdisp == "number" then -- Check displacement size at assembly time. + if disp == 0 and (reg%8) ~= 5 then -- [ebp] -> [ebp+0] (in SIB, too) + if not vreg then m = 0 end -- Force DISP to allow [Rd(5)] -> [ebp+0] + elseif disp >= -128 and disp <= 127 then m = 1 + else m = 2 end + elseif tdisp == "table" then + m = 2 + end + + -- Index register present or esp as base register: need SIB encoding. + if xreg or (reg%8) == 4 then + wputmodrm(m or 2, s, 4) -- ModRM. + if m == nil or imark == "I" then waction("MARK") end + if vsreg then waction("VREG", vsreg); wputxb(2) end + wputmodrm(t.xsc or 0, xreg or 4, reg) -- SIB. + if vxreg then waction("VREG", vxreg); wputxb(3) end + if vreg then waction("VREG", vreg); wputxb(1) end + else + wputmodrm(m or 2, s, reg) -- ModRM. + if (imark == "I" and (m == 1 or m == 2)) or + (m == nil and (vsreg or vreg)) then waction("MARK") end + if vsreg then waction("VREG", vsreg); wputxb(2) end + if vreg then waction("VREG", vreg); wputxb(1) end + end + + -- Put displacement. + if m == 1 then wputsbarg(disp) + elseif m == 2 then wputdarg(disp) + elseif m == nil then waction("DISP", disp) end +end + +------------------------------------------------------------------------------ + +-- Return human-readable operand mode string. +local function opmodestr(op, args) + local m = {} + for i=1,#args do + local a = args[i] + m[#m+1] = sub(a.mode, 1, 1)..(a.opsize or "?") + end + return op.." "..concat(m, ",") +end + +-- Convert number to valid integer or nil. +local function toint(expr) + local n = tonumber(expr) + if n then + if n % 1 ~= 0 or n < -2147483648 or n > 4294967295 then + werror("bad integer number `"..expr.."'") + end + return n + end +end + +-- Parse immediate expression. 
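The immediate syntax accepted below can be summarised, one line per branch:

    &expr          -> mode "iPJ"   pointer-sized immediate, cast to ptrdiff_t
    =>pcexpr       -> mode "iJ"    PC label number
    ->name         -> mode "iJ"    global label
    <1 ... >9      -> mode "iJ"    local label (backward/forward)
    extern name    -> mode "iJ"    extern symbol
    anything else  -> mode "iI"    plain immediate expression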
+local function immexpr(expr) + -- &expr (pointer) + if sub(expr, 1, 1) == "&" then + return "iPJ", format("(ptrdiff_t)(%s)", sub(expr,2)) + end + + local prefix = sub(expr, 1, 2) + -- =>expr (pc label reference) + if prefix == "=>" then + return "iJ", sub(expr, 3) + end + -- ->name (global label reference) + if prefix == "->" then + return "iJ", map_global[sub(expr, 3)] + end + + -- [<>][1-9] (local label reference) + local dir, lnum = match(expr, "^([<>])([1-9])$") + if dir then -- Fwd: 247-255, Bkwd: 1-9. + return "iJ", lnum + (dir == ">" and 246 or 0) + end + + local extname = match(expr, "^extern%s+(%S+)$") + if extname then + return "iJ", map_extern[extname] + end + + -- expr (interpreted as immediate) + return "iI", expr +end + +-- Parse displacement expression: +-num, +-expr, +-opsize*num +local function dispexpr(expr) + local disp = expr == "" and 0 or toint(expr) + if disp then return disp end + local c, dispt = match(expr, "^([+-])%s*(.+)$") + if c == "+" then + expr = dispt + elseif not c then + werror("bad displacement expression `"..expr.."'") + end + local opsize, tailops = match(dispt, "^(%w+)%s*%*%s*(.+)$") + local ops, imm = map_opsize[opsize], toint(tailops) + if ops and imm then + if c == "-" then imm = -imm end + return imm*map_opsizenum[ops] + end + local mode, iexpr = immexpr(dispt) + if mode == "iJ" then + if c == "-" then werror("cannot invert label reference") end + return { iexpr } + end + return expr -- Need to return original signed expression. +end + +-- Parse register or type expression. +local function rtexpr(expr) + if not expr then return end + local tname, ovreg = match(expr, "^([%w_]+):(@[%w_]+)$") + local tp = map_type[tname or expr] + if tp then + local reg = ovreg or tp.reg + local rnum = map_reg_num[reg] + if not rnum then + werror("type `"..(tname or expr).."' needs a register override") + end + if not map_reg_valid_base[reg] then + werror("bad base register override `"..(map_reg_rev[reg] or reg).."'") + end + return reg, rnum, tp + end + return expr, map_reg_num[expr] +end + +-- Parse operand and return { mode, opsize, reg, xreg, xsc, disp, imm }. +local function parseoperand(param) + local t = {} + + local expr = param + local opsize, tailops = match(param, "^(%w+)%s*(.+)$") + if opsize then + t.opsize = map_opsize[opsize] + if t.opsize then expr = tailops end + end + + local br = match(expr, "^%[%s*(.-)%s*%]$") + repeat + if br then + t.mode = "xm" + + -- [disp] + t.disp = toint(br) + if t.disp then + t.mode = x64 and "xm" or "xmO" + break + end + + -- [reg...] + local tp + local reg, tailr = match(br, "^([@%w_:]+)%s*(.*)$") + reg, t.reg, tp = rtexpr(reg) + if not t.reg then + -- [expr] + t.mode = x64 and "xm" or "xmO" + t.disp = dispexpr("+"..br) + break + end + + if t.reg == -1 then + t.vreg, tailr = match(tailr, "^(%b())(.*)$") + if not t.vreg then werror("bad variable register expression") end + end + + -- [xreg*xsc] or [xreg*xsc+-disp] or [xreg*xsc+-expr] + local xsc, tailsc = match(tailr, "^%*%s*([1248])%s*(.*)$") + if xsc then + if not map_reg_valid_index[reg] then + werror("bad index register `"..map_reg_rev[reg].."'") + end + t.xsc = map_xsc[xsc] + t.xreg = t.reg + t.vxreg = t.vreg + t.reg = nil + t.vreg = nil + t.disp = dispexpr(tailsc) + break + end + if not map_reg_valid_base[reg] then + werror("bad base register `"..map_reg_rev[reg].."'") + end + + -- [reg] or [reg+-disp] + t.disp = toint(tailr) or (tailr == "" and 0) + if t.disp then break end + + -- [reg+xreg...] 
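A worked example for this base+index case (register names have already been replaced by their "@" codes by the define pass): an operand written as dword [eax+ecx*8+16] comes out of parseoperand roughly as

    { mode = "xm", opsize = "d", reg = 0, xreg = 1, xsc = 3, disp = 16 }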
+ local xreg, tailx = match(tailr, "^+%s*([@%w_:]+)%s*(.*)$") + xreg, t.xreg, tp = rtexpr(xreg) + if not t.xreg then + -- [reg+-expr] + t.disp = dispexpr(tailr) + break + end + if not map_reg_valid_index[xreg] then + werror("bad index register `"..map_reg_rev[xreg].."'") + end + + if t.xreg == -1 then + t.vxreg, tailx = match(tailx, "^(%b())(.*)$") + if not t.vxreg then werror("bad variable register expression") end + end + + -- [reg+xreg*xsc...] + local xsc, tailsc = match(tailx, "^%*%s*([1248])%s*(.*)$") + if xsc then + t.xsc = map_xsc[xsc] + tailx = tailsc + end + + -- [...] or [...+-disp] or [...+-expr] + t.disp = dispexpr(tailx) + else + -- imm or opsize*imm + local imm = toint(expr) + if not imm and sub(expr, 1, 1) == "*" and t.opsize then + imm = toint(sub(expr, 2)) + if imm then + imm = imm * map_opsizenum[t.opsize] + t.opsize = nil + end + end + if imm then + if t.opsize then werror("bad operand size override") end + local m = "i" + if imm == 1 then m = m.."1" end + if imm >= 4294967168 and imm <= 4294967295 then imm = imm-4294967296 end + if imm >= -128 and imm <= 127 then m = m.."S" end + t.imm = imm + t.mode = m + break + end + + local tp + local reg, tailr = match(expr, "^([@%w_:]+)%s*(.*)$") + reg, t.reg, tp = rtexpr(reg) + if t.reg then + if t.reg == -1 then + t.vreg, tailr = match(tailr, "^(%b())(.*)$") + if not t.vreg then werror("bad variable register expression") end + end + -- reg + if tailr == "" then + if t.opsize then werror("bad operand size override") end + t.opsize = map_reg_opsize[reg] + if t.opsize == "f" then + t.mode = t.reg == 0 and "fF" or "f" + else + if reg == "@w4" or (x64 and reg == "@d4") then + wwarn("bad idea, try again with `"..(x64 and "rsp'" or "esp'")) + end + t.mode = t.reg == 0 and "rmR" or (reg == "@b1" and "rmC" or "rm") + end + t.needrex = map_reg_needrex[reg] + break + end + + -- type[idx], type[idx].field, type->field -> [reg+offset_expr] + if not tp then werror("bad operand `"..param.."'") end + t.mode = "xm" + t.disp = format(tp.ctypefmt, tailr) + else + t.mode, t.imm = immexpr(expr) + if sub(t.mode, -1) == "J" then + if t.opsize and t.opsize ~= addrsize then + werror("bad operand size override") + end + t.opsize = addrsize + end + end + end + until true + return t +end + +------------------------------------------------------------------------------ +-- x86 Template String Description +-- =============================== +-- +-- Each template string is a list of [match:]pattern pairs, +-- separated by "|". The first match wins. No match means a +-- bad or unsupported combination of operand modes or sizes. +-- +-- The match part and the ":" is omitted if the operation has +-- no operands. Otherwise the first N characters are matched +-- against the mode strings of each of the N operands. +-- +-- The mode string for each operand type is (see parseoperand()): +-- Integer register: "rm", +"R" for eax, ax, al, +"C" for cl +-- FP register: "f", +"F" for st0 +-- Index operand: "xm", +"O" for [disp] (pure offset) +-- Immediate: "i", +"S" for signed 8 bit, +"1" for 1, +-- +"I" for arg, +"P" for pointer +-- Any: +"J" for valid jump targets +-- +-- So a match character "m" (mixed) matches both an integer register +-- and an index operand (to be encoded with the ModRM/SIB scheme). +-- But "r" matches only a register and "x" only an index operand +-- (e.g. for FP memory access operations). +-- +-- The operand size match string starts right after the mode match +-- characters and ends before the ":". "dwb" or "qdwb" is assumed, if empty. 
+-- The effective data size of the operation is matched against this list. +-- +-- If only the regular "b", "w", "d", "q", "t" operand sizes are +-- present, then all operands must be the same size. Unspecified sizes +-- are ignored, but at least one operand must have a size or the pattern +-- won't match (use the "byte", "word", "dword", "qword", "tword" +-- operand size overrides. E.g.: mov dword [eax], 1). +-- +-- If the list has a "1" or "2" prefix, the operand size is taken +-- from the respective operand and any other operand sizes are ignored. +-- If the list contains only ".", all operand sizes are ignored. +-- If the list has a "/" prefix, the concatenated (mixed) operand sizes +-- are compared to the match. +-- +-- E.g. "rrdw" matches for either two dword registers or two word +-- registers. "Fx2dq" matches an st0 operand plus an index operand +-- pointing to a dword (float) or qword (double). +-- +-- Every character after the ":" is part of the pattern string: +-- Hex chars are accumulated to form the opcode (left to right). +-- "n" disables the standard opcode mods +-- (otherwise: -1 for "b", o16 prefix for "w", rex.w for "q") +-- "X" Force REX.W. +-- "r"/"R" adds the reg. number from the 1st/2nd operand to the opcode. +-- "m"/"M" generates ModRM/SIB from the 1st/2nd operand. +-- The spare 3 bits are either filled with the last hex digit or +-- the result from a previous "r"/"R". The opcode is restored. +-- +-- All of the following characters force a flush of the opcode: +-- "o"/"O" stores a pure 32 bit disp (offset) from the 1st/2nd operand. +-- "S" stores a signed 8 bit immediate from the last operand. +-- "U" stores an unsigned 8 bit immediate from the last operand. +-- "W" stores an unsigned 16 bit immediate from the last operand. +-- "i" stores an operand sized immediate from the last operand. +-- "I" dito, but generates an action code to optionally modify +-- the opcode (+2) for a signed 8 bit immediate. +-- "J" generates one of the REL action codes from the last operand. +-- +------------------------------------------------------------------------------ + +-- Template strings for x86 instructions. Ordered by first opcode byte. +-- Unimplemented opcodes (deliberate omissions) are marked with *. +local map_op = { + -- 00-05: add... + -- 06: *push es + -- 07: *pop es + -- 08-0D: or... + -- 0E: *push cs + -- 0F: two byte opcode prefix + -- 10-15: adc... + -- 16: *push ss + -- 17: *pop ss + -- 18-1D: sbb... + -- 1E: *push ds + -- 1F: *pop ds + -- 20-25: and... + es_0 = "26", + -- 27: *daa + -- 28-2D: sub... + cs_0 = "2E", + -- 2F: *das + -- 30-35: xor... + ss_0 = "36", + -- 37: *aaa + -- 38-3D: cmp... + ds_0 = "3E", + -- 3F: *aas + inc_1 = x64 and "m:FF0m" or "rdw:40r|m:FF0m", + dec_1 = x64 and "m:FF1m" or "rdw:48r|m:FF1m", + push_1 = (x64 and "rq:n50r|rw:50r|mq:nFF6m|mw:FF6m" or + "rdw:50r|mdw:FF6m").."|S.:6AS|ib:n6Ai|i.:68i", + pop_1 = x64 and "rq:n58r|rw:58r|mq:n8F0m|mw:8F0m" or "rdw:58r|mdw:8F0m", + -- 60: *pusha, *pushad, *pushaw + -- 61: *popa, *popad, *popaw + -- 62: *bound rdw,x + -- 63: x86: *arpl mw,rw + movsxd_2 = x64 and "rm/qd:63rM", + fs_0 = "64", + gs_0 = "65", + o16_0 = "66", + a16_0 = not x64 and "67" or nil, + a32_0 = x64 and "67", + -- 68: push idw + -- 69: imul rdw,mdw,idw + -- 6A: push ib + -- 6B: imul rdw,mdw,S + -- 6C: *insb + -- 6D: *insd, *insw + -- 6E: *outsb + -- 6F: *outsd, *outsw + -- 70-7F: jcc lb + -- 80: add... mb,i + -- 81: add... mdw,i + -- 82: *undefined + -- 83: add... 
mdw,S + test_2 = "mr:85Rm|rm:85rM|Ri:A9ri|mi:F70mi", + -- 86: xchg rb,mb + -- 87: xchg rdw,mdw + -- 88: mov mb,r + -- 89: mov mdw,r + -- 8A: mov r,mb + -- 8B: mov r,mdw + -- 8C: *mov mdw,seg + lea_2 = "rx1dq:8DrM", + -- 8E: *mov seg,mdw + -- 8F: pop mdw + nop_0 = "90", + xchg_2 = "Rrqdw:90R|rRqdw:90r|rm:87rM|mr:87Rm", + cbw_0 = "6698", + cwde_0 = "98", + cdqe_0 = "4898", + cwd_0 = "6699", + cdq_0 = "99", + cqo_0 = "4899", + -- 9A: *call iw:idw + wait_0 = "9B", + fwait_0 = "9B", + pushf_0 = "9C", + pushfd_0 = not x64 and "9C", + pushfq_0 = x64 and "9C", + popf_0 = "9D", + popfd_0 = not x64 and "9D", + popfq_0 = x64 and "9D", + sahf_0 = "9E", + lahf_0 = "9F", + mov_2 = "OR:A3o|RO:A1O|mr:89Rm|rm:8BrM|rib:nB0ri|ridw:B8ri|mi:C70mi", + movsb_0 = "A4", + movsw_0 = "66A5", + movsd_0 = "A5", + cmpsb_0 = "A6", + cmpsw_0 = "66A7", + cmpsd_0 = "A7", + -- A8: test Rb,i + -- A9: test Rdw,i + stosb_0 = "AA", + stosw_0 = "66AB", + stosd_0 = "AB", + lodsb_0 = "AC", + lodsw_0 = "66AD", + lodsd_0 = "AD", + scasb_0 = "AE", + scasw_0 = "66AF", + scasd_0 = "AF", + -- B0-B7: mov rb,i + -- B8-BF: mov rdw,i + -- C0: rol... mb,i + -- C1: rol... mdw,i + ret_1 = "i.:nC2W", + ret_0 = "C3", + -- C4: *les rdw,mq + -- C5: *lds rdw,mq + -- C6: mov mb,i + -- C7: mov mdw,i + -- C8: *enter iw,ib + leave_0 = "C9", + -- CA: *retf iw + -- CB: *retf + int3_0 = "CC", + int_1 = "i.:nCDU", + into_0 = "CE", + -- CF: *iret + -- D0: rol... mb,1 + -- D1: rol... mdw,1 + -- D2: rol... mb,cl + -- D3: rol... mb,cl + -- D4: *aam ib + -- D5: *aad ib + -- D6: *salc + -- D7: *xlat + -- D8-DF: floating point ops + -- E0: *loopne + -- E1: *loope + -- E2: *loop + -- E3: *jcxz, *jecxz + -- E4: *in Rb,ib + -- E5: *in Rdw,ib + -- E6: *out ib,Rb + -- E7: *out ib,Rdw + call_1 = x64 and "mq:nFF2m|J.:E8nJ" or "md:FF2m|J.:E8J", + jmp_1 = x64 and "mq:nFF4m|J.:E9nJ" or "md:FF4m|J.:E9J", -- short: EB + -- EA: *jmp iw:idw + -- EB: jmp ib + -- EC: *in Rb,dx + -- ED: *in Rdw,dx + -- EE: *out dx,Rb + -- EF: *out dx,Rdw + -- F0: *lock + int1_0 = "F1", + repne_0 = "F2", + repnz_0 = "F2", + rep_0 = "F3", + repe_0 = "F3", + repz_0 = "F3", + -- F4: *hlt + cmc_0 = "F5", + -- F6: test... mb,i; div... mb + -- F7: test... mdw,i; div... mdw + clc_0 = "F8", + stc_0 = "F9", + -- FA: *cli + cld_0 = "FC", + std_0 = "FD", + -- FE: inc... mb + -- FF: inc... mdw + + -- misc ops + not_1 = "m:F72m", + neg_1 = "m:F73m", + mul_1 = "m:F74m", + imul_1 = "m:F75m", + div_1 = "m:F76m", + idiv_1 = "m:F77m", + + imul_2 = "rmqdw:0FAFrM|rIqdw:69rmI|rSqdw:6BrmS|riqdw:69rmi", + imul_3 = "rmIqdw:69rMI|rmSqdw:6BrMS|rmiqdw:69rMi", + + movzx_2 = "rm/db:0FB6rM|rm/qb:|rm/wb:0FB6rM|rm/dw:0FB7rM|rm/qw:", + movsx_2 = "rm/db:0FBErM|rm/qb:|rm/wb:0FBErM|rm/dw:0FBFrM|rm/qw:", + + bswap_1 = "rqd:0FC8r", + bsf_2 = "rmqdw:0FBCrM", + bsr_2 = "rmqdw:0FBDrM", + bt_2 = "mrqdw:0FA3Rm|miqdw:0FBA4mU", + btc_2 = "mrqdw:0FBBRm|miqdw:0FBA7mU", + btr_2 = "mrqdw:0FB3Rm|miqdw:0FBA6mU", + bts_2 = "mrqdw:0FABRm|miqdw:0FBA5mU", + + rdtsc_0 = "0F31", -- P1+ + cpuid_0 = "0FA2", -- P1+ + + -- floating point ops + fst_1 = "ff:DDD0r|xd:D92m|xq:nDD2m", + fstp_1 = "ff:DDD8r|xd:D93m|xq:nDD3m|xt:DB7m", + fld_1 = "ff:D9C0r|xd:D90m|xq:nDD0m|xt:DB5m", + + fpop_0 = "DDD8", -- Alias for fstp st0. 
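Reading one of the entries above against the template description: in mov_2, the alternative "mr:89Rm" matches a (memory-or-register, register) operand pair, 89 is the opcode byte, "R" merges the second operand's register number into the spare field and "m" emits ModRM/SIB for the first operand. Concretely:

    mr:89Rm    mov [eax], ecx   ->  89 08   (ModRM: mod=00, reg=ecx, rm=eax)
    rm:8BrM    mov ecx, [eax]   ->  8B 08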
+ + fist_1 = "xw:nDF2m|xd:DB2m", + fistp_1 = "xw:nDF3m|xd:DB3m|xq:nDF7m", + fild_1 = "xw:nDF0m|xd:DB0m|xq:nDF5m", + + fxch_0 = "D9C9", + fxch_1 = "ff:D9C8r", + fxch_2 = "fFf:D9C8r|Fff:D9C8R", + + fucom_1 = "ff:DDE0r", + fucom_2 = "Fff:DDE0R", + fucomp_1 = "ff:DDE8r", + fucomp_2 = "Fff:DDE8R", + fucomi_1 = "ff:DBE8r", -- P6+ + fucomi_2 = "Fff:DBE8R", -- P6+ + fucomip_1 = "ff:DFE8r", -- P6+ + fucomip_2 = "Fff:DFE8R", -- P6+ + fcomi_1 = "ff:DBF0r", -- P6+ + fcomi_2 = "Fff:DBF0R", -- P6+ + fcomip_1 = "ff:DFF0r", -- P6+ + fcomip_2 = "Fff:DFF0R", -- P6+ + fucompp_0 = "DAE9", + fcompp_0 = "DED9", + + fldcw_1 = "xw:nD95m", + fstcw_1 = "xw:n9BD97m", + fnstcw_1 = "xw:nD97m", + fstsw_1 = "Rw:n9BDFE0|xw:n9BDD7m", + fnstsw_1 = "Rw:nDFE0|xw:nDD7m", + fclex_0 = "9BDBE2", + fnclex_0 = "DBE2", + + fnop_0 = "D9D0", + -- D9D1-D9DF: unassigned + + fchs_0 = "D9E0", + fabs_0 = "D9E1", + -- D9E2: unassigned + -- D9E3: unassigned + ftst_0 = "D9E4", + fxam_0 = "D9E5", + -- D9E6: unassigned + -- D9E7: unassigned + fld1_0 = "D9E8", + fldl2t_0 = "D9E9", + fldl2e_0 = "D9EA", + fldpi_0 = "D9EB", + fldlg2_0 = "D9EC", + fldln2_0 = "D9ED", + fldz_0 = "D9EE", + -- D9EF: unassigned + + f2xm1_0 = "D9F0", + fyl2x_0 = "D9F1", + fptan_0 = "D9F2", + fpatan_0 = "D9F3", + fxtract_0 = "D9F4", + fprem1_0 = "D9F5", + fdecstp_0 = "D9F6", + fincstp_0 = "D9F7", + fprem_0 = "D9F8", + fyl2xp1_0 = "D9F9", + fsqrt_0 = "D9FA", + fsincos_0 = "D9FB", + frndint_0 = "D9FC", + fscale_0 = "D9FD", + fsin_0 = "D9FE", + fcos_0 = "D9FF", + + -- SSE, SSE2 + andnpd_2 = "rmo:660F55rM", + andnps_2 = "rmo:0F55rM", + andpd_2 = "rmo:660F54rM", + andps_2 = "rmo:0F54rM", + clflush_1 = "x.:0FAE7m", + cmppd_3 = "rmio:660FC2rMU", + cmpps_3 = "rmio:0FC2rMU", + cmpsd_3 = "rrio:F20FC2rMU|rxi/oq:", + cmpss_3 = "rrio:F30FC2rMU|rxi/od:", + comisd_2 = "rro:660F2FrM|rx/oq:", + comiss_2 = "rro:0F2FrM|rx/od:", + cvtdq2pd_2 = "rro:F30FE6rM|rx/oq:", + cvtdq2ps_2 = "rmo:0F5BrM", + cvtpd2dq_2 = "rmo:F20FE6rM", + cvtpd2ps_2 = "rmo:660F5ArM", + cvtpi2pd_2 = "rx/oq:660F2ArM", + cvtpi2ps_2 = "rx/oq:0F2ArM", + cvtps2dq_2 = "rmo:660F5BrM", + cvtps2pd_2 = "rro:0F5ArM|rx/oq:", + cvtsd2si_2 = "rr/do:F20F2DrM|rr/qo:|rx/dq:|rxq:", + cvtsd2ss_2 = "rro:F20F5ArM|rx/oq:", + cvtsi2sd_2 = "rm/od:F20F2ArM|rm/oq:F20F2ArXM", + cvtsi2ss_2 = "rm/od:F30F2ArM|rm/oq:F30F2ArXM", + cvtss2sd_2 = "rro:F30F5ArM|rx/od:", + cvtss2si_2 = "rr/do:F20F2CrM|rr/qo:|rxd:|rx/qd:", + cvttpd2dq_2 = "rmo:660FE6rM", + cvttps2dq_2 = "rmo:F30F5BrM", + cvttsd2si_2 = "rr/do:F20F2CrM|rr/qo:|rx/dq:|rxq:", + cvttss2si_2 = "rr/do:F30F2CrM|rr/qo:|rxd:|rx/qd:", + ldmxcsr_1 = "xd:0FAE2m", + lfence_0 = "0FAEE8", + maskmovdqu_2 = "rro:660FF7rM", + mfence_0 = "0FAEF0", + movapd_2 = "rmo:660F28rM|mro:660F29Rm", + movaps_2 = "rmo:0F28rM|mro:0F29Rm", + movd_2 = "rm/od:660F6ErM|rm/oq:660F6ErXM|mr/do:660F7ERm|mr/qo:", + movdqa_2 = "rmo:660F6FrM|mro:660F7FRm", + movdqu_2 = "rmo:F30F6FrM|mro:F30F7FRm", + movhlps_2 = "rro:0F12rM", + movhpd_2 = "rx/oq:660F16rM|xr/qo:n660F17Rm", + movhps_2 = "rx/oq:0F16rM|xr/qo:n0F17Rm", + movlhps_2 = "rro:0F16rM", + movlpd_2 = "rx/oq:660F12rM|xr/qo:n660F13Rm", + movlps_2 = "rx/oq:0F12rM|xr/qo:n0F13Rm", + movmskpd_2 = "rr/do:660F50rM", + movmskps_2 = "rr/do:0F50rM", + movntdq_2 = "xro:660FE7Rm", + movnti_2 = "xrqd:0FC3Rm", + movntpd_2 = "xro:660F2BRm", + movntps_2 = "xro:0F2BRm", + movq_2 = "rro:F30F7ErM|rx/oq:|xr/qo:n660FD6Rm", + movsd_2 = "rro:F20F10rM|rx/oq:|xr/qo:nF20F11Rm", + movss_2 = "rro:F30F10rM|rx/od:|xr/do:F30F11Rm", + movupd_2 = "rmo:660F10rM|mro:660F11Rm", + movups_2 = "rmo:0F10rM|mro:0F11Rm", + 
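Two template subtleties used heavily in this SSE block, taking movsd_2 = "rro:F20F10rM|rx/oq:|xr/qo:nF20F11Rm" as the example: an empty pattern after the ":" reuses the pattern of the previous alternative, and a size list starting with "/" matches the operand sizes pairwise. The "rx/oq:" alternative therefore encodes an xmm (oword) destination loaded from a qword memory operand, using the same F2 0F 10 opcode as the register-register form.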
orpd_2 = "rmo:660F56rM", + orps_2 = "rmo:0F56rM", + packssdw_2 = "rmo:660F6BrM", + packsswb_2 = "rmo:660F63rM", + packuswb_2 = "rmo:660F67rM", + paddb_2 = "rmo:660FFCrM", + paddd_2 = "rmo:660FFErM", + paddq_2 = "rmo:660FD4rM", + paddsb_2 = "rmo:660FECrM", + paddsw_2 = "rmo:660FEDrM", + paddusb_2 = "rmo:660FDCrM", + paddusw_2 = "rmo:660FDDrM", + paddw_2 = "rmo:660FFDrM", + pand_2 = "rmo:660FDBrM", + pandn_2 = "rmo:660FDFrM", + pause_0 = "F390", + pavgb_2 = "rmo:660FE0rM", + pavgw_2 = "rmo:660FE3rM", + pcmpeqb_2 = "rmo:660F74rM", + pcmpeqd_2 = "rmo:660F76rM", + pcmpeqw_2 = "rmo:660F75rM", + pcmpgtb_2 = "rmo:660F64rM", + pcmpgtd_2 = "rmo:660F66rM", + pcmpgtw_2 = "rmo:660F65rM", + pextrw_3 = "rri/do:660FC5rMU|xri/wo:660F3A15nrMU", -- Mem op: SSE4.1 only. + pinsrw_3 = "rri/od:660FC4rMU|rxi/ow:", + pmaddwd_2 = "rmo:660FF5rM", + pmaxsw_2 = "rmo:660FEErM", + pmaxub_2 = "rmo:660FDErM", + pminsw_2 = "rmo:660FEArM", + pminub_2 = "rmo:660FDArM", + pmovmskb_2 = "rr/do:660FD7rM", + pmulhuw_2 = "rmo:660FE4rM", + pmulhw_2 = "rmo:660FE5rM", + pmullw_2 = "rmo:660FD5rM", + pmuludq_2 = "rmo:660FF4rM", + por_2 = "rmo:660FEBrM", + prefetchnta_1 = "xb:n0F180m", + prefetcht0_1 = "xb:n0F181m", + prefetcht1_1 = "xb:n0F182m", + prefetcht2_1 = "xb:n0F183m", + psadbw_2 = "rmo:660FF6rM", + pshufd_3 = "rmio:660F70rMU", + pshufhw_3 = "rmio:F30F70rMU", + pshuflw_3 = "rmio:F20F70rMU", + pslld_2 = "rmo:660FF2rM|rio:660F726mU", + pslldq_2 = "rio:660F737mU", + psllq_2 = "rmo:660FF3rM|rio:660F736mU", + psllw_2 = "rmo:660FF1rM|rio:660F716mU", + psrad_2 = "rmo:660FE2rM|rio:660F724mU", + psraw_2 = "rmo:660FE1rM|rio:660F714mU", + psrld_2 = "rmo:660FD2rM|rio:660F722mU", + psrldq_2 = "rio:660F733mU", + psrlq_2 = "rmo:660FD3rM|rio:660F732mU", + psrlw_2 = "rmo:660FD1rM|rio:660F712mU", + psubb_2 = "rmo:660FF8rM", + psubd_2 = "rmo:660FFArM", + psubq_2 = "rmo:660FFBrM", + psubsb_2 = "rmo:660FE8rM", + psubsw_2 = "rmo:660FE9rM", + psubusb_2 = "rmo:660FD8rM", + psubusw_2 = "rmo:660FD9rM", + psubw_2 = "rmo:660FF9rM", + punpckhbw_2 = "rmo:660F68rM", + punpckhdq_2 = "rmo:660F6ArM", + punpckhqdq_2 = "rmo:660F6DrM", + punpckhwd_2 = "rmo:660F69rM", + punpcklbw_2 = "rmo:660F60rM", + punpckldq_2 = "rmo:660F62rM", + punpcklqdq_2 = "rmo:660F6CrM", + punpcklwd_2 = "rmo:660F61rM", + pxor_2 = "rmo:660FEFrM", + rcpps_2 = "rmo:0F53rM", + rcpss_2 = "rro:F30F53rM|rx/od:", + rsqrtps_2 = "rmo:0F52rM", + rsqrtss_2 = "rmo:F30F52rM", + sfence_0 = "0FAEF8", + shufpd_3 = "rmio:660FC6rMU", + shufps_3 = "rmio:0FC6rMU", + stmxcsr_1 = "xd:0FAE3m", + ucomisd_2 = "rro:660F2ErM|rx/oq:", + ucomiss_2 = "rro:0F2ErM|rx/od:", + unpckhpd_2 = "rmo:660F15rM", + unpckhps_2 = "rmo:0F15rM", + unpcklpd_2 = "rmo:660F14rM", + unpcklps_2 = "rmo:0F14rM", + xorpd_2 = "rmo:660F57rM", + xorps_2 = "rmo:0F57rM", + + -- SSE3 ops + fisttp_1 = "xw:nDF1m|xd:DB1m|xq:nDD1m", + addsubpd_2 = "rmo:660FD0rM", + addsubps_2 = "rmo:F20FD0rM", + haddpd_2 = "rmo:660F7CrM", + haddps_2 = "rmo:F20F7CrM", + hsubpd_2 = "rmo:660F7DrM", + hsubps_2 = "rmo:F20F7DrM", + lddqu_2 = "rxo:F20FF0rM", + movddup_2 = "rmo:F20F12rM", + movshdup_2 = "rmo:F30F16rM", + movsldup_2 = "rmo:F30F12rM", + + -- SSSE3 ops + pabsb_2 = "rmo:660F381CrM", + pabsd_2 = "rmo:660F381ErM", + pabsw_2 = "rmo:660F381DrM", + palignr_3 = "rmio:660F3A0FrMU", + phaddd_2 = "rmo:660F3802rM", + phaddsw_2 = "rmo:660F3803rM", + phaddw_2 = "rmo:660F3801rM", + phsubd_2 = "rmo:660F3806rM", + phsubsw_2 = "rmo:660F3807rM", + phsubw_2 = "rmo:660F3805rM", + pmaddubsw_2 = "rmo:660F3804rM", + pmulhrsw_2 = "rmo:660F380BrM", + pshufb_2 = "rmo:660F3800rM", + psignb_2 
= "rmo:660F3808rM", + psignd_2 = "rmo:660F380ArM", + psignw_2 = "rmo:660F3809rM", + + -- SSE4.1 ops + blendpd_3 = "rmio:660F3A0DrMU", + blendps_3 = "rmio:660F3A0CrMU", + blendvpd_3 = "rmRo:660F3815rM", + blendvps_3 = "rmRo:660F3814rM", + dppd_3 = "rmio:660F3A41rMU", + dpps_3 = "rmio:660F3A40rMU", + extractps_3 = "mri/do:660F3A17RmU|rri/qo:660F3A17RXmU", + insertps_3 = "rrio:660F3A41rMU|rxi/od:", + movntdqa_2 = "rmo:660F382ArM", + mpsadbw_3 = "rmio:660F3A42rMU", + packusdw_2 = "rmo:660F382BrM", + pblendvb_3 = "rmRo:660F3810rM", + pblendw_3 = "rmio:660F3A0ErMU", + pcmpeqq_2 = "rmo:660F3829rM", + pextrb_3 = "rri/do:660F3A14nRmU|rri/qo:|xri/bo:", + pextrd_3 = "mri/do:660F3A16RmU", + pextrq_3 = "mri/qo:660F3A16RmU", + -- pextrw is SSE2, mem operand is SSE4.1 only + phminposuw_2 = "rmo:660F3841rM", + pinsrb_3 = "rri/od:660F3A20nrMU|rxi/ob:", + pinsrd_3 = "rmi/od:660F3A22rMU", + pinsrq_3 = "rmi/oq:660F3A22rXMU", + pmaxsb_2 = "rmo:660F383CrM", + pmaxsd_2 = "rmo:660F383DrM", + pmaxud_2 = "rmo:660F383FrM", + pmaxuw_2 = "rmo:660F383ErM", + pminsb_2 = "rmo:660F3838rM", + pminsd_2 = "rmo:660F3839rM", + pminud_2 = "rmo:660F383BrM", + pminuw_2 = "rmo:660F383ArM", + pmovsxbd_2 = "rro:660F3821rM|rx/od:", + pmovsxbq_2 = "rro:660F3822rM|rx/ow:", + pmovsxbw_2 = "rro:660F3820rM|rx/oq:", + pmovsxdq_2 = "rro:660F3825rM|rx/oq:", + pmovsxwd_2 = "rro:660F3823rM|rx/oq:", + pmovsxwq_2 = "rro:660F3824rM|rx/od:", + pmovzxbd_2 = "rro:660F3831rM|rx/od:", + pmovzxbq_2 = "rro:660F3832rM|rx/ow:", + pmovzxbw_2 = "rro:660F3830rM|rx/oq:", + pmovzxdq_2 = "rro:660F3835rM|rx/oq:", + pmovzxwd_2 = "rro:660F3833rM|rx/oq:", + pmovzxwq_2 = "rro:660F3834rM|rx/od:", + pmuldq_2 = "rmo:660F3828rM", + pmulld_2 = "rmo:660F3840rM", + ptest_2 = "rmo:660F3817rM", + roundpd_3 = "rmio:660F3A09rMU", + roundps_3 = "rmio:660F3A08rMU", + roundsd_3 = "rrio:660F3A0BrMU|rxi/oq:", + roundss_3 = "rrio:660F3A0ArMU|rxi/od:", + + -- SSE4.2 ops + crc32_2 = "rmqd:F20F38F1rM|rm/dw:66F20F38F1rM|rm/db:F20F38F0rM|rm/qb:", + pcmpestri_3 = "rmio:660F3A61rMU", + pcmpestrm_3 = "rmio:660F3A60rMU", + pcmpgtq_2 = "rmo:660F3837rM", + pcmpistri_3 = "rmio:660F3A63rMU", + pcmpistrm_3 = "rmio:660F3A62rMU", + popcnt_2 = "rmqdw:F30FB8rM", + + -- SSE4a + extrq_2 = "rro:660F79rM", + extrq_3 = "riio:660F780mUU", + insertq_2 = "rro:F20F79rM", + insertq_4 = "rriio:F20F78rMUU", + lzcnt_2 = "rmqdw:F30FBDrM", + movntsd_2 = "xr/qo:nF20F2BRm", + movntss_2 = "xr/do:F30F2BRm", + -- popcnt is also in SSE4.2 +} + +------------------------------------------------------------------------------ + +-- Arithmetic ops. +for name,n in pairs{ add = 0, ["or"] = 1, adc = 2, sbb = 3, + ["and"] = 4, sub = 5, xor = 6, cmp = 7 } do + local n8 = n * 8 + map_op[name.."_2"] = format( + "mr:%02XRm|rm:%02XrM|mI1qdw:81%XmI|mS1qdw:83%XmS|Ri1qdwb:%02Xri|mi1qdwb:81%Xmi", + 1+n8, 3+n8, n, n, 5+n8, n) +end + +-- Shift ops. +for name,n in pairs{ rol = 0, ror = 1, rcl = 2, rcr = 3, + shl = 4, shr = 5, sar = 7, sal = 4 } do + map_op[name.."_2"] = format("m1:D1%Xm|mC1qdwb:D3%Xm|mi:C1%XmU", n, n, n) +end + +-- Conditional ops. +for cc,n in pairs(map_cc) do + map_op["j"..cc.."_1"] = format("J.:n0F8%XJ", n) -- short: 7%X + map_op["set"..cc.."_1"] = format("mb:n0F9%X2m", n) + map_op["cmov"..cc.."_2"] = format("rmqdw:0F4%XrM", n) -- P6+ +end + +-- FP arithmetic ops. 
+for name,n in pairs{ add = 0, mul = 1, com = 2, comp = 3, + sub = 4, subr = 5, div = 6, divr = 7 } do + local nc = 192 + n * 8 + local nr = nc + (n < 4 and 0 or (n % 2 == 0 and 8 or -8)) + local fn = "f"..name + map_op[fn.."_1"] = format("ff:D8%02Xr|xd:D8%Xm|xq:nDC%Xm", nc, n, n) + if n == 2 or n == 3 then + map_op[fn.."_2"] = format("Fff:D8%02XR|Fx2d:D8%XM|Fx2q:nDC%XM", nc, n, n) + else + map_op[fn.."_2"] = format("Fff:D8%02XR|fFf:DC%02Xr|Fx2d:D8%XM|Fx2q:nDC%XM", nc, nr, n, n) + map_op[fn.."p_1"] = format("ff:DE%02Xr", nr) + map_op[fn.."p_2"] = format("fFf:DE%02Xr", nr) + end + map_op["fi"..name.."_1"] = format("xd:DA%Xm|xw:nDE%Xm", n, n) +end + +-- FP conditional moves. +for cc,n in pairs{ b=0, e=1, be=2, u=3, nb=4, ne=5, nbe=6, nu=7 } do + local n4 = n % 4 + local nc = 56000 + n4 * 8 + (n-n4) * 64 + map_op["fcmov"..cc.."_1"] = format("ff:%04Xr", nc) -- P6+ + map_op["fcmov"..cc.."_2"] = format("Fff:%04XR", nc) -- P6+ +end + +-- SSE FP arithmetic ops. +for name,n in pairs{ sqrt = 1, add = 8, mul = 9, + sub = 12, min = 13, div = 14, max = 15 } do + map_op[name.."ps_2"] = format("rmo:0F5%XrM", n) + map_op[name.."ss_2"] = format("rro:F30F5%XrM|rx/od:", n) + map_op[name.."pd_2"] = format("rmo:660F5%XrM", n) + map_op[name.."sd_2"] = format("rro:F20F5%XrM|rx/oq:", n) +end + +------------------------------------------------------------------------------ + +-- Process pattern string. +local function dopattern(pat, args, sz, op, needrex) + local digit, addin + local opcode = 0 + local szov = sz + local narg = 1 + local rex = 0 + + -- Limit number of section buffer positions used by a single dasm_put(). + -- A single opcode needs a maximum of 5 positions. + if secpos+5 > maxsecpos then wflush() end + + -- Process each character. + for c in gmatch(pat.."|", ".") do + if match(c, "%x") then -- Hex digit. + digit = byte(c) - 48 + if digit > 48 then digit = digit - 39 + elseif digit > 16 then digit = digit - 7 end + opcode = opcode*16 + digit + addin = nil + elseif c == "n" then -- Disable operand size mods for opcode. + szov = nil + elseif c == "X" then -- Force REX.W. + rex = 8 + elseif c == "r" then -- Merge 1st operand regno. into opcode. + addin = args[1]; opcode = opcode + (addin.reg % 8) + if narg < 2 then narg = 2 end + elseif c == "R" then -- Merge 2nd operand regno. into opcode. + addin = args[2]; opcode = opcode + (addin.reg % 8) + narg = 3 + elseif c == "m" or c == "M" then -- Encode ModRM/SIB. + local s + if addin then + s = addin.reg + opcode = opcode - (s%8) -- Undo regno opcode merge. + else + s = opcode % 16 -- Undo last digit. + opcode = (opcode - s) / 16 + end + local nn = c == "m" and 1 or 2 + local t = args[nn] + if narg <= nn then narg = nn + 1 end + if szov == "q" and rex == 0 then rex = rex + 8 end + if t.reg and t.reg > 7 then rex = rex + 1 end + if t.xreg and t.xreg > 7 then rex = rex + 2 end + if s > 7 then rex = rex + 4 end + if needrex then rex = rex + 16 end + wputop(szov, opcode, rex); opcode = nil + local imark = sub(pat, -1) -- Force a mark (ugly). + -- Put ModRM/SIB with regno/last digit as spare. + wputmrmsib(t, imark, s, addin and addin.vreg) + addin = nil + else + if opcode then -- Flush opcode. 
+ if szov == "q" and rex == 0 then rex = rex + 8 end + if needrex then rex = rex + 16 end + if addin and addin.reg == -1 then + wputop(szov, opcode - 7, rex) + waction("VREG", addin.vreg); wputxb(0) + else + if addin and addin.reg > 7 then rex = rex + 1 end + wputop(szov, opcode, rex) + end + opcode = nil + end + if c == "|" then break end + if c == "o" then -- Offset (pure 32 bit displacement). + wputdarg(args[1].disp); if narg < 2 then narg = 2 end + elseif c == "O" then + wputdarg(args[2].disp); narg = 3 + else + -- Anything else is an immediate operand. + local a = args[narg] + narg = narg + 1 + local mode, imm = a.mode, a.imm + if mode == "iJ" and not match("iIJ", c) then + werror("bad operand size for label") + end + if c == "S" then + wputsbarg(imm) + elseif c == "U" then + wputbarg(imm) + elseif c == "W" then + wputwarg(imm) + elseif c == "i" or c == "I" then + if mode == "iJ" then + wputlabel("IMM_", imm, 1) + elseif mode == "iI" and c == "I" then + waction(sz == "w" and "IMM_WB" or "IMM_DB", imm) + else + wputszarg(sz, imm) + end + elseif c == "J" then + if mode == "iPJ" then + waction("REL_A", imm) -- !x64 (secpos) + else + wputlabel("REL_", imm, 2) + end + else + werror("bad char `"..c.."' in pattern `"..pat.."' for `"..op.."'") + end + end + end + end +end + +------------------------------------------------------------------------------ + +-- Mapping of operand modes to short names. Suppress output with '#'. +local map_modename = { + r = "reg", R = "eax", C = "cl", x = "mem", m = "mrm", i = "imm", + f = "stx", F = "st0", J = "lbl", ["1"] = "1", + I = "#", S = "#", O = "#", +} + +-- Return a table/string showing all possible operand modes. +local function templatehelp(template, nparams) + if nparams == 0 then return "" end + local t = {} + for tm in gmatch(template, "[^%|]+") do + local s = map_modename[sub(tm, 1, 1)] + s = s..gsub(sub(tm, 2, nparams), ".", function(c) + return ", "..map_modename[c] + end) + if not match(s, "#") then t[#t+1] = s end + end + return t +end + +-- Match operand modes against mode match part of template. +local function matchtm(tm, args) + for i=1,#args do + if not match(args[i].mode, sub(tm, i, i)) then return end + end + return true +end + +-- Handle opcodes defined with template strings. +map_op[".template__"] = function(params, template, nparams) + if not params then return templatehelp(template, nparams) end + local args = {} + + -- Zero-operand opcodes have no match part. + if #params == 0 then + dopattern(template, args, "d", params.op, nil) + return + end + + -- Determine common operand size (coerce undefined size) or flag as mixed. + local sz, szmix, needrex + for i,p in ipairs(params) do + args[i] = parseoperand(p) + local nsz = args[i].opsize + if nsz then + if sz and sz ~= nsz then szmix = true else sz = nsz end + end + local nrex = args[i].needrex + if nrex ~= nil then + if needrex == nil then + needrex = nrex + elseif needrex ~= nrex then + werror("bad mix of byte-addressable registers") + end + end + end + + -- Try all match:pattern pairs (separated by '|'). + local gotmatch, lastpat + for tm in gmatch(template, "[^%|]+") do + -- Split off size match (starts after mode match) and pattern string. + local szm, pat = match(tm, "^(.-):(.*)$", #args+1) + if pat == "" then pat = lastpat else lastpat = pat end + if matchtm(tm, args) then + local prefix = sub(szm, 1, 1) + if prefix == "/" then -- Match both operand sizes. 
+ if args[1].opsize == sub(szm, 2, 2) and + args[2].opsize == sub(szm, 3, 3) then + dopattern(pat, args, sz, params.op, needrex) -- Process pattern. + return + end + else -- Match common operand size. + local szp = sz + if szm == "" then szm = x64 and "qdwb" or "dwb" end -- Default sizes. + if prefix == "1" then szp = args[1].opsize; szmix = nil + elseif prefix == "2" then szp = args[2].opsize; szmix = nil end + if not szmix and (prefix == "." or match(szm, szp or "#")) then + dopattern(pat, args, szp, params.op, needrex) -- Process pattern. + return + end + end + gotmatch = true + end + end + + local msg = "bad operand mode" + if gotmatch then + if szmix then + msg = "mixed operand size" + else + msg = sz and "bad operand size" or "missing operand size" + end + end + + werror(msg.." in `"..opmodestr(params.op, args).."'") +end + +------------------------------------------------------------------------------ + +-- x64-specific opcode for 64 bit immediates and displacements. +if x64 then + function map_op.mov64_2(params) + if not params then return { "reg, imm", "reg, [disp]", "[disp], reg" } end + if secpos+2 > maxsecpos then wflush() end + local opcode, op64, sz, rex + local op64 = match(params[1], "^%[%s*(.-)%s*%]$") + if op64 then + local a = parseoperand(params[2]) + if a.mode ~= "rmR" then werror("bad operand mode") end + sz = a.opsize + rex = sz == "q" and 8 or 0 + opcode = 0xa3 + else + op64 = match(params[2], "^%[%s*(.-)%s*%]$") + local a = parseoperand(params[1]) + if op64 then + if a.mode ~= "rmR" then werror("bad operand mode") end + sz = a.opsize + rex = sz == "q" and 8 or 0 + opcode = 0xa1 + else + if sub(a.mode, 1, 1) ~= "r" or a.opsize ~= "q" then + werror("bad operand mode") + end + op64 = params[2] + opcode = 0xb8 + (a.reg%8) -- !x64: no VREG support. + rex = a.reg > 7 and 9 or 8 + end + end + wputop(sz, opcode, rex) + waction("IMM_D", format("(unsigned int)(%s)", op64)) + waction("IMM_D", format("(unsigned int)((%s)>>32)", op64)) + end +end + +------------------------------------------------------------------------------ + +-- Pseudo-opcodes for data storage. +local function op_data(params) + if not params then return "imm..." end + local sz = sub(params.op, 2, 2) + if sz == "a" then sz = addrsize end + for _,p in ipairs(params) do + local a = parseoperand(p) + if sub(a.mode, 1, 1) ~= "i" or (a.opsize and a.opsize ~= sz) then + werror("bad mode or size in `"..p.."'") + end + if a.mode == "iJ" then + wputlabel("IMM_", a.imm, 1) + else + wputszarg(sz, a.imm) + end + if secpos+2 > maxsecpos then wflush() end + end +end + +map_op[".byte_*"] = op_data +map_op[".sbyte_*"] = op_data +map_op[".word_*"] = op_data +map_op[".dword_*"] = op_data +map_op[".aword_*"] = op_data + +------------------------------------------------------------------------------ + +-- Pseudo-opcode to mark the position where the action list is to be emitted. +map_op[".actionlist_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeactions(out, name) end) +end + +-- Pseudo-opcode to mark the position where the global enum is to be emitted. +map_op[".globals_1"] = function(params) + if not params then return "prefix" end + local prefix = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobals(out, prefix) end) +end + +-- Pseudo-opcode to mark the position where the global names are to be emitted. 
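In a .dasc source these emit-position markers typically appear together near the top; the identifiers below are only placeholders, the real names are whatever the embedding C program expects:

    |.actionlist actions
    |.globals lbl_
    |.globalnames lbl_names
    |.externnames extern_names

dynasm.lua then writes the action list, the global-label enum and the two name arrays into the generated C file at exactly these positions.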
+map_op[".globalnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeglobalnames(out, name) end) +end + +-- Pseudo-opcode to mark the position where the extern names are to be emitted. +map_op[".externnames_1"] = function(params) + if not params then return "cvar" end + local name = params[1] -- No syntax check. You get to keep the pieces. + wline(function(out) writeexternnames(out, name) end) +end + +------------------------------------------------------------------------------ + +-- Label pseudo-opcode (converted from trailing colon form). +map_op[".label_2"] = function(params) + if not params then return "[1-9] | ->global | =>pcexpr [, addr]" end + if secpos+2 > maxsecpos then wflush() end + local a = parseoperand(params[1]) + local mode, imm = a.mode, a.imm + if type(imm) == "number" and (mode == "iJ" or (imm >= 1 and imm <= 9)) then + -- Local label (1: ... 9:) or global label (->global:). + waction("LABEL_LG", nil, 1) + wputxb(imm) + elseif mode == "iJ" then + -- PC label (=>pcexpr:). + waction("LABEL_PC", imm) + else + werror("bad label definition") + end + -- SETLABEL must immediately follow LABEL_LG/LABEL_PC. + local addr = params[2] + if addr then + local a = parseoperand(addr) + if a.mode == "iPJ" then + waction("SETLABEL", a.imm) + else + werror("bad label assignment") + end + end +end +map_op[".label_1"] = map_op[".label_2"] + +------------------------------------------------------------------------------ + +-- Alignment pseudo-opcode. +map_op[".align_1"] = function(params) + if not params then return "numpow2" end + if secpos+1 > maxsecpos then wflush() end + local align = tonumber(params[1]) or map_opsizenum[map_opsize[params[1]]] + if align then + local x = align + -- Must be a power of 2 in the range (2 ... 256). + for i=1,8 do + x = x / 2 + if x == 1 then + waction("ALIGN", nil, 1) + wputxb(align-1) -- Action byte is 2**n-1. + return + end + end + end + werror("bad alignment") +end + +-- Spacing pseudo-opcode. +map_op[".space_2"] = function(params) + if not params then return "num [, filler]" end + if secpos+1 > maxsecpos then wflush() end + waction("SPACE", params[1]) + local fill = params[2] + if fill then + fill = tonumber(fill) + if not fill or fill < 0 or fill > 255 then werror("bad filler") end + end + wputxb(fill or 0) +end +map_op[".space_1"] = map_op[".space_2"] + +------------------------------------------------------------------------------ + +-- Pseudo-opcode for (primitive) type definitions (map to C types). +map_op[".type_3"] = function(params, nparams) + if not params then + return nparams == 2 and "name, ctype" or "name, ctype, reg" + end + local name, ctype, reg = params[1], params[2], params[3] + if not match(name, "^[%a_][%w_]*$") then + werror("bad type name `"..name.."'") + end + local tp = map_type[name] + if tp then + werror("duplicate type `"..name.."'") + end + if reg and not map_reg_valid_base[reg] then + werror("bad base register `"..(map_reg_rev[reg] or reg).."'") + end + -- Add #type to defines. A bit unclean to put it in map_archdef. + map_archdef["#"..name] = "sizeof("..ctype..")" + -- Add new type and emit shortcut define. + local num = ctypenum + 1 + map_type[name] = { + ctype = ctype, + ctypefmt = format("Dt%X(%%s)", num), + reg = reg, + } + wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype)) + ctypenum = num +end +map_op[".type_2"] = map_op[".type_3"] + +-- Dump type definitions. 
+local function dumptypes(out, lvl) + local t = {} + for name in pairs(map_type) do t[#t+1] = name end + sort(t) + out:write("Type definitions:\n") + for _,name in ipairs(t) do + local tp = map_type[name] + local reg = tp.reg and map_reg_rev[tp.reg] or "" + out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Set the current section. +function _M.section(num) + waction("SECTION") + wputxb(num) + wflush(true) -- SECTION is a terminal action. +end + +------------------------------------------------------------------------------ + +-- Dump architecture description. +function _M.dumparch(out) + out:write(format("DynASM %s version %s, released %s\n\n", + _info.arch, _info.version, _info.release)) + dumpregs(out) + dumpactions(out) +end + +-- Dump all user defined elements. +function _M.dumpdef(out, lvl) + dumptypes(out, lvl) + dumpglobals(out, lvl) + dumpexterns(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Pass callbacks from/to the DynASM core. +function _M.passcb(wl, we, wf, ww) + wline, werror, wfatal, wwarn = wl, we, wf, ww + return wflush +end + +-- Setup the arch-specific module. +function _M.setup(arch, opt) + g_arch, g_opt = arch, opt +end + +-- Merge the core maps and the arch-specific maps. +function _M.mergemaps(map_coreop, map_def) + setmetatable(map_op, { __index = map_coreop }) + setmetatable(map_def, { __index = map_archdef }) + return map_op, map_def +end + +return _M + +------------------------------------------------------------------------------ + diff --git a/src/luajit2/dynasm/dynasm.lua b/src/luajit2/dynasm/dynasm.lua new file mode 100644 index 0000000000..8ff9851324 --- /dev/null +++ b/src/luajit2/dynasm/dynasm.lua @@ -0,0 +1,1076 @@ +------------------------------------------------------------------------------ +-- DynASM. A dynamic assembler for code generation engines. +-- Originally designed and implemented for LuaJIT. +-- +-- Copyright (C) 2005-2011 Mike Pall. All rights reserved. +-- See below for full copyright notice. +------------------------------------------------------------------------------ + +-- Application information. +local _info = { + name = "DynASM", + description = "A dynamic assembler for code generation engines", + version = "1.3.0", + vernum = 10300, + release = "2011-05-05", + author = "Mike Pall", + url = "http://luajit.org/dynasm.html", + license = "MIT", + copyright = [[ +Copyright (C) 2005-2011 Mike Pall. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +[ MIT license: http://www.opensource.org/licenses/mit-license.php ] +]], +} + +-- Cache library functions. +local type, pairs, ipairs = type, pairs, ipairs +local pcall, error, assert = pcall, error, assert +local _s = string +local sub, match, gmatch, gsub = _s.sub, _s.match, _s.gmatch, _s.gsub +local format, rep, upper = _s.format, _s.rep, _s.upper +local _t = table +local insert, remove, concat, sort = _t.insert, _t.remove, _t.concat, _t.sort +local exit = os.exit +local io = io +local stdin, stdout, stderr = io.stdin, io.stdout, io.stderr + +------------------------------------------------------------------------------ + +-- Program options. +local g_opt = {} + +-- Global state for current file. +local g_fname, g_curline, g_indent, g_lineno, g_synclineno, g_arch +local g_errcount = 0 + +-- Write buffer for output file. +local g_wbuffer, g_capbuffer + +------------------------------------------------------------------------------ + +-- Write an output line (or callback function) to the buffer. +local function wline(line, needindent) + local buf = g_capbuffer or g_wbuffer + buf[#buf+1] = needindent and g_indent..line or line + g_synclineno = g_synclineno + 1 +end + +-- Write assembler line as a comment, if requestd. +local function wcomment(aline) + if g_opt.comment then + wline(g_opt.comment..aline..g_opt.endcomment, true) + end +end + +-- Resync CPP line numbers. +local function wsync() + if g_synclineno ~= g_lineno and g_opt.cpp then + wline("# "..g_lineno..' "'..g_fname..'"') + g_synclineno = g_lineno + end +end + +-- Dummy action flush function. Replaced with arch-specific function later. +local function wflush(term) +end + +-- Dump all buffered output lines. +local function wdumplines(out, buf) + for _,line in ipairs(buf) do + if type(line) == "string" then + assert(out:write(line, "\n")) + else + -- Special callback to dynamically insert lines after end of processing. + line(out) + end + end +end + +------------------------------------------------------------------------------ + +-- Emit an error. Processing continues with next statement. +local function werror(msg) + error(format("%s:%s: error: %s:\n%s", g_fname, g_lineno, msg, g_curline), 0) +end + +-- Emit a fatal error. Processing stops. +local function wfatal(msg) + g_errcount = "fatal" + werror(msg) +end + +-- Print a warning. Processing continues. +local function wwarn(msg) + stderr:write(format("%s:%s: warning: %s:\n%s\n", + g_fname, g_lineno, msg, g_curline)) +end + +-- Print caught error message. But suppress excessive errors. +local function wprinterr(...) + if type(g_errcount) == "number" then + -- Regular error. + g_errcount = g_errcount + 1 + if g_errcount < 21 then -- Seems to be a reasonable limit. + stderr:write(...) + elseif g_errcount == 21 then + stderr:write(g_fname, + ":*: warning: too many errors (suppressed further messages).\n") + end + else + -- Fatal error. + stderr:write(...) + return true -- Stop processing. + end +end + +------------------------------------------------------------------------------ + +-- Map holding all option handlers. +local opt_map = {} +local opt_current + +-- Print error and exit with error status. +local function opterror(...) + stderr:write("dynasm.lua: ERROR: ", ...) 
+ stderr:write("\n") + exit(1) +end + +-- Get option parameter. +local function optparam(args) + local argn = args.argn + local p = args[argn] + if not p then + opterror("missing parameter for option `", opt_current, "'.") + end + args.argn = argn + 1 + return p +end + +------------------------------------------------------------------------------ + +-- Core pseudo-opcodes. +local map_coreop = {} +-- Dummy opcode map. Replaced by arch-specific map. +local map_op = {} + +-- Forward declarations. +local dostmt +local readfile + +------------------------------------------------------------------------------ + +-- Map for defines (initially empty, chains to arch-specific map). +local map_def = {} + +-- Pseudo-opcode to define a substitution. +map_coreop[".define_2"] = function(params, nparams) + if not params then return nparams == 1 and "name" or "name, subst" end + local name, def = params[1], params[2] or "1" + if not match(name, "^[%a_][%w_]*$") then werror("bad or duplicate define") end + map_def[name] = def +end +map_coreop[".define_1"] = map_coreop[".define_2"] + +-- Define a substitution on the command line. +function opt_map.D(args) + local namesubst = optparam(args) + local name, subst = match(namesubst, "^([%a_][%w_]*)=(.*)$") + if name then + map_def[name] = subst + elseif match(namesubst, "^[%a_][%w_]*$") then + map_def[namesubst] = "1" + else + opterror("bad define") + end +end + +-- Undefine a substitution on the command line. +function opt_map.U(args) + local name = optparam(args) + if match(name, "^[%a_][%w_]*$") then + map_def[name] = nil + else + opterror("bad define") + end +end + +-- Helper for definesubst. +local gotsubst + +local function definesubst_one(word) + local subst = map_def[word] + if subst then gotsubst = word; return subst else return word end +end + +-- Iteratively substitute defines. +local function definesubst(stmt) + -- Limit number of iterations. + for i=1,100 do + gotsubst = false + stmt = gsub(stmt, "#?[%w_]+", definesubst_one) + if not gotsubst then break end + end + if gotsubst then wfatal("recursive define involving `"..gotsubst.."'") end + return stmt +end + +-- Dump all defines. +local function dumpdefines(out, lvl) + local t = {} + for name in pairs(map_def) do + t[#t+1] = name + end + sort(t) + out:write("Defines:\n") + for _,name in ipairs(t) do + local subst = map_def[name] + if g_arch then subst = g_arch.revdef(subst) end + out:write(format(" %-20s %s\n", name, subst)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Support variables for conditional assembly. +local condlevel = 0 +local condstack = {} + +-- Evaluate condition with a Lua expression. Substitutions already performed. +local function cond_eval(cond) + local func, err = loadstring("return "..cond) + if func then + setfenv(func, {}) -- No globals. All unknown identifiers evaluate to nil. + local ok, res = pcall(func) + if ok then + if res == 0 then return false end -- Oh well. + return not not res + end + err = res + end + wfatal("bad condition: "..err) +end + +-- Skip statements until next conditional pseudo-opcode at the same level. 
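Tying defines and conditionals together: a symbol can be set on the command line (e.g. `dynasm.lua -D X64 ...` defines X64 as 1) or with .define, and the conditional pseudo-ops below evaluate it as a plain Lua expression:

    |.if X64
    |  ... 64 bit code path ...
    |.else
    |  ... 32 bit code path ...
    |.endif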
+local function stmtskip() + local dostmt_save = dostmt + local lvl = 0 + dostmt = function(stmt) + local op = match(stmt, "^%s*(%S+)") + if op == ".if" then + lvl = lvl + 1 + elseif lvl ~= 0 then + if op == ".endif" then lvl = lvl - 1 end + elseif op == ".elif" or op == ".else" or op == ".endif" then + dostmt = dostmt_save + dostmt(stmt) + end + end +end + +-- Pseudo-opcodes for conditional assembly. +map_coreop[".if_1"] = function(params) + if not params then return "condition" end + local lvl = condlevel + 1 + local res = cond_eval(params[1]) + condlevel = lvl + condstack[lvl] = res + if not res then stmtskip() end +end + +map_coreop[".elif_1"] = function(params) + if not params then return "condition" end + if condlevel == 0 then wfatal(".elif without .if") end + local lvl = condlevel + local res = condstack[lvl] + if res then + if res == "else" then wfatal(".elif after .else") end + else + res = cond_eval(params[1]) + if res then + condstack[lvl] = res + return + end + end + stmtskip() +end + +map_coreop[".else_0"] = function(params) + if condlevel == 0 then wfatal(".else without .if") end + local lvl = condlevel + local res = condstack[lvl] + condstack[lvl] = "else" + if res then + if res == "else" then wfatal(".else after .else") end + stmtskip() + end +end + +map_coreop[".endif_0"] = function(params) + local lvl = condlevel + if lvl == 0 then wfatal(".endif without .if") end + condlevel = lvl - 1 +end + +-- Check for unfinished conditionals. +local function checkconds() + if g_errcount ~= "fatal" and condlevel ~= 0 then + wprinterr(g_fname, ":*: error: unbalanced conditional\n") + end +end + +------------------------------------------------------------------------------ + +-- Search for a file in the given path and open it for reading. +local function pathopen(path, name) + local dirsep = match(package.path, "\\") and "\\" or "/" + for _,p in ipairs(path) do + local fullname = p == "" and name or p..dirsep..name + local fin = io.open(fullname, "r") + if fin then + g_fname = fullname + return fin + end + end +end + +-- Include a file. +map_coreop[".include_1"] = function(params) + if not params then return "filename" end + local name = params[1] + -- Save state. Ugly, I know. but upvalues are fast. + local gf, gl, gcl, gi = g_fname, g_lineno, g_curline, g_indent + -- Read the included file. + local fatal = readfile(pathopen(g_opt.include, name) or + wfatal("include file `"..name.."' not found")) + -- Restore state. + g_synclineno = -1 + g_fname, g_lineno, g_curline, g_indent = gf, gl, gcl, gi + if fatal then wfatal("in include file") end +end + +-- Make .include and conditionals initially available, too. +map_op[".include_1"] = map_coreop[".include_1"] +map_op[".if_1"] = map_coreop[".if_1"] +map_op[".elif_1"] = map_coreop[".elif_1"] +map_op[".else_0"] = map_coreop[".else_0"] +map_op[".endif_0"] = map_coreop[".endif_0"] + +------------------------------------------------------------------------------ + +-- Support variables for macros. +local mac_capture, mac_lineno, mac_name +local mac_active = {} +local mac_list = {} + +-- Pseudo-opcode to define a macro. +map_coreop[".macro_*"] = function(mparams) + if not mparams then return "name [, params...]" end + -- Split off and validate macro name. + local name = remove(mparams, 1) + if not name then werror("missing macro name") end + if not (match(name, "^[%a_][%w_%.]*$") or match(name, "^%.[%w_%.]+$")) then + wfatal("bad macro name `"..name.."'") + end + -- Validate macro parameter names. 
+ local mdup = {} + for _,mp in ipairs(mparams) do + if not match(mp, "^[%a_][%w_]*$") then + wfatal("bad macro parameter name `"..mp.."'") + end + if mdup[mp] then wfatal("duplicate macro parameter name `"..mp.."'") end + mdup[mp] = true + end + -- Check for duplicate or recursive macro definitions. + local opname = name.."_"..#mparams + if map_op[opname] or map_op[name.."_*"] then + wfatal("duplicate macro `"..name.."' ("..#mparams.." parameters)") + end + if mac_capture then wfatal("recursive macro definition") end + + -- Enable statement capture. + local lines = {} + mac_lineno = g_lineno + mac_name = name + mac_capture = function(stmt) -- Statement capture function. + -- Stop macro definition with .endmacro pseudo-opcode. + if not match(stmt, "^%s*.endmacro%s*$") then + lines[#lines+1] = stmt + return + end + mac_capture = nil + mac_lineno = nil + mac_name = nil + mac_list[#mac_list+1] = opname + -- Add macro-op definition. + map_op[opname] = function(params) + if not params then return mparams, lines end + -- Protect against recursive macro invocation. + if mac_active[opname] then wfatal("recursive macro invocation") end + mac_active[opname] = true + -- Setup substitution map. + local subst = {} + for i,mp in ipairs(mparams) do subst[mp] = params[i] end + local mcom + if g_opt.maccomment and g_opt.comment then + mcom = " MACRO "..name.." ("..#mparams..")" + wcomment("{"..mcom) + end + -- Loop through all captured statements + for _,stmt in ipairs(lines) do + -- Substitute macro parameters. + local st = gsub(stmt, "[%w_]+", subst) + st = definesubst(st) + st = gsub(st, "%s*%.%.%s*", "") -- Token paste a..b. + if mcom and sub(st, 1, 1) ~= "|" then wcomment(st) end + -- Emit statement. Use a protected call for better diagnostics. + local ok, err = pcall(dostmt, st) + if not ok then + -- Add the captured statement to the error. + wprinterr(err, "\n", g_indent, "| ", stmt, + "\t[MACRO ", name, " (", #mparams, ")]\n") + end + end + if mcom then wcomment("}"..mcom) end + mac_active[opname] = nil + end + end +end + +-- An .endmacro pseudo-opcode outside of a macro definition is an error. +map_coreop[".endmacro_0"] = function(params) + wfatal(".endmacro without .macro") +end + +-- Dump all macros and their contents (with -PP only). +local function dumpmacros(out, lvl) + sort(mac_list) + out:write("Macros:\n") + for _,opname in ipairs(mac_list) do + local name = sub(opname, 1, -3) + local params, lines = map_op[opname]() + out:write(format(" %-20s %s\n", name, concat(params, ", "))) + if lvl > 1 then + for _,line in ipairs(lines) do + out:write(" |", line, "\n") + end + out:write("\n") + end + end + out:write("\n") +end + +-- Check for unfinished macro definitions. +local function checkmacros() + if mac_capture then + wprinterr(g_fname, ":", mac_lineno, + ": error: unfinished .macro `", mac_name ,"'\n") + end +end + +------------------------------------------------------------------------------ + +-- Support variables for captures. +local cap_lineno, cap_name +local cap_buffers = {} +local cap_used = {} + +-- Start a capture. +map_coreop[".capture_1"] = function(params) + if not params then return "name" end + wflush() + local name = params[1] + if not match(name, "^[%a_][%w_]*$") then + wfatal("bad capture name `"..name.."'") + end + if cap_name then + wfatal("already capturing to `"..cap_name.."' since line "..cap_lineno) + end + cap_name = name + cap_lineno = g_lineno + -- Create or continue a capture buffer and start the output line capture. 
+ local buf = cap_buffers[name] + if not buf then buf = {}; cap_buffers[name] = buf end + g_capbuffer = buf + g_synclineno = 0 +end + +-- Stop a capture. +map_coreop[".endcapture_0"] = function(params) + wflush() + if not cap_name then wfatal(".endcapture without a valid .capture") end + cap_name = nil + cap_lineno = nil + g_capbuffer = nil + g_synclineno = 0 +end + +-- Dump a capture buffer. +map_coreop[".dumpcapture_1"] = function(params) + if not params then return "name" end + wflush() + local name = params[1] + if not match(name, "^[%a_][%w_]*$") then + wfatal("bad capture name `"..name.."'") + end + cap_used[name] = true + wline(function(out) + local buf = cap_buffers[name] + if buf then wdumplines(out, buf) end + end) + g_synclineno = 0 +end + +-- Dump all captures and their buffers (with -PP only). +local function dumpcaptures(out, lvl) + out:write("Captures:\n") + for name,buf in pairs(cap_buffers) do + out:write(format(" %-20s %4s)\n", name, "("..#buf)) + if lvl > 1 then + local bar = rep("=", 76) + out:write(" ", bar, "\n") + for _,line in ipairs(buf) do + out:write(" ", line, "\n") + end + out:write(" ", bar, "\n\n") + end + end + out:write("\n") +end + +-- Check for unfinished or unused captures. +local function checkcaptures() + if cap_name then + wprinterr(g_fname, ":", cap_lineno, + ": error: unfinished .capture `", cap_name,"'\n") + return + end + for name in pairs(cap_buffers) do + if not cap_used[name] then + wprinterr(g_fname, ":*: error: missing .dumpcapture ", name ,"\n") + end + end +end + +------------------------------------------------------------------------------ + +-- Sections names. +local map_sections = {} + +-- Pseudo-opcode to define code sections. +-- TODO: Data sections, BSS sections. Needs extra C code and API. +map_coreop[".section_*"] = function(params) + if not params then return "name..." end + if #map_sections > 0 then werror("duplicate section definition") end + wflush() + for sn,name in ipairs(params) do + local opname = "."..name.."_0" + if not match(name, "^[%a][%w_]*$") or + map_op[opname] or map_op["."..name.."_*"] then + werror("bad section name `"..name.."'") + end + map_sections[#map_sections+1] = name + wline(format("#define DASM_SECTION_%s\t%d", upper(name), sn-1)) + map_op[opname] = function(params) g_arch.section(sn-1) end + end + wline(format("#define DASM_MAXSECTION\t\t%d", #map_sections)) +end + +-- Dump all sections. +local function dumpsections(out, lvl) + out:write("Sections:\n") + for _,name in ipairs(map_sections) do + out:write(format(" %s\n", name)) + end + out:write("\n") +end + +------------------------------------------------------------------------------ + +-- Load architecture-specific module. +local function loadarch(arch) + if not match(arch, "^[%w_]+$") then return "bad arch name" end + local ok, m_arch = pcall(require, "dasm_"..arch) + if not ok then return "cannot load module: "..m_arch end + g_arch = m_arch + wflush = m_arch.passcb(wline, werror, wfatal, wwarn) + m_arch.setup(arch, g_opt) + map_op, map_def = m_arch.mergemaps(map_coreop, map_def) +end + +-- Dump architecture description. 
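+-- Reached via the -A/--dumparch ARCH command line option; loads the given
+-- architecture module first if no .arch directive has been processed yet.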
+function opt_map.dumparch(args) + local name = optparam(args) + if not g_arch then + local err = loadarch(name) + if err then opterror(err) end + end + + local t = {} + for name in pairs(map_coreop) do t[#t+1] = name end + for name in pairs(map_op) do t[#t+1] = name end + sort(t) + + local out = stdout + local _arch = g_arch._info + out:write(format("%s version %s, released %s, %s\n", + _info.name, _info.version, _info.release, _info.url)) + g_arch.dumparch(out) + + local pseudo = true + out:write("Pseudo-Opcodes:\n") + for _,sname in ipairs(t) do + local name, nparam = match(sname, "^(.+)_([0-9%*])$") + if name then + if pseudo and sub(name, 1, 1) ~= "." then + out:write("\nOpcodes:\n") + pseudo = false + end + local f = map_op[sname] + local s + if nparam ~= "*" then nparam = nparam + 0 end + if nparam == 0 then + s = "" + elseif type(f) == "string" then + s = map_op[".template__"](nil, f, nparam) + else + s = f(nil, nparam) + end + if type(s) == "table" then + for _,s2 in ipairs(s) do + out:write(format(" %-12s %s\n", name, s2)) + end + else + out:write(format(" %-12s %s\n", name, s)) + end + end + end + out:write("\n") + exit(0) +end + +-- Pseudo-opcode to set the architecture. +-- Only initially available (map_op is replaced when called). +map_op[".arch_1"] = function(params) + if not params then return "name" end + local err = loadarch(params[1]) + if err then wfatal(err) end +end + +-- Dummy .arch pseudo-opcode to improve the error report. +map_coreop[".arch_1"] = function(params) + if not params then return "name" end + wfatal("duplicate .arch statement") +end + +------------------------------------------------------------------------------ + +-- Dummy pseudo-opcode. Don't confuse '.nop' with 'nop'. +map_coreop[".nop_*"] = function(params) + if not params then return "[ignored...]" end +end + +-- Pseudo-opcodes to raise errors. +map_coreop[".error_1"] = function(params) + if not params then return "message" end + werror(params[1]) +end + +map_coreop[".fatal_1"] = function(params) + if not params then return "message" end + wfatal(params[1]) +end + +-- Dump all user defined elements. +local function dumpdef(out) + local lvl = g_opt.dumpdef + if lvl == 0 then return end + dumpsections(out, lvl) + dumpdefines(out, lvl) + if g_arch then g_arch.dumpdef(out, lvl) end + dumpmacros(out, lvl) + dumpcaptures(out, lvl) +end + +------------------------------------------------------------------------------ + +-- Helper for splitstmt. +local splitlvl + +local function splitstmt_one(c) + if c == "(" then + splitlvl = ")"..splitlvl + elseif c == "[" then + splitlvl = "]"..splitlvl + elseif c == "{" then + splitlvl = "}"..splitlvl + elseif c == ")" or c == "]" or c == "}" then + if sub(splitlvl, 1, 1) ~= c then werror("unbalanced (), [] or {}") end + splitlvl = sub(splitlvl, 2) + elseif splitlvl == "" then + return " \0 " + end + return c +end + +-- Split statement into (pseudo-)opcode and params. +local function splitstmt(stmt) + -- Convert label with trailing-colon into .label statement. + local label = match(stmt, "^%s*(.+):%s*$") + if label then return ".label", {label} end + + -- Split at commas and equal signs, but obey parentheses and brackets. + splitlvl = "" + stmt = gsub(stmt, "[,%(%)%[%]{}]", splitstmt_one) + if splitlvl ~= "" then werror("unbalanced () or []") end + + -- Split off opcode. + local op, other = match(stmt, "^%s*([^%s%z]+)%s*(.*)$") + if not op then werror("bad statement syntax") end + + -- Split parameters. 
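+ -- Top-level commas were replaced with embedded \0 markers by splitstmt_one
+ -- above, so splitting on them here respects (), [] and {} nesting.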
+ local params = {} + for p in gmatch(other, "%s*(%Z+)%z?") do + params[#params+1] = gsub(p, "%s+$", "") + end + if #params > 16 then werror("too many parameters") end + + params.op = op + return op, params +end + +-- Process a single statement. +dostmt = function(stmt) + -- Ignore empty statements. + if match(stmt, "^%s*$") then return end + + -- Capture macro defs before substitution. + if mac_capture then return mac_capture(stmt) end + stmt = definesubst(stmt) + + -- Emit C code without parsing the line. + if sub(stmt, 1, 1) == "|" then + local tail = sub(stmt, 2) + wflush() + if sub(tail, 1, 2) == "//" then wcomment(tail) else wline(tail, true) end + return + end + + -- Split into (pseudo-)opcode and params. + local op, params = splitstmt(stmt) + + -- Get opcode handler (matching # of parameters or generic handler). + local f = map_op[op.."_"..#params] or map_op[op.."_*"] + if not f then + if not g_arch then wfatal("first statement must be .arch") end + -- Improve error report. + for i=0,9 do + if map_op[op.."_"..i] then + werror("wrong number of parameters for `"..op.."'") + end + end + werror("unknown statement `"..op.."'") + end + + -- Call opcode handler or special handler for template strings. + if type(f) == "string" then + map_op[".template__"](params, f) + else + f(params) + end +end + +-- Process a single line. +local function doline(line) + if g_opt.flushline then wflush() end + + -- Assembler line? + local indent, aline = match(line, "^(%s*)%|(.*)$") + if not aline then + -- No, plain C code line, need to flush first. + wflush() + wsync() + wline(line, false) + return + end + + g_indent = indent -- Remember current line indentation. + + -- Emit C code (even from macros). Avoids echo and line parsing. + if sub(aline, 1, 1) == "|" then + if not mac_capture then + wsync() + elseif g_opt.comment then + wsync() + wcomment(aline) + end + dostmt(aline) + return + end + + -- Echo assembler line as a comment. + if g_opt.comment then + wsync() + wcomment(aline) + end + + -- Strip assembler comments. + aline = gsub(aline, "//.*$", "") + + -- Split line into statements at semicolons. + if match(aline, ";") then + for stmt in gmatch(aline, "[^;]+") do dostmt(stmt) end + else + dostmt(aline) + end +end + +------------------------------------------------------------------------------ + +-- Write DynASM header. +local function dasmhead(out) + out:write(format([[ +/* +** This file has been pre-processed with DynASM. +** %s +** DynASM version %s, DynASM %s version %s +** DO NOT EDIT! The original file is in "%s". +*/ + +#if DASM_VERSION != %d +#error "Version mismatch between DynASM and included encoding engine" +#endif + +]], _info.url, + _info.version, g_arch._info.arch, g_arch._info.version, + g_fname, _info.vernum)) +end + +-- Read input file. +readfile = function(fin) + g_indent = "" + g_lineno = 0 + g_synclineno = -1 + + -- Process all lines. + for line in fin:lines() do + g_lineno = g_lineno + 1 + g_curline = line + local ok, err = pcall(doline, line) + if not ok and wprinterr(err, "\n") then return true end + end + wflush() + + -- Close input file. + assert(fin == stdin or fin:close()) +end + +-- Write output file. +local function writefile(outfile) + local fout + + -- Open output file. + if outfile == nil or outfile == "-" then + fout = stdout + else + fout = assert(io.open(outfile, "w")) + end + + -- Write all buffered lines + wdumplines(fout, g_wbuffer) + + -- Close output file. + assert(fout == stdout or fout:close()) + + -- Optionally dump definitions. 
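+ -- (written to stderr when the generated code itself goes to stdout, so the
+ -- dump is not mixed into the output file)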
+ dumpdef(fout == stdout and stderr or stdout) +end + +-- Translate an input file to an output file. +local function translate(infile, outfile) + g_wbuffer = {} + g_indent = "" + g_lineno = 0 + g_synclineno = -1 + + -- Put header. + wline(dasmhead) + + -- Read input file. + local fin + if infile == "-" then + g_fname = "(stdin)" + fin = stdin + else + g_fname = infile + fin = assert(io.open(infile, "r")) + end + readfile(fin) + + -- Check for errors. + if not g_arch then + wprinterr(g_fname, ":*: error: missing .arch directive\n") + end + checkconds() + checkmacros() + checkcaptures() + + if g_errcount ~= 0 then + stderr:write(g_fname, ":*: info: ", g_errcount, " error", + (type(g_errcount) == "number" and g_errcount > 1) and "s" or "", + " in input file -- no output file generated.\n") + dumpdef(stderr) + exit(1) + end + + -- Write output file. + writefile(outfile) +end + +------------------------------------------------------------------------------ + +-- Print help text. +function opt_map.help() + stdout:write("DynASM -- ", _info.description, ".\n") + stdout:write("DynASM ", _info.version, " ", _info.release, " ", _info.url, "\n") + stdout:write[[ + +Usage: dynasm [OPTION]... INFILE.dasc|- + + -h, --help Display this help text. + -V, --version Display version and copyright information. + + -o, --outfile FILE Output file name (default is stdout). + -I, --include DIR Add directory to the include search path. + + -c, --ccomment Use /* */ comments for assembler lines. + -C, --cppcomment Use // comments for assembler lines (default). + -N, --nocomment Suppress assembler lines in output. + -M, --maccomment Show macro expansions as comments (default off). + + -L, --nolineno Suppress CPP line number information in output. + -F, --flushline Flush action list for every line. + + -D NAME[=SUBST] Define a substitution. + -U NAME Undefine a substitution. + + -P, --dumpdef Dump defines, macros, etc. Repeat for more output. + -A, --dumparch ARCH Load architecture ARCH and dump description. +]] + exit(0) +end + +-- Print version information. +function opt_map.version() + stdout:write(format("%s version %s, released %s\n%s\n\n%s", + _info.name, _info.version, _info.release, _info.url, _info.copyright)) + exit(0) +end + +-- Misc. options. +function opt_map.outfile(args) g_opt.outfile = optparam(args) end +function opt_map.include(args) insert(g_opt.include, 1, optparam(args)) end +function opt_map.ccomment() g_opt.comment = "/*|"; g_opt.endcomment = " */" end +function opt_map.cppcomment() g_opt.comment = "//|"; g_opt.endcomment = "" end +function opt_map.nocomment() g_opt.comment = false end +function opt_map.maccomment() g_opt.maccomment = true end +function opt_map.nolineno() g_opt.cpp = false end +function opt_map.flushline() g_opt.flushline = true end +function opt_map.dumpdef() g_opt.dumpdef = g_opt.dumpdef + 1 end + +------------------------------------------------------------------------------ + +-- Short aliases for long options. +local opt_alias = { + h = "help", ["?"] = "help", V = "version", + o = "outfile", I = "include", + c = "ccomment", C = "cppcomment", N = "nocomment", M = "maccomment", + L = "nolineno", F = "flushline", + P = "dumpdef", A = "dumparch", +} + +-- Parse single option. +local function parseopt(opt, args) + opt_current = #opt == 1 and "-"..opt or "--"..opt + local f = opt_map[opt] or opt_map[opt_alias[opt]] + if not f then + opterror("unrecognized option `", opt_current, "'. Try `--help'.\n") + end + f(args) +end + +-- Parse arguments. 
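+-- Short options may be bundled (e.g. -LN) and long options use the --name
+-- form. An illustrative invocation (file names are examples only):
+--   lua dynasm.lua -LN -D FFI -o buildvm_x86.h buildvm_x86.dasc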
+local function parseargs(args) + -- Default options. + g_opt.comment = "//|" + g_opt.endcomment = "" + g_opt.cpp = true + g_opt.dumpdef = 0 + g_opt.include = { "" } + + -- Process all option arguments. + args.argn = 1 + repeat + local a = args[args.argn] + if not a then break end + local lopt, opt = match(a, "^%-(%-?)(.+)") + if not opt then break end + args.argn = args.argn + 1 + if lopt == "" then + -- Loop through short options. + for o in gmatch(opt, ".") do parseopt(o, args) end + else + -- Long option. + parseopt(opt, args) + end + until false + + -- Check for proper number of arguments. + local nargs = #args - args.argn + 1 + if nargs ~= 1 then + if nargs == 0 then + if g_opt.dumpdef > 0 then return dumpdef(stdout) end + end + opt_map.help() + end + + -- Translate a single input file to a single output file + -- TODO: Handle multiple files? + translate(args[args.argn], g_opt.outfile) +end + +------------------------------------------------------------------------------ + +-- Add the directory dynasm.lua resides in to the Lua module search path. +local arg = arg +if arg and arg[0] then + local prefix = match(arg[0], "^(.*[/\\])") + if prefix then package.path = prefix.."?.lua;"..package.path end +end + +-- Start DynASM. +parseargs{...} + +------------------------------------------------------------------------------ + diff --git a/src/luajit2/src/buildvm.c b/src/luajit2/src/buildvm.c new file mode 100644 index 0000000000..33f87a4b37 --- /dev/null +++ b/src/luajit2/src/buildvm.c @@ -0,0 +1,506 @@ +/* +** LuaJIT VM builder. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** This is a tool to build the hand-tuned assembler code required for +** LuaJIT's bytecode interpreter. It supports a variety of output formats +** to feed different toolchains (see usage() below). +** +** This tool is not particularly optimized because it's only used while +** _building_ LuaJIT. There's no point in distributing or installing it. +** Only the object code generated by this tool is linked into LuaJIT. +** +** Caveat: some memory is not free'd, error handling is lazy. +** It's a one-shot tool -- any effort fixing this would be wasted. +*/ + +#include "buildvm.h" +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_bc.h" +#include "lj_ir.h" +#include "lj_ircall.h" +#include "lj_frame.h" +#include "lj_dispatch.h" +#if LJ_HASFFI +#include "lj_ccall.h" +#endif +#include "luajit.h" + +#if defined(_WIN32) +#include <fcntl.h> +#include <io.h> +#endif + +/* ------------------------------------------------------------------------ */ + +/* DynASM glue definitions. */ +#define Dst ctx +#define Dst_DECL BuildCtx *ctx +#define Dst_REF (ctx->D) +#define DASM_CHECKS 1 + +#include "../dynasm/dasm_proto.h" + +/* Glue macros for DynASM. */ +static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type); + +#define DASM_EXTERN(ctx, addr, idx, type) \ + collect_reloc(ctx, addr, idx, type) + +/* ------------------------------------------------------------------------ */ + +/* Avoid trouble if cross-compiling for an x86 target. Speed doesn't matter. */ +#define DASM_ALIGNED_WRITES 1 + +/* Embed architecture-specific DynASM encoder and backend. 
*/ +#if LJ_TARGET_X86 +#include "../dynasm/dasm_x86.h" +#include "buildvm_x86.h" +#elif LJ_TARGET_X64 +#include "../dynasm/dasm_x86.h" +#if LJ_ABI_WIN +#include "buildvm_x64win.h" +#else +#include "buildvm_x64.h" +#endif +#elif LJ_TARGET_ARM +#include "../dynasm/dasm_arm.h" +#include "buildvm_arm.h" +#elif LJ_TARGET_PPCSPE +#include "../dynasm/dasm_ppc.h" +#include "buildvm_ppcspe.h" +#else +#error "No support for this architecture (yet)" +#endif + +/* ------------------------------------------------------------------------ */ + +void owrite(BuildCtx *ctx, const void *ptr, size_t sz) +{ + if (fwrite(ptr, 1, sz, ctx->fp) != sz) { + fprintf(stderr, "Error: cannot write to output file: %s\n", + strerror(errno)); + exit(1); + } +} + +/* ------------------------------------------------------------------------ */ + +/* Emit code as raw bytes. Only used for DynASM debugging. */ +static void emit_raw(BuildCtx *ctx) +{ + owrite(ctx, ctx->code, ctx->codesz); +} + +/* -- Build machine code -------------------------------------------------- */ + +static const char *sym_decorate(BuildCtx *ctx, + const char *prefix, const char *suffix) +{ + char name[256]; + char *p; +#if LJ_64 + const char *symprefix = ctx->mode == BUILD_machasm ? "_" : ""; +#else + const char *symprefix = ctx->mode != BUILD_elfasm ? "_" : ""; +#endif + sprintf(name, "%s%s%s", symprefix, prefix, suffix); + p = strchr(name, '@'); + if (p) { + if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj)) + name[0] = '@'; + else + *p = '\0'; + } + p = (char *)malloc(strlen(name)+1); /* MSVC doesn't like strdup. */ + strcpy(p, name); + return p; +} + +#define NRELOCSYM (sizeof(extnames)/sizeof(extnames[0])-1) + +static int relocmap[NRELOCSYM]; + +/* Collect external relocations. */ +static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type) +{ + if (ctx->nreloc >= BUILD_MAX_RELOC) { + fprintf(stderr, "Error: too many relocations, increase BUILD_MAX_RELOC.\n"); + exit(1); + } + if (relocmap[idx] < 0) { + relocmap[idx] = ctx->nrelocsym; + ctx->relocsym[ctx->nrelocsym] = sym_decorate(ctx, "", extnames[idx]); + ctx->nrelocsym++; + } + ctx->reloc[ctx->nreloc].ofs = (int32_t)(addr - ctx->code); + ctx->reloc[ctx->nreloc].sym = relocmap[idx]; + ctx->reloc[ctx->nreloc].type = type; + ctx->nreloc++; + return 0; /* Encode symbol offset of 0. */ +} + +/* Naive insertion sort. Performance doesn't matter here. */ +static void sym_insert(BuildCtx *ctx, int32_t ofs, + const char *prefix, const char *suffix) +{ + ptrdiff_t i = ctx->nsym++; + while (i > 0) { + if (ctx->sym[i-1].ofs <= ofs) + break; + ctx->sym[i] = ctx->sym[i-1]; + i--; + } + ctx->sym[i].ofs = ofs; + ctx->sym[i].name = sym_decorate(ctx, prefix, suffix); +} + +/* Build the machine code. */ +static int build_code(BuildCtx *ctx) +{ + int status; + int i; + + /* Initialize DynASM structures. */ + ctx->nglob = GLOB__MAX; + ctx->glob = (void **)malloc(ctx->nglob*sizeof(void *)); + memset(ctx->glob, 0, ctx->nglob*sizeof(void *)); + ctx->nreloc = 0; + + ctx->globnames = globnames; + ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *)); + ctx->nrelocsym = 0; + for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1; + + ctx->dasm_ident = DASM_IDENT; + ctx->dasm_arch = DASM_ARCH; + + dasm_init(Dst, DASM_MAXSECTION); + dasm_setupglobal(Dst, ctx->glob, ctx->nglob); + dasm_setup(Dst, build_actionlist); + + /* Call arch-specific backend to emit the code. */ + ctx->npc = build_backend(ctx); + + /* Finalize the code. 
*/ + (void)dasm_checkstep(Dst, -1); + if ((status = dasm_link(Dst, &ctx->codesz))) return status; + ctx->code = (uint8_t *)malloc(ctx->codesz); + if ((status = dasm_encode(Dst, (void *)ctx->code))) return status; + + /* Allocate symbol table and bytecode offsets. */ + ctx->beginsym = sym_decorate(ctx, "", LABEL_PREFIX "vm_asm_begin"); + ctx->sym = (BuildSym *)malloc((ctx->npc+ctx->nglob+1)*sizeof(BuildSym)); + ctx->nsym = 0; + ctx->bc_ofs = (int32_t *)malloc(ctx->npc*sizeof(int32_t)); + + /* Collect the opcodes (PC labels). */ + for (i = 0; i < ctx->npc; i++) { + int32_t ofs = dasm_getpclabel(Dst, i); + if (ofs < 0) return 0x22000000|i; + ctx->bc_ofs[i] = ofs; + if ((LJ_HASJIT || + !(i == BC_JFORI || i == BC_JFORL || i == BC_JITERL || i == BC_JLOOP || + i == BC_IFORL || i == BC_IITERL || i == BC_ILOOP)) && + (LJ_HASFFI || i != BC_KCDATA)) + sym_insert(ctx, ofs, LABEL_PREFIX_BC, bc_names[i]); + } + + /* Collect the globals (named labels). */ + for (i = 0; i < ctx->nglob; i++) { + const char *gl = globnames[i]; + int len = (int)strlen(gl); + if (!ctx->glob[i]) { + fprintf(stderr, "Error: undefined global %s\n", gl); + exit(2); + } + /* Skip the _Z symbols. */ + if (!(len >= 2 && gl[len-2] == '_' && gl[len-1] == 'Z')) + sym_insert(ctx, (int32_t)((uint8_t *)(ctx->glob[i]) - ctx->code), + LABEL_PREFIX, globnames[i]); + } + + /* Close the address range. */ + sym_insert(ctx, (int32_t)ctx->codesz, "", ""); + ctx->nsym--; + + dasm_free(Dst); + + return 0; +} + +/* -- Generate VM enums --------------------------------------------------- */ + +const char *const bc_names[] = { +#define BCNAME(name, ma, mb, mc, mt) #name, +BCDEF(BCNAME) +#undef BCNAME + NULL +}; + +const char *const ir_names[] = { +#define IRNAME(name, m, m1, m2) #name, +IRDEF(IRNAME) +#undef IRNAME + NULL +}; + +const char *const irt_names[] = { +#define IRTNAME(name) #name, +IRTDEF(IRTNAME) +#undef IRTNAME + NULL +}; + +const char *const irfpm_names[] = { +#define FPMNAME(name) #name, +IRFPMDEF(FPMNAME) +#undef FPMNAME + NULL +}; + +const char *const irfield_names[] = { +#define FLNAME(name, ofs) #name, +IRFLDEF(FLNAME) +#undef FLNAME + NULL +}; + +const char *const ircall_names[] = { +#define IRCALLNAME(name, nargs, kind, type, flags) #name, +IRCALLDEF(IRCALLNAME) +#undef IRCALLNAME + NULL +}; + +static const char *const trace_errors[] = { +#define TREDEF(name, msg) msg, +#include "lj_traceerr.h" + NULL +}; + +static const char *lower(char *buf, const char *s) +{ + char *p = buf; + while (*s) { + *p++ = (*s >= 'A' && *s <= 'Z') ? *s+0x20 : *s; + s++; + } + *p = '\0'; + return buf; +} + +/* Emit C source code for bytecode-related definitions. */ +static void emit_bcdef(BuildCtx *ctx) +{ + int i; + fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); + fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_ofs[] = {\n"); + for (i = 0; i < ctx->npc; i++) { + if (i != 0) + fprintf(ctx->fp, ",\n"); + fprintf(ctx->fp, "%d", ctx->bc_ofs[i]); + } +} + +/* Emit VM definitions as Lua code for debug modules. */ +static void emit_vmdef(BuildCtx *ctx) +{ + char buf[80]; + int i; + fprintf(ctx->fp, "-- This is a generated file. 
DO NOT EDIT!\n\n"); + fprintf(ctx->fp, "module(...)\n\n"); + + fprintf(ctx->fp, "bcnames = \""); + for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]); + fprintf(ctx->fp, "\"\n\n"); + + fprintf(ctx->fp, "irnames = \""); + for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]); + fprintf(ctx->fp, "\"\n\n"); + + fprintf(ctx->fp, "irfpm = { [0]="); + for (i = 0; irfpm_names[i]; i++) + fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i])); + fprintf(ctx->fp, "}\n\n"); + + fprintf(ctx->fp, "irfield = { [0]="); + for (i = 0; irfield_names[i]; i++) { + char *p; + lower(buf, irfield_names[i]); + p = strchr(buf, '_'); + if (p) *p = '.'; + fprintf(ctx->fp, "\"%s\", ", buf); + } + fprintf(ctx->fp, "}\n\n"); + + fprintf(ctx->fp, "ircall = {\n[0]="); + for (i = 0; ircall_names[i]; i++) + fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]); + fprintf(ctx->fp, "}\n\n"); + + fprintf(ctx->fp, "traceerr = {\n[0]="); + for (i = 0; trace_errors[i]; i++) + fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]); + fprintf(ctx->fp, "}\n\n"); +} + +/* -- Argument parsing ---------------------------------------------------- */ + +/* Build mode names. */ +static const char *const modenames[] = { +#define BUILDNAME(name) #name, +BUILDDEF(BUILDNAME) +#undef BUILDNAME + NULL +}; + +/* Print usage information and exit. */ +static void usage(void) +{ + int i; + fprintf(stderr, LUAJIT_VERSION " VM builder.\n"); + fprintf(stderr, LUAJIT_COPYRIGHT ", " LUAJIT_URL "\n"); + fprintf(stderr, "Target architecture: " LJ_ARCH_NAME "\n\n"); + fprintf(stderr, "Usage: buildvm -m mode [-o outfile] [infiles...]\n\n"); + fprintf(stderr, "Available modes:\n"); + for (i = 0; i < BUILD__MAX; i++) + fprintf(stderr, " %s\n", modenames[i]); + exit(1); +} + +/* Parse the output mode name. */ +static BuildMode parsemode(const char *mode) +{ + int i; + for (i = 0; modenames[i]; i++) + if (!strcmp(mode, modenames[i])) + return (BuildMode)i; + usage(); + return (BuildMode)-1; +} + +/* Parse arguments. */ +static void parseargs(BuildCtx *ctx, char **argv) +{ + const char *a; + int i; + ctx->mode = (BuildMode)-1; + ctx->outname = "-"; + for (i = 1; (a = argv[i]) != NULL; i++) { + if (a[0] != '-') + break; + switch (a[1]) { + case '-': + if (a[2]) goto err; + i++; + goto ok; + case '\0': + goto ok; + case 'm': + i++; + if (a[2] || argv[i] == NULL) goto err; + ctx->mode = parsemode(argv[i]); + break; + case 'o': + i++; + if (a[2] || argv[i] == NULL) goto err; + ctx->outname = argv[i]; + break; + default: err: + usage(); + break; + } + } +ok: + ctx->args = argv+i; + if (ctx->mode == (BuildMode)-1) goto err; +} + +int main(int argc, char **argv) +{ + BuildCtx ctx_; + BuildCtx *ctx = &ctx_; + int status, binmode; + + if (sizeof(void *) != 4*LJ_32+8*LJ_64) { + fprintf(stderr,"Error: pointer size mismatch in cross-build.\n"); + fprintf(stderr,"Try: make HOST_CC=\"gcc -m32\" CROSS=... TARGET=...\n\n"); + return 1; + } + + UNUSED(argc); + parseargs(ctx, argv); + + if ((status = build_code(ctx))) { + fprintf(stderr,"Error: DASM error %08x\n", status); + return 1; + } + + switch (ctx->mode) { + case BUILD_peobj: + case BUILD_raw: + binmode = 1; + break; + default: + binmode = 0; + break; + } + + if (ctx->outname[0] == '-' && ctx->outname[1] == '\0') { + ctx->fp = stdout; +#if defined(_WIN32) + if (binmode) + _setmode(_fileno(stdout), _O_BINARY); /* Yuck. */ +#endif + } else if (!(ctx->fp = fopen(ctx->outname, binmode ? 
"wb" : "w"))) { + fprintf(stderr, "Error: cannot open output file '%s': %s\n", + ctx->outname, strerror(errno)); + exit(1); + } + + switch (ctx->mode) { + case BUILD_elfasm: + case BUILD_coffasm: + case BUILD_machasm: + emit_asm(ctx); + emit_asm_debug(ctx); + break; + case BUILD_peobj: + emit_peobj(ctx); + break; + case BUILD_raw: + emit_raw(ctx); + break; + case BUILD_bcdef: + emit_bcdef(ctx); + emit_lib(ctx); + break; + case BUILD_vmdef: + emit_vmdef(ctx); + emit_lib(ctx); + break; + case BUILD_ffdef: + case BUILD_libdef: + case BUILD_recdef: + emit_lib(ctx); + break; + case BUILD_folddef: + emit_fold(ctx); + break; + default: + break; + } + + fflush(ctx->fp); + if (ferror(ctx->fp)) { + fprintf(stderr, "Error: cannot write to output file: %s\n", + strerror(errno)); + exit(1); + } + fclose(ctx->fp); + + return 0; +} + diff --git a/src/luajit2/src/buildvm.h b/src/luajit2/src/buildvm.h new file mode 100644 index 0000000000..a24d94531a --- /dev/null +++ b/src/luajit2/src/buildvm.h @@ -0,0 +1,104 @@ +/* +** LuaJIT VM builder. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _BUILDVM_H +#define _BUILDVM_H + +#include <sys/types.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> + +#include "lj_def.h" +#include "lj_arch.h" + +/* Hardcoded limits. Increase as needed. */ +#define BUILD_MAX_RELOC 200 /* Max. number of relocations. */ +#define BUILD_MAX_FOLD 4096 /* Max. number of fold rules. */ + +/* Prefix for scanned library definitions. */ +#define LIBDEF_PREFIX "LJLIB_" + +/* Prefix for scanned fold definitions. */ +#define FOLDDEF_PREFIX "LJFOLD" + +/* Prefixes for generated labels. */ +#define LABEL_PREFIX "lj_" +#define LABEL_PREFIX_BC LABEL_PREFIX "BC_" +#define LABEL_PREFIX_FF LABEL_PREFIX "ff_" +#define LABEL_PREFIX_CF LABEL_PREFIX "cf_" +#define LABEL_PREFIX_FFH LABEL_PREFIX "ffh_" +#define LABEL_PREFIX_LIBCF LABEL_PREFIX "lib_cf_" +#define LABEL_PREFIX_LIBINIT LABEL_PREFIX "lib_init_" + +/* Forward declaration. */ +struct dasm_State; + +/* Build modes. */ +#define BUILDDEF(_) \ + _(elfasm) _(coffasm) _(machasm) _(peobj) _(raw) \ + _(bcdef) _(ffdef) _(libdef) _(recdef) _(vmdef) \ + _(folddef) + +typedef enum { +#define BUILDENUM(name) BUILD_##name, +BUILDDEF(BUILDENUM) +#undef BUILDENUM + BUILD__MAX +} BuildMode; + +/* Code relocation. */ +typedef struct BuildReloc { + int32_t ofs; + int sym; + int type; +} BuildReloc; + +typedef struct BuildSym { + const char *name; + int32_t ofs; +} BuildSym; + +/* Build context structure. */ +typedef struct BuildCtx { + /* DynASM state pointer. Should be first member. */ + struct dasm_State *D; + /* Parsed command line. */ + BuildMode mode; + FILE *fp; + const char *outname; + char **args; + /* Code and symbols generated by DynASM. */ + uint8_t *code; + size_t codesz; + int npc, nglob, nsym, nreloc, nrelocsym; + void **glob; + BuildSym *sym; + const char **relocsym; + int32_t *bc_ofs; + const char *beginsym; + /* Strings generated by DynASM. */ + const char *const *globnames; + const char *dasm_ident; + const char *dasm_arch; + /* Relocations. 
*/ + BuildReloc reloc[BUILD_MAX_RELOC]; +} BuildCtx; + +extern void owrite(BuildCtx *ctx, const void *ptr, size_t sz); +extern void emit_asm(BuildCtx *ctx); +extern void emit_peobj(BuildCtx *ctx); +extern void emit_lib(BuildCtx *ctx); +extern void emit_fold(BuildCtx *ctx); + +extern const char *const bc_names[]; +extern const char *const ir_names[]; +extern const char *const irt_names[]; +extern const char *const irfpm_names[]; +extern const char *const irfield_names[]; +extern const char *const ircall_names[]; + +#endif diff --git a/src/luajit2/src/buildvm_arm.dasc b/src/luajit2/src/buildvm_arm.dasc new file mode 100644 index 0000000000..fd7c7b8117 --- /dev/null +++ b/src/luajit2/src/buildvm_arm.dasc @@ -0,0 +1,4043 @@ +|// Low-level VM code for ARM CPUs. +|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +| +|.arch arm +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|// Note: The ragged indentation of the instructions is intentional. +|// The starting columns indicate data dependencies. +| +|//----------------------------------------------------------------------- +| +|// Fixed register assignments for the interpreter. +| +|// The following must be C callee-save. +|.define MASKR8, r4 // 255*8 constant for fast bytecode decoding. +|.define KBASE, r5 // Constants of current Lua function. +|.define PC, r6 // Next PC. +|.define DISPATCH, r7 // Opcode dispatch table. +|.define LREG, r8 // Register holding lua_State (also in SAVE_L). +| +|// C callee-save in EABI, but often refetched. Temporary in iOS 3.0+. +|.define BASE, r9 // Base of current Lua stack frame. +| +|// The following temporaries are not saved across C calls, except for RA/RC. +|.define RA, r10 // Callee-save. +|.define RC, r11 // Callee-save. +|.define RB, r12 +|.define OP, r12 // Overlaps RB, must not be lr. +|.define INS, lr +| +|// Calling conventions. Also used as temporaries. +|.define CARG1, r0 +|.define CARG2, r1 +|.define CARG3, r2 +|.define CARG4, r3 +|.define CARG12, r0 // For 1st soft-fp double. +|.define CARG34, r2 // For 2nd soft-fp double. +| +|.define CRET1, r0 +|.define CRET2, r1 +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +|.define CFRAME_SPACE, #28 +|.define SAVE_ERRF, [sp, #24] +|.define SAVE_NRES, [sp, #20] +|.define SAVE_CFRAME, [sp, #16] +|.define SAVE_L, [sp, #12] +|.define SAVE_PC, [sp, #8] +|.define SAVE_MULTRES, [sp, #4] +|.define ARG5, [sp] +| +|.define TMPDhi, [sp, #4] +|.define TMPDlo, [sp] +|.define TMPD, [sp] +|.define TMPDp, sp +| +|.macro saveregs +| push {r4, r5, r6, r7, r8, r9, r10, r11, lr} +| sub sp, sp, CFRAME_SPACE +|.endmacro +|.macro restoreregs_ret +| add sp, sp, CFRAME_SPACE +| pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} +|.endmacro +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State, LREG +|.type GL, global_State +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS8, int +|.type TRACE, GCtrace +| +|//----------------------------------------------------------------------- +| +|// Trap for not-yet-implemented parts. +|.macro NYI; ud; .endmacro +| +|//----------------------------------------------------------------------- +| +|// Access to frame relative to BASE. 
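+|// FRAME_FUNC and FRAME_PC address the two stack slots just below BASE that
+|// hold the frame's function and its frame link/return PC.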
+|.define FRAME_FUNC, #-8 +|.define FRAME_PC, #-4 +| +|.macro decode_RA8, dst, ins; and dst, MASKR8, ins, lsr #5; .endmacro +|.macro decode_RB8, dst, ins; and dst, MASKR8, ins, lsr #21; .endmacro +|.macro decode_RC8, dst, ins; and dst, MASKR8, ins, lsr #13; .endmacro +|.macro decode_RD, dst, ins; lsr dst, ins, #16; .endmacro +|.macro decode_OP, dst, ins; and dst, ins, #255; .endmacro +| +|// Instruction fetch. +|.macro ins_NEXT1 +| ldrb OP, [PC] +|.endmacro +|.macro ins_NEXT2 +| ldr INS, [PC], #4 +|.endmacro +|// Instruction decode+dispatch. +|.macro ins_NEXT3 +| ldr OP, [DISPATCH, OP, lsl #2] +| decode_RA8 RA, INS +| decode_RD RC, INS +| bx OP +|.endmacro +|.macro ins_NEXT +| ins_NEXT1 +| ins_NEXT2 +| ins_NEXT3 +|.endmacro +| +|// Instruction footer. +|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. +| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +| .define ins_next1, ins_NEXT1 +| .define ins_next2, ins_NEXT2 +| .define ins_next3, ins_NEXT3 +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| .macro ins_next +| b ->ins_next +| .endmacro +| .macro ins_next1 +| .endmacro +| .macro ins_next2 +| .endmacro +| .macro ins_next3 +| b ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Avoid register name substitution for field name. +#define field_pc pc +| +|// Call decode and dispatch. +|.macro ins_callt +| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC +| ldr PC, LFUNC:CARG3->field_pc +| ldrb OP, [PC] // STALL: load PC. early PC. +| ldr INS, [PC], #4 +| ldr OP, [DISPATCH, OP, lsl #2] // STALL: load OP. early OP. +| decode_RA8 RA, INS +| add RA, RA, BASE +| bx OP +|.endmacro +| +|.macro ins_call +| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC +| str PC, [BASE, FRAME_PC] +| ins_callt // STALL: locked PC. +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Macros to test operand types. +|.macro checktp, reg, tp; cmn reg, #-tp; .endmacro +|.macro checktpeq, reg, tp; cmneq reg, #-tp; .endmacro +|.macro checktpne, reg, tp; cmnne reg, #-tp; .endmacro +|.macro checkstr, reg, target; checktp reg, LJ_TSTR; bne target; .endmacro +|.macro checktab, reg, target; checktp reg, LJ_TTAB; bne target; .endmacro +|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC; bne target; .endmacro +| +|// Assumes DISPATCH is relative to GL. +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|.macro hotcheck +| lsr CARG1, PC, #1 +| and CARG1, CARG1, #126 +| sub CARG1, CARG1, #-GG_DISP2HOT +| ldrh CARG2, [DISPATCH, CARG1] +| subs CARG2, CARG2, #1 +| strh CARG2, [DISPATCH, CARG1] +|.endmacro +| +|.macro hotloop +| hotcheck +| beq ->vm_hotloop +|.endmacro +| +|.macro hotcall +| hotcheck +| beq ->vm_hotcall +|.endmacro +| +|// Set current VM state. +|.macro mv_vmstate, reg, st; mvn reg, #LJ_VMST_..st; .endmacro +|.macro st_vmstate, reg; str reg, [DISPATCH, #DISPATCH_GL(vmstate)]; .endmacro +| +|// Move table write barrier back. Overwrites mark and tmp. 
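+|// The table is flipped from black back to gray and prepended to the
+|// gc.grayagain list, so the GC will traverse it again later.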
+|.macro barrierback, tab, mark, tmp +| ldr tmp, [DISPATCH, #DISPATCH_GL(gc.grayagain)] +| bic mark, mark, #LJ_GC_BLACK // black2gray(tab) +| str tab, [DISPATCH, #DISPATCH_GL(gc.grayagain)] +| strb mark, tab->marked +| str tmp, tab->gclist +|.endmacro +| +|.macro IOS, a, b +||if (LJ_TARGET_OSX) { +| a, b +||} +|.endmacro +| +|//----------------------------------------------------------------------- + +#if !LJ_DUALNUM +#error "Only dual-number mode supported for ARM target" +#endif + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | // See vm_return. Also: RB = previous base. + | tst PC, #FRAME_P + | beq ->cont_dispatch + | + | // Return from pcall or xpcall fast func. + | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame. + | mvn CARG2, #~LJ_TTRUE + | mov BASE, RB + | // Prepending may overwrite the pcall frame, so do it at the end. + | str CARG2, [RA, FRAME_PC] // Prepend true to results. + | sub RA, RA, #8 + | + |->vm_returnc: + | add RC, RC, #8 // RC = (nresults+1)*8. + | ands CARG1, PC, #FRAME_TYPE + | str RC, SAVE_MULTRES + | beq ->BC_RET_Z // Handle regular return to Lua. + | + |->vm_return: + | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return + | // CARG1 = PC & FRAME_TYPE + | bic RB, PC, #FRAME_TYPEP + | cmp CARG1, #FRAME_C + | sub RB, BASE, RB // RB = previous base. + | bne ->vm_returnp + | + | str RB, L->base + | ldr KBASE, SAVE_NRES + | mv_vmstate CARG4, C + | sub BASE, BASE, #8 + | subs CARG3, RC, #8 + | lsl KBASE, KBASE, #3 // KBASE = (nresults_wanted+1)*8 + | st_vmstate CARG4 + | beq >2 + |1: + | subs CARG3, CARG3, #8 + | ldrd CARG12, [RA], #8 + | strd CARG12, [BASE], #8 + | bne <1 + |2: + | cmp KBASE, RC // More/less results wanted? + | bne >6 + |3: + | str BASE, L->top // Store new top. + | + |->vm_leave_cp: + | ldr RC, SAVE_CFRAME // Restore previous C frame. + | mov CRET1, #0 // Ok return status for vm_pcall. + | str RC, L->cframe + | + |->vm_leave_unw: + | restoreregs_ret + | + |6: + | blt >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + | ldr CARG3, L->maxstack + | mvn CARG2, #~LJ_TNIL + | cmp BASE, CARG3 + | bhs >8 + | str CARG2, [BASE, #4] + | add RC, RC, #8 + | add BASE, BASE, #8 + | b <2 + | + |7: // Less results wanted. + | sub CARG1, RC, KBASE + | cmp KBASE, #0 // LUA_MULTRET+1 case? + | subne BASE, BASE, CARG1 // Either keep top or shrink it. + | b <3 + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | str BASE, L->top // Save current top held in BASE (yes). + | mov CARG2, KBASE + | mov CARG1, L + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->top // Need the (realloced) L->top in BASE. + | b <2 + | + |->vm_unwind_c: // Unwind C stack, return from vm_pcall. + | // (void *cframe, int errcode) + | mov sp, CARG1 + | mov CRET1, CARG2 + |->vm_unwind_c_eh: // Landing pad for external unwinder. 
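+ | // Reload L from the save area and record the 'C' VM state before
+ | // unwinding out through vm_leave_unw.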
+ | ldr L, SAVE_L + | mv_vmstate CARG4, C + | ldr GL:CARG3, L->glref + | str CARG4, GL:CARG3->vmstate + | b ->vm_leave_unw + | + |->vm_unwind_ff: // Unwind C stack, return from ff pcall. + | // (void *cframe) + | bic CARG1, CARG1, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated. + | mov sp, CARG1 + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | ldr L, SAVE_L + | mov MASKR8, #255 + | mov RC, #16 // 2 results: false + error message. + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | ldr BASE, L->base + | ldr DISPATCH, L->glref // Setup pointer to dispatch table. + | mvn CARG1, #~LJ_TFALSE + | sub RA, BASE, #8 // Results start at BASE-8. + | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame. + | add DISPATCH, DISPATCH, #GG_G2DISP + | mv_vmstate CARG2, INTERP + | str CARG1, [BASE, #-4] // Prepend false to error message. + | st_vmstate CARG2 + | b ->vm_returnc + | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | // CARG1 = L + | mov CARG2, #LUA_MINSTACK + | b >2 + | + |->vm_growstack_l: // Grow stack for Lua function. + | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC + | add RC, BASE, RC + | sub RA, RA, BASE + | mov CARG1, L + | str BASE, L->base + | add PC, PC, #4 // Must point after first instruction. + | str RC, L->top + | lsr CARG3, RA, #3 + |2: + | // L->base = new base, L->top = top + | str PC, SAVE_PC + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->base + | ldr RC, L->top + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | sub NARGS8:RC, RC, BASE + | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + | mov L, CARG1 + | ldr DISPATCH, L:CARG1->glref // Setup pointer to dispatch table. + | mov BASE, CARG2 + | add DISPATCH, DISPATCH, #GG_G2DISP + | str L, SAVE_L + | mov PC, #FRAME_CP + | str CARG3, SAVE_NRES + | add CARG2, sp, #CFRAME_RESUME + | ldrb CARG1, L->status + | str CARG3, SAVE_ERRF + | str CARG2, L->cframe + | str CARG3, SAVE_CFRAME + | cmp CARG1, #0 + | str L, SAVE_PC // Any value outside of bytecode is ok. + | beq >3 + | + | // Resume after yield (like a return). + | mov RA, BASE + | ldr BASE, L->base + | ldr CARG1, L->top + | mov MASKR8, #255 + | strb CARG3, L->status + | sub RC, CARG1, BASE + | ldr PC, [BASE, FRAME_PC] + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | mv_vmstate CARG2, INTERP + | add RC, RC, #8 + | ands CARG1, PC, #FRAME_TYPE + | st_vmstate CARG2 + | str RC, SAVE_MULTRES + | beq ->BC_RET_Z + | b ->vm_return + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | mov PC, #FRAME_CP + | str CARG4, SAVE_ERRF + | b >1 + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | mov PC, #FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). 
+ | ldr RC, L:CARG1->cframe + | str CARG3, SAVE_NRES + | mov L, CARG1 + | str CARG1, SAVE_L + | mov BASE, CARG2 + | str sp, L->cframe // Add our C frame to cframe chain. + | ldr DISPATCH, L->glref // Setup pointer to dispatch table. + | str CARG1, SAVE_PC // Any value outside of bytecode is ok. + | str RC, SAVE_CFRAME + | add DISPATCH, DISPATCH, #GG_G2DISP + | + |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). + | ldr RB, L->base // RB = old base (for vmeta_call). + | ldr CARG1, L->top + | mov MASKR8, #255 + | add PC, PC, BASE + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | sub PC, PC, RB // PC = frame delta + frame type + | mv_vmstate CARG2, INTERP + | sub NARGS8:RC, CARG1, BASE + | st_vmstate CARG2 + | + |->vm_call_dispatch: + | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC + | ldrd CARG34, [BASE, FRAME_FUNC] + | checkfunc CARG4, ->vmeta_call + | + |->vm_call_dispatch_f: + | ins_call + | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + | mov L, CARG1 + | ldr RA, L:CARG1->stack + | str CARG1, SAVE_L + | ldr RB, L->top + | str CARG1, SAVE_PC // Any value outside of bytecode is ok. + | ldr RC, L->cframe + | sub RA, RA, RB // Compute -savestack(L, L->top). + | str sp, L->cframe // Add our C frame to cframe chain. + | mov RB, #0 + | str RA, SAVE_NRES // Neg. delta means cframe w/o frame. + | str RB, SAVE_ERRF // No error function. + | str RC, SAVE_CFRAME + | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud) + | ldr DISPATCH, L->glref // Setup pointer to dispatch table. + | movs BASE, CRET1 + | mov PC, #FRAME_CP + | add DISPATCH, DISPATCH, #GG_G2DISP + | bne <3 // Else continue with the call. + | b ->vm_leave_cp // No base? Just remove C frame. + | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8 + | ldr LFUNC:CARG3, [RB, FRAME_FUNC] + | ldr CARG1, [BASE, #-16] // Get continuation. + | mov CARG4, BASE + | mov BASE, RB // Restore caller BASE. + | cmp CARG1, #0 + | ldr PC, [CARG4, #-12] // Restore PC from [cont|PC]. + | beq >1 + | ldr CARG3, LFUNC:CARG3->field_pc + | mvn INS, #~LJ_TNIL + | add CARG2, RA, RC + | str INS, [CARG2, #-4] // Ensure one valid arg. + | ldr KBASE, [CARG3, #PC2PROTO(k)] + | // BASE = base, RA = resultptr, CARG4 = meta base + | bx CARG1 + | + |1: // Tail call from C function. 
+ | ldr CARG3, [BASE, FRAME_FUNC] + | sub CARG4, CARG4, #16 + | sub RC, CARG4, BASE + | b ->vm_call_tail + | + |->cont_cat: // RA = resultptr, CARG4 = meta base + | ldr INS, [PC, #-4] + | sub CARG2, CARG4, #16 + | ldrd CARG34, [RA] + | str BASE, L->base + | decode_RB8 RC, INS + | decode_RA8 RA, INS + | add CARG1, BASE, RC + | subs CARG1, CARG2, CARG1 + | strdne CARG34, [CARG2] + | movne CARG3, CARG1 + | bne ->BC_CAT_Z + | strd CARG34, [BASE, RA] + | b ->cont_nop + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets1: + | add CARG2, BASE, RB + | b >2 + | + |->vmeta_tgets: + | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv) + | mvn CARG4, #~LJ_TTAB + | str TAB:RB, [CARG2] + | str CARG4, [CARG2, #4] + |2: + | mvn CARG4, #~LJ_TSTR + | str STR:RC, TMPDlo + | str CARG4, TMPDhi + | mov CARG3, TMPDp + | b >1 + | + |->vmeta_tgetb: // RC = index + | decode_RB8 RB, INS + | str RC, TMPDlo + | mvn CARG4, #~LJ_TISNUM + | add CARG2, BASE, RB + | str CARG4, TMPDhi + | mov CARG3, TMPDp + | b >1 + | + |->vmeta_tgetv: + | add CARG2, BASE, RB + | add CARG3, BASE, RC + |1: + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | IOS ldr BASE, L->base + | cmp CRET1, #0 + | beq >3 + | ldrd CARG34, [CRET1] + | ins_next1 + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + | + |3: // Call __index metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k + | rsb CARG1, BASE, #FRAME_CONT + | ldr BASE, L->top + | mov NARGS8:RC, #16 // 2 args for func(t, k). + | str PC, [BASE, #-12] // [cont|PC] + | add PC, CARG1, BASE + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. + | b ->vm_call_dispatch_f + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets1: + | add CARG2, BASE, RB + | b >2 + | + |->vmeta_tsets: + | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv) + | mvn CARG4, #~LJ_TTAB + | str TAB:RB, [CARG2] + | str CARG4, [CARG2, #4] + |2: + | mvn CARG4, #~LJ_TSTR + | str STR:RC, TMPDlo + | str CARG4, TMPDhi + | mov CARG3, TMPDp + | b >1 + | + |->vmeta_tsetb: // RC = index + | decode_RB8 RB, INS + | str RC, TMPDlo + | mvn CARG4, #~LJ_TISNUM + | add CARG2, BASE, RB + | str CARG4, TMPDhi + | mov CARG3, TMPDp + | b >1 + | + |->vmeta_tsetv: + | add CARG2, BASE, RB + | add CARG3, BASE, RC + |1: + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | IOS ldr BASE, L->base + | cmp CRET1, #0 + | ldrd CARG34, [BASE, RA] + | beq >3 + | ins_next1 + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | strd CARG34, [CRET1] + | ins_next2 + | ins_next3 + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | rsb CARG1, BASE, #FRAME_CONT + | ldr BASE, L->top + | mov NARGS8:RC, #24 // 3 args for func(t, k, v). + | strd CARG34, [BASE, #16] // Copy value to third argument. + | str PC, [BASE, #-12] // [cont|PC] + | add PC, CARG1, BASE + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. 
+ | b ->vm_call_dispatch_f + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + | mov CARG1, L + | sub PC, PC, #4 + | mov CARG2, RA + | str BASE, L->base + | mov CARG3, RC + | str PC, SAVE_PC + | decode_OP CARG4, INS + | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + | // Returns 0/1 or TValue * (metamethod). + |3: + | IOS ldr BASE, L->base + | cmp CRET1, #1 + | bhi ->vmeta_binop + |4: + | ldrh RB, [PC, #2] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | subhs PC, RB, #0x20000 + |->cont_nop: + | ins_next + | + |->cont_ra: // RA = resultptr + | ldr INS, [PC, #-4] + | ldrd CARG12, [RA] + | decode_RA8 CARG3, INS + | strd CARG12, [BASE, CARG3] + | b ->cont_nop + | + |->cont_condt: // RA = resultptr + | ldr CARG2, [RA, #4] + | mvn CARG1, #~LJ_TTRUE + | cmp CARG1, CARG2 // Branch if result is true. + | b <4 + | + |->cont_condf: // RA = resultptr + | ldr CARG2, [RA, #4] + | checktp CARG2, LJ_TFALSE // Branch if result is false. + | b <4 + | + |->vmeta_equal: + | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. + | sub PC, PC, #4 + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + | // Returns 0/1 or TValue * (metamethod). + | b <3 + | + |->vmeta_equal_cd: +#if LJ_HASFFI + | sub PC, PC, #4 + | str BASE, L->base + | mov CARG1, L + | mov CARG2, INS + | str PC, SAVE_PC + | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op) + | // Returns 0/1 or TValue * (metamethod). + | b <3 +#endif + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_arith_vn: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG3, BASE, RB + | add CARG4, KBASE, RC + | b >1 + | + |->vmeta_arith_nv: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG4, BASE, RB + | add CARG3, KBASE, RC + | b >1 + | + |->vmeta_unm: + | ldr INS, [PC, #-8] + | sub PC, PC, #4 + | add CARG3, BASE, RC + | add CARG4, BASE, RC + | b >1 + | + |->vmeta_arith_vv: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG3, BASE, RB + | add CARG4, BASE, RC + |1: + | decode_OP OP, INS + | add CARG2, BASE, RA + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | str OP, ARG5 + | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + | // Returns NULL (finished) or TValue * (metamethod). + | IOS ldr BASE, L->base + | cmp CRET1, #0 + | beq ->cont_nop + | + | // Call metamethod for binary op. + |->vmeta_binop: + | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 + | sub CARG2, CRET1, BASE + | str PC, [CRET1, #-12] // [cont|PC] + | add PC, CARG2, #FRAME_CONT + | mov BASE, CRET1 + | mov NARGS8:RC, #16 // 2 args for func(o1, o2). + | b ->vm_call_dispatch + | + |->vmeta_len: + | add CARG2, BASE, RC + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_meta_len // (lua_State *L, TValue *o) + | // Returns NULL (retry) or TValue * (metamethod base). + | IOS ldr BASE, L->base +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | cmp CRET1, #0 + | bne ->vmeta_binop // Binop call for compatibility. + | ldr TAB:CARG1, [BASE, RC] + | b ->BC_LEN_Z +#else + | b ->vmeta_binop // Binop call for compatibility. +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call: // Resolve and call __call metamethod. + | // RB = old base, BASE = new base, RC = nargs*8 + | mov CARG1, L + | str RB, L->base // This is the callers base! 
+ | sub CARG2, BASE, #8 + | str PC, SAVE_PC + | add CARG3, BASE, NARGS8:RC + | IOS mov RA, BASE + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | IOS mov BASE, RA + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. + | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. + | ins_call + | + |->vmeta_callt: // Resolve __call for BC_CALLT. + | // BASE = old base, RA = new base, RC = nargs*8 + | mov CARG1, L + | str BASE, L->base + | sub CARG2, RA, #8 + | str PC, SAVE_PC + | add CARG3, RA, NARGS8:RC + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | IOS ldr BASE, L->base + | ldr LFUNC:CARG3, [RA, FRAME_FUNC] // Guaranteed to be a function here. + | ldr PC, [BASE, FRAME_PC] + | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now. + | b ->BC_CALLT2_Z + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | mov CARG1, L + | str BASE, L->base + | mov CARG2, RA + | str PC, SAVE_PC + | bl extern lj_meta_for // (lua_State *L, TValue *base) + | IOS ldr BASE, L->base +#if LJ_HASJIT + | ldrb OP, [PC, #-4] +#endif + | ldr INS, [PC, #-4] +#if LJ_HASJIT + | cmp OP, #BC_JFORI +#endif + | decode_RA8 RA, INS + | decode_RD RC, INS +#if LJ_HASJIT + | beq =>BC_JFORI +#endif + | b =>BC_FORI + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | ldrd CARG12, [BASE] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. name: + | ldrd CARG12, [BASE] + | ldrd CARG34, [BASE, #8] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + |.endmacro + | + |.macro .ffunc_n, name + | .ffunc_1 name + | checktp CARG2, LJ_TISNUM + | bhs ->fff_fallback + |.endmacro + | + |.macro .ffunc_nn, name + | .ffunc_2 name + | checktp CARG2, LJ_TISNUM + | cmnlo CARG4, #-LJ_TISNUM + | bhs ->fff_fallback + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2. + |.macro ffgccheck + | ldr CARG1, [DISPATCH, #DISPATCH_GL(gc.total)] + | ldr CARG2, [DISPATCH, #DISPATCH_GL(gc.threshold)] + | cmp CARG1, CARG2 + | blge ->fff_gcstep + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc_1 assert + | checktp CARG2, LJ_TTRUE + | bhi ->fff_fallback + | ldr PC, [BASE, FRAME_PC] + | strd CARG12, [BASE, #-8] + | mov RB, BASE + | subs RA, NARGS8:RC, #8 + | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8. + | beq ->fff_res // Done if exactly 1 argument. + |1: + | ldrd CARG12, [RB, #8] + | subs RA, RA, #8 + | strd CARG12, [RB], #8 + | bne <1 + | b ->fff_res + | + |.ffunc type + | ldr CARG2, [BASE, #4] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + | checktp CARG2, LJ_TISNUM + | mvnlo CARG2, #~LJ_TISNUM + | rsb CARG4, CARG2, #(int)(offsetof(GCfuncC, upvalue)>>3)-1 + | lsl CARG4, CARG4, #3 + | ldrd CARG12, [CFUNC:CARG3, CARG4] + | b ->fff_restv + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | checktp CARG2, LJ_TTAB + | cmnne CARG2, #-LJ_TUDATA + | bne >6 + |1: // Field metatable must be at same offset for GCtab and GCudata! 
+ | ldr TAB:RB, TAB:CARG1->metatable + |2: + | mvn CARG2, #~LJ_TNIL + | ldr STR:RC, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])] + | cmp TAB:RB, #0 + | beq ->fff_restv + | ldr CARG3, TAB:RB->hmask + | ldr CARG4, STR:RC->hash + | ldr NODE:INS, TAB:RB->node + | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask + | add CARG3, CARG3, CARG3, lsl #1 + | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 + |3: // Rearranged logic, because we expect _not_ to find the key. + | ldrd CARG34, NODE:INS->key // STALL: early NODE:INS. + | ldrd CARG12, NODE:INS->val + | ldr NODE:INS, NODE:INS->next + | cmp CARG3, STR:RC + | checktpeq CARG4, LJ_TSTR + | beq >5 + | cmp NODE:INS, #0 + | bne <3 + |4: + | mov CARG1, RB // Use metatable as default result. + | mvn CARG2, #~LJ_TTAB + | b ->fff_restv + |5: + | checktp CARG2, LJ_TNIL + | bne ->fff_restv + | b <4 + | + |6: + | checktp CARG2, LJ_TISNUM + | mvnhs CARG2, CARG2 + | movlo CARG2, #~LJ_TISNUM + | add CARG4, DISPATCH, CARG2, lsl #2 + | ldr TAB:RB, [CARG4, #DISPATCH_GL(gcroot[GCROOT_BASEMT])] + | b <2 + | + |.ffunc_2 setmetatable + | // Fast path: no mt for table yet and not clearing the mt. + | checktp CARG2, LJ_TTAB + | ldreq TAB:RB, TAB:CARG1->metatable + | checktpeq CARG4, LJ_TTAB + | ldrbeq CARG4, TAB:CARG1->marked + | cmpeq TAB:RB, #0 + | bne ->fff_fallback + | tst CARG4, #LJ_GC_BLACK // isblack(table) + | str TAB:CARG3, TAB:CARG1->metatable + | beq ->fff_restv + | barrierback TAB:CARG1, CARG4, CARG3 + | b ->fff_restv + | + |.ffunc rawget + | ldrd CARG34, [BASE] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + | mov CARG2, CARG3 + | checktab CARG4, ->fff_fallback + | mov CARG1, L + | add CARG3, BASE, #8 + | IOS mov RA, BASE + | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + | // Returns cTValue *. + | IOS mov BASE, RA + | ldrd CARG12, [CRET1] + | b ->fff_restv + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc_1 tonumber + | // Only handles the number case inline (without a base argument). + | checktp CARG2, LJ_TISNUM + | bls ->fff_restv + | b ->fff_fallback + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. + | checktp CARG2, LJ_TSTR + | // A __tostring method in the string base metatable is ignored. + | beq ->fff_restv + | // Handle numbers inline, unless a number base metatable is present. + | ldr CARG4, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])] + | str BASE, L->base + | checktp CARG2, LJ_TISNUM + | cmpls CARG4, #0 + | str PC, SAVE_PC // Redundant (but a defined value). + | bhi ->fff_fallback + | ffgccheck + | mov CARG1, L + | mov CARG2, BASE + | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o) + | // Returns GCstr *. + | ldr BASE, L->base + | mvn CARG2, #~LJ_TSTR + | b ->fff_restv + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc_1 next + | mvn CARG4, #~LJ_TNIL + | checktab CARG2, ->fff_fallback + | strd CARG34, [BASE, NARGS8:RC] // Set missing 2nd arg to nil. + | ldr PC, [BASE, FRAME_PC] + | mov CARG2, CARG1 + | str BASE, L->base // Add frame since C call can throw. + | mov CARG1, L + | str BASE, L->top // Dummy frame length is ok. + | add CARG3, BASE, #8 + | str PC, SAVE_PC + | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + | // Returns 0 at end of traversal. + | IOS ldr BASE, L->base + | cmp CRET1, #0 + | mvneq CRET2, #~LJ_TNIL + | beq ->fff_restv // End of traversal: return nil. + | ldrd CARG12, [BASE, #8] // Copy key and value to results. 
+ | ldrd CARG34, [BASE, #16] + | mov RC, #(2+1)*8 + | strd CARG12, [BASE, #-8] + | strd CARG34, [BASE] + | b ->fff_res + | + |.ffunc_1 pairs + | checktab CARG2, ->fff_fallback +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | ldr TAB:RB, TAB:CARG1->metatable +#endif + | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0] + | ldr PC, [BASE, FRAME_PC] +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | cmp TAB:RB, #0 + | bne ->fff_fallback +#endif + | mvn CARG2, #~LJ_TNIL + | mov RC, #(3+1)*8 + | strd CFUNC:CARG34, [BASE, #-8] + | str CARG2, [BASE, #12] + | b ->fff_res + | + |.ffunc_2 ipairs_aux + | checktp CARG2, LJ_TTAB + | checktpeq CARG4, LJ_TISNUM + | bne ->fff_fallback + | ldr RB, TAB:CARG1->asize + | ldr RC, TAB:CARG1->array + | add CARG3, CARG3, #1 + | ldr PC, [BASE, FRAME_PC] + | cmp CARG3, RB + | add RC, RC, CARG3, lsl #3 + | strd CARG34, [BASE, #-8] + | ldrdlo CARG12, [RC] + | mov RC, #(0+1)*8 + | bhs >2 // Not in array part? + |1: + | checktp CARG2, LJ_TNIL + | movne RC, #(2+1)*8 + | strdne CARG12, [BASE] + | b ->fff_res + |2: // Check for empty hash part first. Otherwise call C function. + | ldr RB, TAB:CARG1->hmask + | mov CARG2, CARG3 + | cmp RB, #0 + | beq ->fff_res + | IOS mov RA, BASE + | bl extern lj_tab_getinth // (GCtab *t, int32_t key) + | // Returns cTValue * or NULL. + | IOS mov BASE, RA + | cmp CRET1, #0 + | beq ->fff_res + | ldrd CARG12, [CRET1] + | b <1 + | + |.ffunc_1 ipairs + | checktab CARG2, ->fff_fallback +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | ldr TAB:RB, TAB:CARG1->metatable +#endif + | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0] + | ldr PC, [BASE, FRAME_PC] +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | cmp TAB:RB, #0 + | bne ->fff_fallback +#endif + | mov CARG1, #0 + | mvn CARG2, #~LJ_TISNUM + | mov RC, #(3+1)*8 + | strd CFUNC:CARG34, [BASE, #-8] + | strd CARG12, [BASE, #8] + | b ->fff_res + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc pcall + | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + | tst RA, #HOOK_ACTIVE // Remember active hook before pcall. + | mov RB, BASE + | add BASE, BASE, #8 + | moveq PC, #8+FRAME_PCALL + | movne PC, #8+FRAME_PCALLH + | sub NARGS8:RC, NARGS8:RC, #8 + | b ->vm_call_dispatch + | + |.ffunc_2 xpcall + | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)] + | checkfunc CARG4, ->fff_fallback // Traceback must be a function. + | mov RB, BASE + | strd CARG12, [BASE, #8] // Swap function and traceback. + | strd CARG34, [BASE] + | tst RA, #HOOK_ACTIVE // Remember active hook before pcall. + | add BASE, BASE, #16 + | moveq PC, #16+FRAME_PCALL + | movne PC, #16+FRAME_PCALLH + | sub NARGS8:RC, NARGS8:RC, #16 + | b ->vm_call_dispatch + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | checktp CARG2, LJ_TTHREAD + | bne ->fff_fallback + |.else + |.ffunc coroutine_wrap_aux + | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr + |.endif + | ldr PC, [BASE, FRAME_PC] + | str BASE, L->base + | ldr CARG2, L:CARG1->top + | ldrb RA, L:CARG1->status + | ldr RB, L:CARG1->base + | add CARG3, CARG2, NARGS8:RC + | add CARG4, CARG2, RA + | str PC, SAVE_PC + | cmp CARG4, RB + | beq ->fff_fallback + | ldr CARG4, L:CARG1->maxstack + | ldr RB, L:CARG1->cframe + | cmp RA, #LUA_YIELD + | cmpls CARG3, CARG4 + | cmpls RB, #0 + | bhi ->fff_fallback + |1: + |.if resume + | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC. 
+ | add BASE, BASE, #8 + | sub NARGS8:RC, NARGS8:RC, #8 + |.endif + | str CARG3, L:CARG1->top + | str BASE, L->top + |2: // Move args to coroutine. + | ldrd CARG34, [BASE, RB] + | cmp RB, NARGS8:RC + | strdne CARG34, [CARG2, RB] + | add RB, RB, #8 + | bne <2 + | + | mov CARG3, #0 + | mov L:RA, L:CARG1 + | mov CARG4, #0 + | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0) + | // Returns thread status. + |4: + | ldr CARG3, L:RA->base + | mv_vmstate CARG2, INTERP + | ldr CARG4, L:RA->top + | st_vmstate CARG2 + | cmp CRET1, #LUA_YIELD + | ldr BASE, L->base + | bhi >8 + | subs RC, CARG4, CARG3 + | ldr CARG1, L->maxstack + | add CARG2, BASE, RC + | beq >6 // No results? + | cmp CARG2, CARG1 + | mov RB, #0 + | bhi >9 // Need to grow stack? + | + | sub CARG4, RC, #8 + | str CARG3, L:RA->top // Clear coroutine stack. + |5: // Move results from coroutine. + | ldrd CARG12, [CARG3, RB] + | cmp RB, CARG4 + | strd CARG12, [BASE, RB] + | add RB, RB, #8 + | bne <5 + |6: + |.if resume + | mvn CARG3, #~LJ_TTRUE + | add RC, RC, #16 + |7: + | str CARG3, [BASE, #-4] // Prepend true/false to results. + | sub RA, BASE, #8 + |.else + | mov RA, BASE + | add RC, RC, #8 + |.endif + | ands CARG1, PC, #FRAME_TYPE + | str PC, SAVE_PC + | str RC, SAVE_MULTRES + | beq ->BC_RET_Z + | b ->vm_return + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | ldrd CARG12, [CARG4, #-8]! + | mvn CARG3, #~LJ_TFALSE + | mov RC, #(2+1)*8 + | str CARG4, L:RA->top // Remove error from coroutine stack. + | strd CARG12, [BASE] // Copy error message. + | b <7 + |.else + | mov CARG1, L + | mov CARG2, L:RA + | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) + | // Never returns. + |.endif + | + |9: // Handle stack expansion on return from yield. + | mov CARG1, L + | lsr CARG2, RC, #3 + | bl extern lj_state_growstack // (lua_State *L, int n) + | mov CRET1, #0 + | b <4 + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | ldr CARG1, L->cframe + | add CARG2, BASE, NARGS8:RC + | str BASE, L->base + | tst CARG1, #CFRAME_RESUME + | str CARG2, L->top + | mov CRET1, #LUA_YIELD + | mov CARG3, #0 + | beq ->fff_fallback + | str CARG3, L->cframe + | strb CRET1, L->status + | b ->vm_leave_unw + | + |//-- Math library ------------------------------------------------------- + | + |.macro math_round, func + | .ffunc_1 math_ .. func + | checktp CARG2, LJ_TISNUM + | beq ->fff_restv + | bhi ->fff_fallback + | // Round FP value and normalize result. + | lsl CARG3, CARG2, #1 + | adds RB, CARG3, #0x00200000 + | bpl >2 // |x| < 1? + | mvn CARG4, #0x3e0 + | subs RB, CARG4, RB, asr #21 + | lsl CARG4, CARG2, #11 + | lsl CARG3, CARG1, #11 + | orr CARG4, CARG4, #0x80000000 + | rsb INS, RB, #32 + | orr CARG4, CARG4, CARG1, lsr #21 + | bls >3 // |x| >= 2^31? + | orr CARG3, CARG3, CARG4, lsl INS + | lsr CARG1, CARG4, RB + |.if "func" == "floor" + | tst CARG3, CARG2, asr #31 + | addne CARG1, CARG1, #1 + |.else + | bics CARG3, CARG3, CARG2, asr #31 + | addsne CARG1, CARG1, #1 + | ldrdvs CARG12, >9 + | bvs ->fff_restv + |.endif + | cmp CARG2, #0 + | rsblt CARG1, CARG1, #0 + |1: + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |2: // |x| < 1 + | orr CARG3, CARG3, CARG1 // ztest = abs(hi) | lo + |.if "func" == "floor" + | tst CARG3, CARG2, asr #31 // return (ztest & sign) == 0 ? 0 : -1 + | moveq CARG1, #0 + | mvnne CARG1, #0 + |.else + | bics CARG3, CARG3, CARG2, asr #31 // return (ztest & ~sign) == 0 ? 
0 : 1 + | moveq CARG1, #0 + | movne CARG1, #1 + |.endif + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |3: // |x| >= 2^31. Check for x == -(2^31). + | cmpeq CARG4, #0x80000000 + |.if "func" == "floor" + | cmpeq CARG3, #0 + |.endif + | bne >4 + | cmp CARG2, #0 + | movmi CARG1, #0x80000000 + | bmi <1 + |4: + | bl ->vm_..func + | b ->fff_restv + |.endmacro + | + | math_round floor + | math_round ceil + | + |.align 8 + |9: + | .long 0x00000000, 0x41e00000 // 2^31. + | + |.ffunc_1 math_abs + | checktp CARG2, LJ_TISNUM + | bhi ->fff_fallback + | bicne CARG2, CARG2, #0x80000000 + | bne ->fff_restv + | cmp CARG1, #0 + | rsbslt CARG1, CARG1, #0 + | ldrdvs CARG12, <9 + | // Fallthrough. + | + |->fff_restv: + | // CARG12 = TValue result. + | ldr PC, [BASE, FRAME_PC] + | strd CARG12, [BASE, #-8] + |->fff_res1: + | // PC = return. + | mov RC, #(1+1)*8 + |->fff_res: + | // RC = (nresults+1)*8, PC = return. + | ands CARG1, PC, #FRAME_TYPE + | ldreq INS, [PC, #-4] + | str RC, SAVE_MULTRES + | sub RA, BASE, #8 + | bne ->vm_return + | decode_RB8 RB, INS + |5: + | cmp RB, RC // More results expected? + | bhi >6 + | decode_RA8 CARG1, INS + | ins_next1 + | ins_next2 + | // Adjust BASE. KBASE is assumed to be set for the calling frame. + | sub BASE, RA, CARG1 + | ins_next3 + | + |6: // Fill up results with nil. + | add CARG2, RA, RC + | mvn CARG1, #~LJ_TNIL + | add RC, RC, #8 + | str CARG1, [CARG2, #-4] + | b <5 + | + |.macro math_extern, func + | .ffunc_n math_ .. func + | IOS mov RA, BASE + | bl extern func + | IOS mov BASE, RA + | b ->fff_restv + |.endmacro + | + |.macro math_extern2, func + | .ffunc_nn math_ .. func + | IOS mov RA, BASE + | bl extern func + | IOS mov BASE, RA + | b ->fff_restv + |.endmacro + | + | math_extern sqrt + | math_extern log + | math_extern log10 + | math_extern exp + | math_extern sin + | math_extern cos + | math_extern tan + | math_extern asin + | math_extern acos + | math_extern atan + | math_extern sinh + | math_extern cosh + | math_extern tanh + | math_extern2 pow + | math_extern2 atan2 + | math_extern2 fmod + | + |->ff_math_deg: + |.ffunc_n math_rad + | ldrd CARG34, CFUNC:CARG3->upvalue[0] + | bl extern __aeabi_dmul + | b ->fff_restv + | + |.ffunc_2 math_ldexp + | checktp CARG2, LJ_TISNUM + | bhs ->fff_fallback + | checktp CARG4, LJ_TISNUM + | bne ->fff_fallback + | IOS mov RA, BASE + | bl extern ldexp // (double x, int exp) + | IOS mov BASE, RA + | b ->fff_restv + | + |.ffunc_n math_frexp + | mov CARG3, sp + | IOS mov RA, BASE + | bl extern frexp + | IOS mov BASE, RA + | ldr CARG3, [sp] + | mvn CARG4, #~LJ_TISNUM + | ldr PC, [BASE, FRAME_PC] + | strd CARG12, [BASE, #-8] + | mov RC, #(2+1)*8 + | strd CARG34, [BASE] + | b ->fff_res + | + |.ffunc_n math_modf + | sub CARG3, BASE, #8 + | ldr PC, [BASE, FRAME_PC] + | IOS mov RA, BASE + | bl extern modf + | IOS mov BASE, RA + | mov RC, #(2+1)*8 + | strd CARG12, [BASE] + | b ->fff_res + | + |.macro math_minmax, name, cond, fcond + | .ffunc_1 name + | checktp CARG2, LJ_TISNUM + | mov RA, #8 + | bne >4 + |1: // Handle integers. + | ldrd CARG34, [BASE, RA] + | cmp RA, RC + | bhs ->fff_restv + | checktp CARG4, LJ_TISNUM + | bne >3 + | cmp CARG1, CARG3 + | add RA, RA, #8 + | mov..cond CARG1, CARG3 + | b <1 + |3: + | bhi ->fff_fallback + | // Convert intermediate result to number and continue below. + | bl extern __aeabi_i2d + | ldrd CARG34, [BASE, RA] + | b >6 + | + |4: + | bhi ->fff_fallback + |5: // Handle numbers. 
+ | ldrd CARG34, [BASE, RA] + | cmp RA, RC + | bhs ->fff_restv + | checktp CARG4, LJ_TISNUM + | bhs >7 + |6: + | bl extern __aeabi_cdcmple + | add RA, RA, #8 + | mov..fcond CARG1, CARG3 + | mov..fcond CARG2, CARG4 + | b <5 + |7: // Convert integer to number and continue above. + | bhi ->fff_fallback + | strd CARG12, TMPD + | mov CARG1, CARG3 + | bl extern __aeabi_i2d + | ldrd CARG34, TMPD + | b <6 + |.endmacro + | + | math_minmax math_min, gt, hi + | math_minmax math_max, lt, lo + | + |//-- String library ----------------------------------------------------- + | + |.ffunc_1 string_len + | checkstr CARG2, ->fff_fallback + | ldr CARG1, STR:CARG1->len + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |.ffunc string_byte // Only handle the 1-arg case here. + | ldrd CARG12, [BASE] + | ldr PC, [BASE, FRAME_PC] + | cmp NARGS8:RC, #8 + | checktpeq CARG2, LJ_TSTR // Need exactly 1 argument. + | bne ->fff_fallback + | ldr CARG3, STR:CARG1->len + | ldrb CARG1, STR:CARG1[1] // Access is always ok (NUL at end). + | mvn CARG2, #~LJ_TISNUM + | cmp CARG3, #0 + | moveq RC, #(0+1)*8 + | movne RC, #(1+1)*8 + | strd CARG12, [BASE, #-8] + | b ->fff_res + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | ldrd CARG12, [BASE] + | ldr PC, [BASE, FRAME_PC] + | cmp NARGS8:RC, #8 // Need exactly 1 argument. + | checktpeq CARG2, LJ_TISNUM + | bicseq CARG4, CARG1, #255 + | mov CARG3, #1 + | bne ->fff_fallback + | str CARG1, TMPD + | mov CARG2, TMPDp // Points to stack. Little-endian. + |->fff_newstr: + | // CARG2 = str, CARG3 = len. + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_str_new // (lua_State *L, char *str, size_t l) + | // Returns GCstr *. + | ldr BASE, L->base + | mvn CARG2, #~LJ_TSTR + | b ->fff_restv + | + |.ffunc string_sub + | ffgccheck + | ldrd CARG12, [BASE] + | ldrd CARG34, [BASE, #16] + | cmp NARGS8:RC, #16 + | mvn RB, #0 + | beq >1 + | blo ->fff_fallback + | checktp CARG4, LJ_TISNUM + | mov RB, CARG3 + | bne ->fff_fallback + |1: + | ldrd CARG34, [BASE, #8] + | checktp CARG2, LJ_TSTR + | ldreq CARG2, STR:CARG1->len + | checktpeq CARG4, LJ_TISNUM + | bne ->fff_fallback + | // CARG1 = str, CARG2 = str->len, CARG3 = start, RB = end + | add CARG4, CARG2, #1 + | cmp CARG3, #0 // if (start < 0) start += len+1 + | addlt CARG3, CARG3, CARG4 + | cmp CARG3, #1 // if (start < 1) start = 1 + | movlt CARG3, #1 + | cmp RB, #0 // if (end < 0) end += len+1 + | addlt RB, RB, CARG4 + | bic RB, RB, RB, asr #31 // if (end < 0) end = 0 + | cmp RB, CARG2 // if (end > len) end = len + | add CARG1, STR:CARG1, #sizeof(GCstr)-1 + | movgt RB, CARG2 + | add CARG2, CARG1, CARG3 + | subs CARG3, RB, CARG3 // len = start - end + | add CARG3, CARG3, #1 // len += 1 + | bge ->fff_newstr + |->fff_emptystr: + | sub STR:CARG1, DISPATCH, #-DISPATCH_GL(strempty) + | mvn CARG2, #~LJ_TSTR + | b ->fff_restv + | + |.ffunc string_rep // Only handle the 1-char case inline. + | ffgccheck + | ldrd CARG12, [BASE] + | ldrd CARG34, [BASE, #8] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + | checktp CARG2, LJ_TSTR + | checktpeq CARG4, LJ_TISNUM + | bne ->fff_fallback + | subs CARG4, CARG3, #1 + | ldr CARG2, STR:CARG1->len + | blt ->fff_emptystr // Count <= 0? + | cmp CARG2, #1 + | blo ->fff_emptystr // Zero-length string? + | bne ->fff_fallback // Fallback for > 1-char strings. + | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)] + | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)] + | ldr CARG1, STR:CARG1[1] + | cmp RB, CARG3 + | blo ->fff_fallback + |1: // Fill buffer with char. 
+ | strb CARG1, [CARG2, CARG4] + | subs CARG4, CARG4, #1 + | bge <1 + | b ->fff_newstr + | + |.ffunc string_reverse + | ffgccheck + | ldrd CARG12, [BASE] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + | checkstr CARG2, ->fff_fallback + | ldr CARG3, STR:CARG1->len + | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)] + | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)] + | mov CARG4, CARG3 + | add CARG1, STR:CARG1, #sizeof(GCstr) + | cmp RB, CARG3 + | blo ->fff_fallback + |1: // Reverse string copy. + | ldrb RB, [CARG1], #1 + | subs CARG4, CARG4, #1 + | blt ->fff_newstr + | strb RB, [CARG2, CARG4] + | b <1 + | + |.macro ffstring_case, name, lo + | .ffunc name + | ffgccheck + | ldrd CARG12, [BASE] + | cmp NARGS8:RC, #8 + | blo ->fff_fallback + | checkstr CARG2, ->fff_fallback + | ldr CARG3, STR:CARG1->len + | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)] + | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)] + | mov CARG4, #0 + | add CARG1, STR:CARG1, #sizeof(GCstr) + | cmp RB, CARG3 + | blo ->fff_fallback + |1: // ASCII case conversion. + | ldrb RB, [CARG1, CARG4] + | cmp CARG4, CARG3 + | bhs ->fff_newstr + | sub RC, RB, #lo + | cmp RC, #26 + | eorlo RB, RB, #0x20 + | strb RB, [CARG2, CARG4] + | add CARG4, CARG4, #1 + | b <1 + |.endmacro + | + |ffstring_case string_lower, 65 + |ffstring_case string_upper, 97 + | + |//-- Table library ------------------------------------------------------ + | + |.ffunc_1 table_getn + | checktab CARG2, ->fff_fallback + | IOS mov RA, BASE + | bl extern lj_tab_len // (GCtab *t) + | // Returns uint32_t (but less than 2^31). + | IOS mov BASE, RA + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |//-- Bit library -------------------------------------------------------- + | + |// FP number to bit conversion for soft-float. Clobbers r0-r3. + |->vm_tobit_fb: + | bhi ->fff_fallback + |->vm_tobit: + | lsl RB, CARG2, #1 + | adds RB, RB, #0x00200000 + | movpl CARG1, #0 // |x| < 1? + | bxpl lr + | mvn CARG4, #0x3e0 + | subs RB, CARG4, RB, asr #21 + | bmi >1 // |x| >= 2^32? 
+ | lsl CARG4, CARG2, #11 + | orr CARG4, CARG4, #0x80000000 + | orr CARG4, CARG4, CARG1, lsr #21 + | cmp CARG2, #0 + | lsr CARG1, CARG4, RB + | rsblt CARG1, CARG1, #0 + | bx lr + |1: + | add RB, RB, #21 + | lsr CARG4, CARG1, RB + | rsb RB, RB, #20 + | lsl CARG1, CARG2, #12 + | cmp CARG2, #0 + | orr CARG1, CARG4, CARG1, lsl RB + | rsblt CARG1, CARG1, #0 + | bx lr + | + |.macro .ffunc_bit, name + | .ffunc_1 bit_..name + | checktp CARG2, LJ_TISNUM + | blne ->vm_tobit_fb + |.endmacro + | + |.ffunc_bit tobit + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name + | mov CARG3, CARG1 + | mov RA, #8 + |1: + | ldrd CARG12, [BASE, RA] + | cmp RA, NARGS8:RC + | add RA, RA, #8 + | bge >2 + | checktp CARG2, LJ_TISNUM + | blne ->vm_tobit_fb + | ins CARG3, CARG3, CARG1 + | b <1 + |.endmacro + | + |.ffunc_bit_op band, and + |.ffunc_bit_op bor, orr + |.ffunc_bit_op bxor, eor + | + |2: + | mvn CARG4, #~LJ_TISNUM + | ldr PC, [BASE, FRAME_PC] + | strd CARG34, [BASE, #-8] + | b ->fff_res1 + | + |.ffunc_bit bswap + | eor CARG3, CARG1, CARG1, ror #16 + | bic CARG3, CARG3, #0x00ff0000 + | ror CARG1, CARG1, #8 + | mvn CARG2, #~LJ_TISNUM + | eor CARG1, CARG1, CARG3, lsr #8 + | b ->fff_restv + | + |.ffunc_bit bnot + | mvn CARG1, CARG1 + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + | + |.macro .ffunc_bit_sh, name, ins, shmod + | .ffunc bit_..name + | ldrd CARG12, [BASE, #8] + | cmp NARGS8:RC, #16 + | blo ->fff_fallback + | checktp CARG2, LJ_TISNUM + | blne ->vm_tobit_fb + |.if shmod == 0 + | and RA, CARG1, #31 + |.else + | rsb RA, CARG1, #0 + |.endif + | ldrd CARG12, [BASE] + | checktp CARG2, LJ_TISNUM + | blne ->vm_tobit_fb + | ins CARG1, CARG1, RA + | mvn CARG2, #~LJ_TISNUM + | b ->fff_restv + |.endmacro + | + |.ffunc_bit_sh lshift, lsl, 0 + |.ffunc_bit_sh rshift, lsr, 0 + |.ffunc_bit_sh arshift, asr, 0 + |.ffunc_bit_sh rol, ror, 1 + |.ffunc_bit_sh ror, ror, 0 + | + |//----------------------------------------------------------------------- + | + |->fff_fallback: // Call fast function fallback handler. + | // BASE = new base, RC = nargs*8 + | ldr CARG3, [BASE, FRAME_FUNC] + | ldr CARG2, L->maxstack + | add CARG1, BASE, NARGS8:RC + | ldr PC, [BASE, FRAME_PC] // Fallback may overwrite PC. + | str CARG1, L->top + | ldr CARG3, CFUNC:CARG3->f + | str BASE, L->base + | add CARG1, CARG1, #8*LUA_MINSTACK + | str PC, SAVE_PC // Redundant (but a defined value). + | cmp CARG1, CARG2 + | mov CARG1, L + | bhi >5 // Need to grow stack. + | blx CARG3 // (lua_State *L) + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | ldr BASE, L->base + | cmp CRET1, #0 + | lsl RC, CRET1, #3 + | sub RA, BASE, #8 + | bgt ->fff_res // Returned nresults+1? + |1: // Returned 0 or -1: retry fast path. + | ldr CARG1, L->top + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | sub NARGS8:RC, CARG1, BASE + | bne ->vm_call_tail // Returned -1? + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | ands CARG1, PC, #FRAME_TYPE + | bic CARG2, PC, #FRAME_TYPEP + | ldreq INS, [PC, #-4] + | andeq CARG2, MASKR8, INS, lsr #5 // Conditional decode_RA8. + | sub RB, BASE, CARG2 + | b ->vm_call_dispatch // Resolve again for tailcall. + | + |5: // Grow stack for fallback handler. + | mov CARG2, #LUA_MINSTACK + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->base + | cmp CARG1, CARG1 // Set zero-flag to force retry. + | b <1 + | + |->fff_gcstep: // Call GC step function. 
+ | // BASE = new base, RC = nargs*8 + | mov RA, lr + | str BASE, L->base + | add CARG2, BASE, NARGS8:RC + | str PC, SAVE_PC // Redundant (but a defined value). + | str CARG2, L->top + | mov CARG1, L + | bl extern lj_gc_step // (lua_State *L) + | ldr BASE, L->base + | mov lr, RA // Help return address predictor. + | ldr CFUNC:CARG3, [BASE, FRAME_FUNC] + | bx lr + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. +#if LJ_HASJIT + | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] + | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent. + | bne >5 + | // Decrement the hookcount for consistency, but always do the call. + | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] + | tst CARG1, #HOOK_ACTIVE + | bne >1 + | sub CARG2, CARG2, #1 + | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT + | strne CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] + | b >1 +#endif + | + |->vm_rethook: // Dispatch target for return hooks. + | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] + | tst CARG1, #HOOK_ACTIVE // Hook already active? + | beq >1 + |5: // Re-dispatch to static ins. + | decode_OP OP, INS + | add OP, DISPATCH, OP, lsl #2 + | ldr pc, [OP, #GG_DISP2STATIC] + | + |->vm_inshook: // Dispatch target for instr/line hooks. + | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)] + | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] + | tst CARG1, #HOOK_ACTIVE // Hook already active? + | bne <5 + | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT + | beq <5 + | subs CARG2, CARG2, #1 + | str CARG2, [DISPATCH, #DISPATCH_GL(hookcount)] + | beq >1 + | tst CARG1, #LUA_MASKLINE + | beq <5 + |1: + | mov CARG1, L + | str BASE, L->base + | mov CARG2, PC + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. + | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) + |3: + | ldr BASE, L->base + |4: // Re-dispatch to static ins. + | ldrb OP, [PC, #-4] + | ldr INS, [PC, #-4] + | add OP, DISPATCH, OP, lsl #2 + | ldr OP, [OP, #GG_DISP2STATIC] + | decode_RA8 RA, INS + | decode_RD RC, INS + | bx OP + | + |->cont_hook: // Continue from hook yield. + | ldr CARG1, [CARG4, #-24] + | add PC, PC, #4 + | str CARG1, SAVE_MULTRES // Restore MULTRES for *M ins. + | b <4 + | + |->vm_hotloop: // Hot loop counter underflow. +#if LJ_HASJIT + | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L). + | sub CARG1, DISPATCH, #-GG_DISP2J + | str PC, SAVE_PC + | ldr CARG3, LFUNC:CARG3->field_pc + | mov CARG2, PC + | str L, [DISPATCH, #DISPATCH_J(L)] + | ldrb CARG3, [CARG3, #PC2PROTO(framesize)] + | str BASE, L->base + | add CARG3, BASE, CARG3, lsl #3 + | str CARG3, L->top + | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc) + | b <3 +#endif + | + |->vm_callhook: // Dispatch target for call hooks. + | mov CARG2, PC +#if LJ_HASJIT + | b >1 +#endif + | + |->vm_hotcall: // Hot call counter underflow. +#if LJ_HASJIT + | orr CARG2, PC, #1 + |1: +#endif + | add CARG4, BASE, RC + | str PC, SAVE_PC + | mov CARG1, L + | str BASE, L->base + | sub RA, RA, BASE + | str CARG4, L->top + | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc) + | // Returns ASMFunction. + | ldr BASE, L->base + | ldr CARG4, L->top + | mov CARG2, #0 + | add RA, BASE, RA + | sub NARGS8:RC, CARG4, BASE + | str CARG2, SAVE_PC // Invalidate for subsequent line hook. 
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] + | ldr INS, [PC, #-4] + | bx CRET1 + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_exit_handler: +#if LJ_HASJIT + | sub sp, sp, #12 + | push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12} + | ldr CARG1, [sp, #64] // Load original value of lr. + | ldr DISPATCH, [lr] // Load DISPATCH. + | add CARG3, sp, #64 // Recompute original value of sp. + | mv_vmstate CARG4, EXIT + | str CARG3, [sp, #52] // Store sp in RID_SP + | st_vmstate CARG4 + | ldr CARG2, [CARG1, #-4]! // Get exit instruction. + | str CARG1, [sp, #56] // Store exit pc in RID_LR and RID_PC. + | str CARG1, [sp, #60] + | lsl CARG2, CARG2, #8 + | add CARG1, CARG1, CARG2, asr #6 + | ldr CARG2, [lr, #4] // Load exit stub group offset. + | sub CARG1, CARG1, lr + | ldr L, [DISPATCH, #DISPATCH_GL(jit_L)] + | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number. + | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)] + | str CARG1, [DISPATCH, #DISPATCH_J(exitno)] + | mov CARG4, #0 + | str L, [DISPATCH, #DISPATCH_J(L)] + | str BASE, L->base + | str CARG4, [DISPATCH, #DISPATCH_GL(jit_L)] + | sub CARG1, DISPATCH, #-GG_DISP2J + | mov CARG2, sp + | bl extern lj_trace_exit // (jit_State *J, ExitState *ex) + | // Returns MULTRES (unscaled) or negated error code. + | ldr CARG2, L->cframe + | ldr BASE, L->base + | bic CARG2, CARG2, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated. + | mov sp, CARG2 + | ldr PC, SAVE_PC // Get SAVE_PC. + | str L, SAVE_L // Set SAVE_L (on-trace resume/yield). + | b >1 +#endif + |->vm_exit_interp: + | // CARG1 = MULTRES or negated error code, BASE, PC and DISPATCH set. +#if LJ_HASJIT + | ldr L, SAVE_L + |1: + | cmp CARG1, #0 + | blt >3 // Check for error from exit. + | lsl RC, CARG1, #3 + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | str RC, SAVE_MULTRES + | mov CARG3, #0 + | ldr CARG2, LFUNC:CARG2->field_pc + | str CARG3, [DISPATCH, #DISPATCH_GL(jit_L)] + | mv_vmstate CARG4, INTERP + | ldr KBASE, [CARG2, #PC2PROTO(k)] + | // Modified copy of ins_next which handles function header dispatch, too. + | ldrb OP, [PC] + | mov MASKR8, #255 + | ldr INS, [PC], #4 + | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. + | st_vmstate CARG4 + | cmp OP, #BC_FUNCF // Function header? + | ldr OP, [DISPATCH, OP, lsl #2] + | decode_RA8 RA, INS + | lsrlo RC, INS, #16 // No: Decode operands A*8 and D. + | subhs RC, RC, #8 + | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8 + | bx OP + | + |3: // Rethrow error from the right C frame. + | rsb CARG2, CARG1, #0 + | mov CARG1, L + | bl extern lj_err_throw // (lua_State *L, int errcode) +#endif + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + |// FP value rounding. Called from JIT code. + |// + |// double lj_vm_floor/ceil/trunc(double x); + |.macro vm_round, func + |->vm_ .. func: + | lsl CARG3, CARG2, #1 + | adds RB, CARG3, #0x00200000 + | bpl >2 // |x| < 1? + | mvn CARG4, #0x3cc + | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0. + | bxlo lr // |x| >= 2^52: done. 
+ | mvn CARG4, #1 + | bic CARG3, CARG1, CARG4, lsl RB // ztest = lo & ~lomask + | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask + | subs RB, RB, #32 + | bicpl CARG4, CARG2, CARG4, lsl RB // |x| <= 2^20: ztest |= hi & ~himask + | orrpl CARG3, CARG3, CARG4 + | mvnpl CARG4, #1 + | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask + |.if "func" == "floor" + | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0) + |.else + | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0) + |.endif + | bxeq lr // iszero: done. + | mvn CARG4, #1 + | cmp RB, #0 + | lslpl CARG3, CARG4, RB + | mvnmi CARG3, #0 + | add RB, RB, #32 + | subs CARG1, CARG1, CARG4, lsl RB // lo = lo-lomask + | sbc CARG2, CARG2, CARG3 // hi = hi-himask+carry + | bx lr + | + |2: // |x| < 1: + | orr CARG3, CARG3, CARG1 // ztest = (2*hi) | lo + |.if "func" == "floor" + | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0) + |.else + | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0) + |.endif + | mov CARG1, #0 // lo = 0 + | and CARG2, CARG2, #0x80000000 + | ldrne CARG4, <9 // hi = sign(x) | (iszero ? 0.0 : 1.0) + | orrne CARG2, CARG2, CARG4 + | bx lr + |.endmacro + | + |9: + | .long 0x3ff00000 // hiword(1.0) + | vm_round floor + | vm_round ceil + | + |->vm_trunc: +#if LJ_HASJIT + | lsl CARG3, CARG2, #1 + | adds RB, CARG3, #0x00200000 + | andpl CARG2, CARG2, #0x80000000 // |x| < 1? hi = sign(x), lo = 0. + | movpl CARG1, #0 + | bxpl lr + | mvn CARG4, #0x3cc + | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0. + | bxlo lr // |x| >= 2^52: already done. + | mvn CARG4, #1 + | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask + | subs RB, RB, #32 + | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask + | bx lr +#endif + | + | // double lj_vm_mod(double dividend, double divisor); + |->vm_mod: + | push {r0, r1, r2, r3, r4, lr} + | bl extern __aeabi_ddiv + | bl ->vm_floor + | ldrd CARG34, [sp, #8] + | bl extern __aeabi_dmul + | ldrd CARG34, [sp] + | eor CARG2, CARG2, #0x80000000 + | bl extern __aeabi_dadd + | add sp, sp, #20 + | pop {pc} + | + | // int lj_vm_modi(int dividend, int divisor); + |->vm_modi: + | ands RB, CARG1, #0x80000000 + | rsbmi CARG1, CARG1, #0 // a = |dividend| + | eor RB, RB, CARG2, asr #1 // Keep signdiff and sign(divisor). + | cmp CARG2, #0 + | rsbmi CARG2, CARG2, #0 // b = |divisor| + | subs CARG4, CARG2, #1 + | cmpne CARG1, CARG2 + | moveq CARG1, #0 // if (b == 1 || a == b) a = 0 + | tsthi CARG2, CARG4 + | andeq CARG1, CARG1, CARG4 // else if ((b & (b-1)) == 0) a &= b-1 + | bls >1 + | // Use repeated subtraction to get the remainder. + | clz CARG3, CARG1 + | clz CARG4, CARG2 + | sub CARG4, CARG4, CARG3 + | rsbs CARG3, CARG4, #31 // entry = (31-(clz(b)-clz(a)))*8 + | addne pc, pc, CARG3, lsl #3 // Duff's device. + | nop + { + int i; + for (i = 31; i >= 0; i--) { + | cmp CARG1, CARG2, lsl #i + | subhs CARG1, CARG1, CARG2, lsl #i + } + } + |1: + | cmp CARG1, #0 + | cmpne RB, #0 + | submi CARG1, CARG1, CARG2 // if (y != 0 && signdiff) y = y - b + | eors CARG2, CARG1, RB, lsl #1 + | rsbmi CARG1, CARG1, #0 // if (sign(divisor) != sign(y)) y = -y + | bx lr + | + |// Callable from C: double lj_vm_foldarith(double x, double y, int op) + |// Compute x op y for basic arithmetic operators (+ - * / % ^ and unary -) + |// and basic math functions. 
ORDER ARITH + |->vm_foldarith: + | ldr OP, [sp] + | cmp OP, #1 + | blo extern __aeabi_dadd + | beq extern __aeabi_dsub + | cmp OP, #3 + | blo extern __aeabi_dmul + | beq extern __aeabi_ddiv + | cmp OP, #5 + | blo ->vm_mod + | beq extern pow + | cmp OP, #7 + | eorlo CARG2, CARG2, #0x80000000 + | biceq CARG2, CARG2, #0x80000000 + | bxls lr +#if LJ_HASJIT + | cmp OP, #9 + | blo extern atan2 + | beq >9 // No support needed for IR_LDEXP. + | cmp OP, #11 + | bhi >9 + | push {r4, lr} + | beq >1 + | // IR_MIN + | bl extern __aeabi_cdcmple + | movhi CARG1, CARG3 + | movhi CARG2, CARG4 + | pop {r4, pc} + |9: + | NYI // Bad op. + | + |1: // IR_MAX + | bl extern __aeabi_cdcmple + | movlo CARG1, CARG3 + | movlo CARG2, CARG4 + | pop {r4, pc} +#else + | NYI // Other operations only needed by JIT compiler. +#endif + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_ffi_call: +#if LJ_HASFFI + | .type CCSTATE, CCallState, r4 + | push {CCSTATE, r5, r11, lr} + | mov CCSTATE, CARG1 + | ldr CARG1, CCSTATE:CARG1->spadj + | ldrb CARG2, CCSTATE->nsp + | add CARG3, CCSTATE, #offsetof(CCallState, stack) + | mov r11, sp + | sub sp, sp, CARG1 // Readjust stack. + | subs CARG2, CARG2, #1 + | ldr RB, CCSTATE->func + | bmi >2 + |1: // Copy stack slots. + | ldr CARG4, [CARG3, CARG2, lsl #2] + | str CARG4, [sp, CARG2, lsl #2] + | subs CARG2, CARG2, #1 + | bpl <1 + |2: + | ldr CARG1, CCSTATE->gpr[0] + | ldr CARG2, CCSTATE->gpr[1] + | ldr CARG3, CCSTATE->gpr[2] + | ldr CARG4, CCSTATE->gpr[3] + | blx RB + | mov sp, r11 + | str CRET1, CCSTATE->gpr[0] + | str CRET2, CCSTATE->gpr[1] + | pop {CCSTATE, r5, r11, pc} +#endif + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1*8, RC = src2, JMP with RC = target + | lsl RC, RC, #3 + | ldrd CARG12, [RA, BASE]! + | ldrh RB, [PC, #2] + | ldrd CARG34, [RC, BASE]! + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | checktp CARG2, LJ_TISNUM + | bne >3 + | checktp CARG4, LJ_TISNUM + | bne >4 + | cmp CARG1, CARG3 + if (op == BC_ISLT) { + | sublt PC, RB, #0x20000 + } else if (op == BC_ISGE) { + | subge PC, RB, #0x20000 + } else if (op == BC_ISLE) { + | suble PC, RB, #0x20000 + } else { + | subgt PC, RB, #0x20000 + } + |1: + | ins_next + | + |3: // CARG12 is not an integer. + | bhi ->vmeta_comp + | // CARG12 is a number. + | checktp CARG4, LJ_TISNUM + | movlo RA, RB // Save RB. + | blo >5 + | // CARG12 is a number, CARG3 is an integer. + | mov CARG1, CARG3 + | mov RC, RA + | mov RA, RB // Save RB. + | bl extern __aeabi_i2d + | mov CARG3, CARG1 + | mov CARG4, CARG2 + | ldrd CARG12, [RC] // Restore first operand. + | b >5 + |4: // CARG1 is an integer, CARG34 is not an integer. 
+ | bhi ->vmeta_comp + | // CARG1 is an integer, CARG34 is a number + | mov RA, RB // Save RB. + | bl extern __aeabi_i2d + | ldrd CARG34, [RC] // Restore second operand. + |5: // CARG12 and CARG34 are numbers. + | bl extern __aeabi_cdcmple + | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. + if (op == BC_ISLT) { + | sublo PC, RA, #0x20000 + } else if (op == BC_ISGE) { + | subhs PC, RA, #0x20000 + } else if (op == BC_ISLE) { + | subls PC, RA, #0x20000 + } else { + | subhi PC, RA, #0x20000 + } + | b <1 + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | // RA = src1*8, RC = src2, JMP with RC = target + | lsl RC, RC, #3 + | ldrd CARG12, [RA, BASE]! + | ldrh RB, [PC, #2] + | ldrd CARG34, [RC, BASE]! + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | checktp CARG2, LJ_TISNUM + | cmnls CARG4, #-LJ_TISNUM + if (vk) { + | bls ->BC_ISEQN_Z + } else { + | bls ->BC_ISNEN_Z + } + | // Either or both types are not numbers. + if (LJ_HASFFI) { + | checktp CARG2, LJ_TCDATA + | checktpne CARG4, LJ_TCDATA + | beq ->vmeta_equal_cd + } + | cmp CARG2, CARG4 // Compare types. + | bne >2 // Not the same type? + | checktp CARG2, LJ_TISPRI + | bhs >1 // Same type and primitive type? + | + | // Same types and not a primitive type. Compare GCobj or pvalue. + | cmp CARG1, CARG3 + if (vk) { + | bne >3 // Different GCobjs or pvalues? + |1: // Branch if same. + | sub PC, RB, #0x20000 + |2: // Different. + | ins_next + |3: + | checktp CARG2, LJ_TISTABUD + | bhi <2 // Different objects and not table/ud? + } else { + | beq >1 // Same GCobjs or pvalues? + | checktp CARG2, LJ_TISTABUD + | bhi >2 // Different objects and not table/ud? + } + | // Different tables or userdatas. Need to check __eq metamethod. + | // Field metatable must be at same offset for GCtab and GCudata! + | ldr TAB:RA, TAB:CARG1->metatable + | cmp TAB:RA, #0 + if (vk) { + | beq <2 // No metatable? + } else { + | beq >2 // No metatable? + } + | ldrb RA, TAB:RA->nomm + | mov CARG4, #1-vk // ne = 0 or 1. + | mov CARG2, CARG1 + | tst RA, #1<<MM_eq + | beq ->vmeta_equal // 'no __eq' flag not set? + if (vk) { + | b <2 + } else { + |2: // Branch if different. + | sub PC, RB, #0x20000 + |1: // Same. + | ins_next + } + break; + + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + | // RA = src*8, RC = str_const (~), JMP with RC = target + | mvn RC, RC + | ldrd CARG12, [BASE, RA] + | ldrh RB, [PC, #2] + | ldr STR:CARG3, [KBASE, RC, lsl #2] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | checktp CARG2, LJ_TSTR + if (LJ_HASFFI) { + | bne >7 + | cmp CARG1, CARG3 + } else { + | cmpeq CARG1, CARG3 + } + if (vk) { + | subeq PC, RB, #0x20000 + |1: + } else { + |1: + | subne PC, RB, #0x20000 + } + | ins_next + | + if (LJ_HASFFI) { + |7: + | checktp CARG2, LJ_TCDATA + | bne <1 + | b ->vmeta_equal_cd + } + break; + + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + | // RA = src*8, RC = num_const (~), JMP with RC = target + | lsl RC, RC, #3 + | ldrd CARG12, [RA, BASE]! + | ldrh RB, [PC, #2] + | ldrd CARG34, [RC, KBASE]! + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + if (vk) { + |->BC_ISEQN_Z: + } else { + |->BC_ISNEN_Z: + } + | checktp CARG2, LJ_TISNUM + | bne >3 + | checktp CARG4, LJ_TISNUM + | bne >4 + | cmp CARG1, CARG3 + if (vk) { + | subeq PC, RB, #0x20000 + |1: + } else { + |1: + | subne PC, RB, #0x20000 + } + |2: + | ins_next + | + |3: // CARG12 is not an integer. + if (LJ_HASFFI) { + | bhi >7 + } else { + if (!vk) { + | subhi PC, RB, #0x20000 + } + | bhi <2 + } + | // CARG12 is a number. 
+ | checktp CARG4, LJ_TISNUM + | movlo RA, RB // Save RB. + | blo >5 + | // CARG12 is a number, CARG3 is an integer. + | mov CARG1, CARG3 + | mov RC, RA + |4: // CARG1 is an integer, CARG34 is a number. + | mov RA, RB // Save RB. + | bl extern __aeabi_i2d + | ldrd CARG34, [RC] // Restore other operand. + |5: // CARG12 and CARG34 are numbers. + | bl extern __aeabi_cdcmpeq + if (vk) { + | subeq PC, RA, #0x20000 + } else { + | subne PC, RA, #0x20000 + } + | b <2 + | + if (LJ_HASFFI) { + |7: + | checktp CARG2, LJ_TCDATA + | bne <1 + | b ->vmeta_equal_cd + } + break; + + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + | // RA = src*8, RC = primitive_type (~), JMP with RC = target + | ldrd CARG12, [BASE, RA] + | ldrh RB, [PC, #2] + | add PC, PC, #4 + | mvn RC, RC + | add RB, PC, RB, lsl #2 + if (LJ_HASFFI) { + | checktp CARG2, LJ_TCDATA + | beq ->vmeta_equal_cd + } + | cmp CARG2, RC + if (vk) { + | subeq PC, RB, #0x20000 + } else { + | subne PC, RB, #0x20000 + } + | ins_next + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + | // RA = dst*8 or unused, RC = src, JMP with RC = target + | add RC, BASE, RC, lsl #3 + | ldrh RB, [PC, #2] + | ldrd CARG12, [RC] + | add PC, PC, #4 + | add RB, PC, RB, lsl #2 + | checktp CARG2, LJ_TTRUE + if (op == BC_ISTC || op == BC_IST) { + | subls PC, RB, #0x20000 + if (op == BC_ISTC) { + | strdls CARG12, [BASE, RA] + } + } else { + | subhi PC, RB, #0x20000 + if (op == BC_ISFC) { + | strdhi CARG12, [BASE, RA] + } + } + | ins_next + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + | // RA = dst*8, RC = src + | lsl RC, RC, #3 + | ins_next1 + | ldrd CARG12, [BASE, RC] + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + case BC_NOT: + | // RA = dst*8, RC = src + | add RC, BASE, RC, lsl #3 + | ins_next1 + | ldr CARG1, [RC, #4] + | add RA, BASE, RA + | ins_next2 + | checktp CARG1, LJ_TTRUE + | mvnls CARG2, #~LJ_TFALSE + | mvnhi CARG2, #~LJ_TTRUE + | str CARG2, [RA, #4] + | ins_next3 + break; + case BC_UNM: + | // RA = dst*8, RC = src + | lsl RC, RC, #3 + | ldrd CARG12, [BASE, RC] + | ins_next1 + | ins_next2 + | checktp CARG2, LJ_TISNUM + | bhi ->vmeta_unm + | eorne CARG2, CARG2, #0x80000000 + | bne >5 + | rsbseq CARG1, CARG1, #0 + | ldrdvs CARG12, >9 + |5: + | strd CARG12, [BASE, RA] + | ins_next3 + | + |.align 8 + |9: + | .long 0x00000000, 0x41e00000 // 2^31. + break; + case BC_LEN: + | // RA = dst*8, RC = src + | lsl RC, RC, #3 + | ldrd CARG12, [BASE, RC] + | checkstr CARG2, >2 + | ldr CARG1, STR:CARG1->len + |1: + | mvn CARG2, #~LJ_TISNUM + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |2: + | checktab CARG2, ->vmeta_len +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | ldr TAB:CARG3, TAB:CARG1->metatable + | cmp TAB:CARG3, #0 + | bne >9 + |3: +#endif + |->BC_LEN_Z: + | IOS mov RC, BASE + | bl extern lj_tab_len // (GCtab *t) + | // Returns uint32_t (but less than 2^31). + | IOS mov BASE, RC + | b <1 +#ifdef LUAJIT_ENABLE_LUA52COMPAT + |9: + | ldrb CARG4, TAB:CARG3->nomm + | tst CARG4, #1<<MM_len + | bne <3 // 'no __len' flag set: done. 
+ | b ->vmeta_len +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithcheck, cond, ncond, target + ||if (vk == 1) { + | cmn CARG4, #-LJ_TISNUM + | cmn..cond CARG2, #-LJ_TISNUM + ||} else { + | cmn CARG2, #-LJ_TISNUM + | cmn..cond CARG4, #-LJ_TISNUM + ||} + | b..ncond target + |.endmacro + |.macro ins_arithcheck_int, target + | ins_arithcheck eq, ne, target + |.endmacro + |.macro ins_arithcheck_num, target + | ins_arithcheck lo, hs, target + |.endmacro + | + |.macro ins_arithpre + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | ldrd CARG12, [BASE, RB] + | ldrd CARG34, [KBASE, RC] + || break; + ||case 1: + | ldrd CARG34, [BASE, RB] + | ldrd CARG12, [KBASE, RC] + || break; + ||default: + | ldrd CARG12, [BASE, RB] + | ldrd CARG34, [BASE, RC] + || break; + ||} + |.endmacro + | + |.macro ins_arithfallback, ins + ||switch (vk) { + ||case 0: + | ins ->vmeta_arith_vn + || break; + ||case 1: + | ins ->vmeta_arith_nv + || break; + ||default: + | ins ->vmeta_arith_vv + || break; + ||} + |.endmacro + | + |.macro ins_arithdn, intins, fpcall + | ins_arithpre + |.if "intins" ~= "vm_modi" + | ins_next1 + |.endif + | ins_arithcheck_int >5 + |.if "intins" == "smull" + | smull CARG1, RC, CARG3, CARG1 + | cmp RC, CARG1, asr #31 + | ins_arithfallback bne + |.elif "intins" == "vm_modi" + | movs CARG2, CARG3 + | ins_arithfallback beq + | bl ->vm_modi + | mvn CARG2, #~LJ_TISNUM + |.else + | intins CARG1, CARG1, CARG3 + | ins_arithfallback bvs + |.endif + |4: + |.if "intins" == "vm_modi" + | ins_next1 + |.endif + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |5: // FP variant. + | ins_arithfallback ins_arithcheck_num + |.if "intins" == "vm_modi" + | bl fpcall + |.else + | bl fpcall + | ins_next1 + |.endif + | b <4 + |.endmacro + | + |.macro ins_arithfp, fpcall + | ins_arithpre + | ins_arithfallback ins_arithcheck_num + |.if "fpcall" == "extern pow" + | IOS mov RC, BASE + | bl fpcall + | IOS mov BASE, RC + |.else + | bl fpcall + |.endif + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |.endmacro + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + | ins_arithdn adds, extern __aeabi_dadd + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + | ins_arithdn subs, extern __aeabi_dsub + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arithdn smull, extern __aeabi_dmul + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | ins_arithfp extern __aeabi_ddiv + break; + case BC_MODVN: case BC_MODNV: case BC_MODVV: + | ins_arithdn vm_modi, ->vm_mod + break; + case BC_POW: + | // NYI: (partial) integer arithmetic. + | ins_arithfp extern pow + break; + + case BC_CAT: + | decode_RB8 RC, INS + | decode_RC8 RB, INS + | // RA = dst*8, RC = src_start*8, RB = src_end*8 (note: RB/RC swapped!) + | sub CARG3, RB, RC + | str BASE, L->base + | add CARG2, BASE, RB + |->BC_CAT_Z: + | // RA = dst*8, RC = src_start*8, CARG2 = top-1 + | mov CARG1, L + | str PC, SAVE_PC + | lsr CARG3, CARG3, #3 + | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left) + | // Returns NULL (finished) or TValue * (metamethod). + | ldr BASE, L->base + | cmp CRET1, #0 + | bne ->vmeta_binop + | ldrd CARG34, [BASE, RC] + | ins_next1 + | ins_next2 + | strd CARG34, [BASE, RA] // Copy result to RA. 
+ | ins_next3 + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | // RA = dst*8, RC = str_const (~) + | mvn RC, RC + | ins_next1 + | ldr CARG1, [KBASE, RC, lsl #2] + | mvn CARG2, #~LJ_TSTR + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + case BC_KCDATA: +#if LJ_HASFFI + | // RA = dst*8, RC = cdata_const (~) + | mvn RC, RC + | ins_next1 + | ldr CARG1, [KBASE, RC, lsl #2] + | mvn CARG2, #~LJ_TCDATA + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 +#endif + break; + case BC_KSHORT: + | // RA = dst*8, (RC = int16_literal) + | mov CARG1, INS, asr #16 // Refetch sign-extended reg. + | mvn CARG2, #~LJ_TISNUM + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + case BC_KNUM: + | // RA = dst*8, RC = num_const + | lsl RC, RC, #3 + | ins_next1 + | ldrd CARG12, [KBASE, RC] + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + case BC_KPRI: + | // RA = dst*8, RC = primitive_type (~) + | add RA, BASE, RA + | mvn RC, RC + | ins_next1 + | ins_next2 + | str RC, [RA, #4] + | ins_next3 + break; + case BC_KNIL: + | // RA = base*8, RC = end + | add RA, BASE, RA + | add RC, BASE, RC, lsl #3 + | mvn CARG1, #~LJ_TNIL + | str CARG1, [RA, #4] + | add RA, RA, #8 + |1: + | str CARG1, [RA, #4] + | cmp RA, RC + | add RA, RA, #8 + | blt <1 + | ins_next_ + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | // RA = dst*8, RC = uvnum + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsl RC, RC, #2 + | add RC, RC, #offsetof(GCfuncL, uvptr) + | ldr UPVAL:CARG2, [LFUNC:CARG2, RC] + | ldr CARG2, UPVAL:CARG2->v + | ldrd CARG34, [CARG2] + | ins_next1 + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + break; + case BC_USETV: + | // RA = uvnum*8, RC = src + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsr RA, RA, #1 + | add RA, RA, #offsetof(GCfuncL, uvptr) + | lsl RC, RC, #3 + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] + | ldrd CARG34, [BASE, RC] + | ldrb RB, UPVAL:CARG2->marked + | ldrb RC, UPVAL:CARG2->closed + | ldr CARG2, UPVAL:CARG2->v + | tst RB, #LJ_GC_BLACK // isblack(uv) + | add RB, CARG4, #-LJ_TISGCV + | cmpne RC, #0 + | strd CARG34, [CARG2] + | bne >2 // Upvalue is closed and black? + |1: + | ins_next + | + |2: // Check if new value is collectable. + | cmn RB, #-(LJ_TISNUM - LJ_TISGCV) + | ldrbhi RC, GCOBJ:CARG3->gch.marked + | bls <1 // tvisgcv(v) + | sub CARG1, DISPATCH, #-GG_DISP2G + | tst RC, #LJ_GC_WHITES + | // Crossed a write barrier. Move the barrier forward. + if (LJ_TARGET_OSX) { + | beq <1 + | mov RC, BASE + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | mov BASE, RC + } else { + | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv) + } + | b <1 + break; + case BC_USETS: + | // RA = uvnum*8, RC = str_const (~) + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsr RA, RA, #1 + | add RA, RA, #offsetof(GCfuncL, uvptr) + | mvn RC, RC + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] + | ldr STR:CARG3, [KBASE, RC, lsl #2] + | mvn CARG4, #~LJ_TSTR + | ldrb RB, UPVAL:CARG2->marked + | ldr CARG2, UPVAL:CARG2->v + | ldrb RC, UPVAL:CARG2->closed + | tst RB, #LJ_GC_BLACK // isblack(uv) + | ldrb RB, STR:CARG3->marked + | strd CARG34, [CARG2] + | bne >2 + |1: + | ins_next + | + |2: // Check if string is white and ensure upvalue is closed. + | tst RB, #LJ_GC_WHITES // iswhite(str) + | cmpne RC, #0 + | sub CARG1, DISPATCH, #-GG_DISP2G + | // Crossed a write barrier. Move the barrier forward. 
+ if (LJ_TARGET_OSX) { + | beq <1 + | mov RC, BASE + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | mov BASE, RC + } else { + | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv) + } + | b <1 + break; + case BC_USETN: + | // RA = uvnum*8, RC = num_const + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsr RA, RA, #1 + | add RA, RA, #offsetof(GCfuncL, uvptr) + | lsl RC, RC, #3 + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] + | ldrd CARG34, [KBASE, RC] + | ldr CARG2, UPVAL:CARG2->v + | ins_next1 + | ins_next2 + | strd CARG34, [CARG2] + | ins_next3 + break; + case BC_USETP: + | // RA = uvnum*8, RC = primitive_type (~) + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | lsr RA, RA, #1 + | add RA, RA, #offsetof(GCfuncL, uvptr) + | ldr UPVAL:CARG2, [LFUNC:CARG2, RA] + | mvn RC, RC + | ldr CARG2, UPVAL:CARG2->v + | ins_next1 + | ins_next2 + | str RC, [CARG2, #4] + | ins_next3 + break; + + case BC_UCLO: + | // RA = level*8, RC = target + | ldr CARG3, L->openupval + | add RC, PC, RC, lsl #2 + | str BASE, L->base + | cmp CARG3, #0 + | sub PC, RC, #0x20000 + | beq >1 + | mov CARG1, L + | add CARG2, BASE, RA + | bl extern lj_func_closeuv // (lua_State *L, TValue *level) + | ldr BASE, L->base + |1: + | ins_next + break; + + case BC_FNEW: + | // RA = dst*8, RC = proto_const (~) (holding function prototype) + | mvn RC, RC + | str BASE, L->base + | ldr CARG2, [KBASE, RC, lsl #2] + | str PC, SAVE_PC + | ldr CARG3, [BASE, FRAME_FUNC] + | mov CARG1, L + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | bl extern lj_func_newL_gc + | // Returns GCfuncL *. + | ldr BASE, L->base + | mvn CARG2, #~LJ_TFUNC + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + case BC_TDUP: + | // RA = dst*8, RC = (hbits|asize) | tab_const (~) + if (op == BC_TDUP) { + | mvn RC, RC + } + | ldr CARG3, [DISPATCH, #DISPATCH_GL(gc.total)] + | ldr CARG4, [DISPATCH, #DISPATCH_GL(gc.threshold)] + | str BASE, L->base + | str PC, SAVE_PC + | cmp CARG3, CARG4 + | mov CARG1, L + | bhs >5 + |1: + if (op == BC_TNEW) { + | lsl CARG2, RC, #21 + | lsr CARG3, RC, #11 + | asr RC, CARG2, #21 + | lsr CARG2, CARG2, #21 + | cmn RC, #1 + | addeq CARG2, CARG2, #2 + | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) + | // Returns GCtab *. + } else { + | ldr CARG2, [KBASE, RC, lsl #2] + | bl extern lj_tab_dup // (lua_State *L, Table *kt) + | // Returns GCtab *. + } + | ldr BASE, L->base + | mvn CARG2, #~LJ_TTAB + | ins_next1 + | ins_next2 + | strd CARG12, [BASE, RA] + | ins_next3 + |5: + | bl extern lj_gc_step_fixtop // (lua_State *L) + | mov CARG1, L + | b <1 + break; + + case BC_GGET: + | // RA = dst*8, RC = str_const (~) + case BC_GSET: + | // RA = dst*8, RC = str_const (~) + | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] + | mvn RC, RC + | ldr TAB:CARG1, LFUNC:CARG2->env + | ldr STR:RC, [KBASE, RC, lsl #2] + if (op == BC_GGET) { + | b ->BC_TGETS_Z + } else { + | b ->BC_TSETS_Z + } + break; + + case BC_TGETV: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = dst*8, RB = table*8, RC = key*8 + | ldrd TAB:CARG12, [BASE, RB] + | ldrd CARG34, [BASE, RC] + | checktab CARG2, ->vmeta_tgetv // STALL: load CARG12. + | checktp CARG4, LJ_TISNUM // Integer key? + | ldreq CARG4, TAB:CARG1->array + | ldreq CARG2, TAB:CARG1->asize + | bne >9 + | + | add CARG4, CARG4, CARG3, lsl #3 + | cmp CARG3, CARG2 // In array part? + | ldrdlo CARG34, [CARG4] + | bhs ->vmeta_tgetv + | ins_next1 // Overwrites RB! 
+ | checktp CARG4, LJ_TNIL + | beq >5 + |1: + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG2, TAB:CARG1->metatable + | cmp TAB:CARG2, #0 + | beq <1 // No metatable: done. + | ldrb CARG2, TAB:CARG2->nomm + | tst CARG2, #1<<MM_index + | bne <1 // 'no __index' flag set: done. + | decode_RB8 RB, INS // Restore RB. + | b ->vmeta_tgetv + | + |9: + | checktp CARG4, LJ_TSTR // String key? + | moveq STR:RC, CARG3 + | beq ->BC_TGETS_Z + | b ->vmeta_tgetv + break; + case BC_TGETS: + | decode_RB8 RB, INS + | and RC, RC, #255 + | // RA = dst*8, RB = table*8, RC = str_const (~) + | ldrd CARG12, [BASE, RB] + | mvn RC, RC + | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC. + | checktab CARG2, ->vmeta_tgets1 + |->BC_TGETS_Z: + | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8 + | ldr CARG3, TAB:CARG1->hmask + | ldr CARG4, STR:RC->hash + | ldr NODE:INS, TAB:CARG1->node + | mov TAB:RB, TAB:CARG1 + | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask + | add CARG3, CARG3, CARG3, lsl #1 + | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 + |1: + | ldrd CARG12, NODE:INS->key // STALL: early NODE:INS. + | ldrd CARG34, NODE:INS->val + | ldr NODE:INS, NODE:INS->next + | cmp CARG1, STR:RC + | checktpeq CARG2, LJ_TSTR + | bne >4 + | checktp CARG4, LJ_TNIL + | beq >5 + |3: + | ins_next1 + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + | + |4: // Follow hash chain. + | cmp NODE:INS, #0 + | bne <1 + | // End of hash chain: key not found, nil result. + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG1, TAB:RB->metatable + | mov CARG3, #0 // Optional clear of undef. value (during load stall). + | mvn CARG4, #~LJ_TNIL + | cmp TAB:CARG1, #0 + | beq <3 // No metatable: done. + | ldrb CARG2, TAB:CARG1->nomm + | tst CARG2, #1<<MM_index + | bne <3 // 'no __index' flag set: done. + | b ->vmeta_tgets + break; + case BC_TGETB: + | decode_RB8 RB, INS + | and RC, RC, #255 + | // RA = dst*8, RB = table*8, RC = index + | ldrd CARG12, [BASE, RB] + | checktab CARG2, ->vmeta_tgetb // STALL: load CARG12. + | ldr CARG3, TAB:CARG1->asize + | ldr CARG4, TAB:CARG1->array + | lsl CARG2, RC, #3 + | cmp RC, CARG3 + | ldrdlo CARG34, [CARG4, CARG2] + | bhs ->vmeta_tgetb + | ins_next1 // Overwrites RB! + | checktp CARG4, LJ_TNIL + | beq >5 + |1: + | ins_next2 + | strd CARG34, [BASE, RA] + | ins_next3 + | + |5: // Check for __index if table value is nil. + | ldr TAB:CARG2, TAB:CARG1->metatable + | cmp TAB:CARG2, #0 + | beq <1 // No metatable: done. + | ldrb CARG2, TAB:CARG2->nomm + | tst CARG2, #1<<MM_index + | bne <1 // 'no __index' flag set: done. + | b ->vmeta_tgetb + break; + + case BC_TSETV: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = src*8, RB = table*8, RC = key*8 + | ldrd TAB:CARG12, [BASE, RB] + | ldrd CARG34, [BASE, RC] + | checktab CARG2, ->vmeta_tsetv // STALL: load CARG12. + | checktp CARG4, LJ_TISNUM // Integer key? + | ldreq CARG2, TAB:CARG1->array + | ldreq CARG4, TAB:CARG1->asize + | bne >9 + | + | add CARG2, CARG2, CARG3, lsl #3 + | cmp CARG3, CARG4 // In array part? + | ldrlo INS, [CARG2, #4] + | bhs ->vmeta_tsetv + | ins_next1 // Overwrites RB! + | checktp INS, LJ_TNIL + | ldrb INS, TAB:CARG1->marked + | ldrd CARG34, [BASE, RA] + | beq >5 + |1: + | tst INS, #LJ_GC_BLACK // isblack(table) + | strd CARG34, [CARG2] + | bne >7 + |2: + | ins_next2 + | ins_next3 + | + |5: // Check for __newindex if previous value is nil. 
+ | ldr TAB:RA, TAB:CARG1->metatable + | cmp TAB:RA, #0 + | beq <1 // No metatable: done. + | ldrb RA, TAB:RA->nomm + | tst RA, #1<<MM_newindex + | bne <1 // 'no __newindex' flag set: done. + | ldr INS, [PC, #-4] // Restore RA and RB. + | decode_RB8 RB, INS + | decode_RA8 RA, INS + | b ->vmeta_tsetv + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG1, INS, CARG3 + | b <2 + | + |9: + | checktp CARG4, LJ_TSTR // String key? + | moveq STR:RC, CARG3 + | beq ->BC_TSETS_Z + | b ->vmeta_tsetv + break; + case BC_TSETS: + | decode_RB8 RB, INS + | and RC, RC, #255 + | // RA = src*8, RB = table*8, RC = str_const (~) + | ldrd CARG12, [BASE, RB] + | mvn RC, RC + | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC. + | checktab CARG2, ->vmeta_tsets1 + |->BC_TSETS_Z: + | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8 + | ldr CARG3, TAB:CARG1->hmask + | ldr CARG4, STR:RC->hash + | ldr NODE:INS, TAB:CARG1->node + | mov TAB:RB, TAB:CARG1 + | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask + | add CARG3, CARG3, CARG3, lsl #1 + | mov CARG4, #0 + | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 + | strb CARG4, TAB:RB->nomm // Clear metamethod cache. + |1: + | ldrd CARG12, NODE:INS->key + | ldr CARG4, NODE:INS->val.it + | ldr NODE:CARG3, NODE:INS->next + | cmp CARG1, STR:RC + | checktpeq CARG2, LJ_TSTR + | bne >5 + | ldrb CARG2, TAB:RB->marked + | checktp CARG4, LJ_TNIL // Key found, but nil value? + | ldrd CARG34, [BASE, RA] + | beq >4 + |2: + | tst CARG2, #LJ_GC_BLACK // isblack(table) + | strd CARG34, NODE:INS->val + | bne >7 + |3: + | ins_next + | + |4: // Check for __newindex if previous value is nil. + | ldr TAB:CARG1, TAB:RB->metatable + | cmp TAB:CARG1, #0 + | beq <2 // No metatable: done. + | ldrb CARG1, TAB:CARG1->nomm + | tst CARG1, #1<<MM_newindex + | bne <2 // 'no __newindex' flag set: done. + | b ->vmeta_tsets + | + |5: // Follow hash chain. + | movs NODE:INS, NODE:CARG3 + | bne <1 + | // End of hash chain: key not found, add a new one. + | + | // But check for __newindex first. + | ldr TAB:CARG1, TAB:RB->metatable + | mov CARG3, TMPDp + | str PC, SAVE_PC + | cmp TAB:CARG1, #0 // No metatable: continue. + | str BASE, L->base + | ldrbne CARG2, TAB:CARG1->nomm + | mov CARG1, L + | beq >6 + | tst CARG2, #1<<MM_newindex + | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check. + |6: + | mvn CARG4, #~LJ_TSTR + | str STR:RC, TMPDlo + | mov CARG2, TAB:RB + | str CARG4, TMPDhi + | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) + | // Returns TValue *. + | ldr BASE, L->base + | ldrd CARG34, [BASE, RA] + | strd CARG34, [CRET1] + | b <3 // No 2nd write barrier needed. + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, CARG2, CARG3 + | b <3 + break; + case BC_TSETB: + | decode_RB8 RB, INS + | and RC, RC, #255 + | // RA = src*8, RB = table*8, RC = index + | ldrd CARG12, [BASE, RB] + | checktab CARG2, ->vmeta_tsetb // STALL: load CARG12. + | ldr CARG3, TAB:CARG1->asize + | ldr RB, TAB:CARG1->array + | lsl CARG2, RC, #3 + | cmp RC, CARG3 + | ldrdlo CARG34, [CARG2, RB]! + | bhs ->vmeta_tsetb + | ins_next1 // Overwrites RB! + | checktp CARG4, LJ_TNIL + | ldrb INS, TAB:CARG1->marked + | ldrd CARG34, [BASE, RA] + | beq >5 + |1: + | tst INS, #LJ_GC_BLACK // isblack(table) + | strd CARG34, [CARG2] + | bne >7 + |2: + | ins_next2 + | ins_next3 + | + |5: // Check for __newindex if previous value is nil. 
+ | ldr TAB:RA, TAB:CARG1->metatable + | cmp TAB:RA, #0 + | beq <1 // No metatable: done. + | ldrb RA, TAB:RA->nomm + | tst RA, #1<<MM_newindex + | bne <1 // 'no __newindex' flag set: done. + | ldr INS, [PC, #-4] // Restore INS. + | decode_RA8 RA, INS + | b ->vmeta_tsetb + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:CARG1, INS, CARG3 + | b <2 + break; + + case BC_TSETM: + | // RA = base*8 (table at base-1), RC = num_const (start index) + | add RA, BASE, RA + |1: + | ldr RB, SAVE_MULTRES + | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table. + | ldr CARG1, [KBASE, RC, lsl #3] // Integer constant is in lo-word. + | subs RB, RB, #8 + | ldr CARG4, TAB:CARG2->asize + | beq >4 // Nothing to copy? + | add CARG3, CARG1, RB, lsr #3 + | cmp CARG3, CARG4 + | ldr CARG4, TAB:CARG2->array + | add RB, RA, RB + | bhi >5 + | add INS, CARG4, CARG1, lsl #3 + | ldrb CARG1, TAB:CARG2->marked + |3: // Copy result slots to table. + | ldrd CARG34, [RA], #8 + | strd CARG34, [INS], #8 + | cmp RA, RB + | blo <3 + | tst CARG1, #LJ_GC_BLACK // isblack(table) + | bne >7 + |4: + | ins_next + | + |5: // Need to resize array part. + | str BASE, L->base + | mov CARG1, L + | str PC, SAVE_PC + | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) + | // Must not reallocate the stack. + | IOS ldr BASE, L->base + | b <1 + | + |7: // Possible table write barrier for any value. Skip valiswhite check. + | barrierback TAB:RB, CARG1, CARG2 + | b <4 + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALLM: + | // RA = base*8, (RB = nresults+1,) RC = extra_nargs + | ldr CARG1, SAVE_MULTRES + | decode_RC8 NARGS8:RC, INS + | add NARGS8:RC, NARGS8:RC, CARG1 + | b ->BC_CALL_Z + break; + case BC_CALL: + | decode_RC8 NARGS8:RC, INS + | // RA = base*8, (RB = nresults+1,) RC = (nargs+1)*8 + |->BC_CALL_Z: + | mov RB, BASE // Save old BASE for vmeta_call. + | ldrd CARG34, [BASE, RA]! + | sub NARGS8:RC, NARGS8:RC, #8 + | add BASE, BASE, #8 + | checkfunc CARG4, ->vmeta_call + | ins_call + break; + + case BC_CALLMT: + | // RA = base*8, (RB = 0,) RC = extra_nargs + | ldr CARG1, SAVE_MULTRES + | add NARGS8:RC, CARG1, RC, lsl #3 + | b ->BC_CALLT1_Z + break; + case BC_CALLT: + | lsl NARGS8:RC, RC, #3 + | // RA = base*8, (RB = 0,) RC = (nargs+1)*8 + |->BC_CALLT1_Z: + | ldrd LFUNC:CARG34, [RA, BASE]! + | sub NARGS8:RC, NARGS8:RC, #8 + | add RA, RA, #8 + | checkfunc CARG4, ->vmeta_callt + | ldr PC, [BASE, FRAME_PC] + |->BC_CALLT2_Z: + | mov RB, #0 + | ldrb CARG4, LFUNC:CARG3->ffid + | tst PC, #FRAME_TYPE + | bne >7 + |1: + | str LFUNC:CARG3, [BASE, FRAME_FUNC] // Copy function down, but keep PC. + | cmp NARGS8:RC, #0 + | beq >3 + |2: + | ldrd CARG12, [RA, RB] + | add INS, RB, #8 + | cmp INS, NARGS8:RC + | strd CARG12, [BASE, RB] + | mov RB, INS + | bne <2 + |3: + | cmp CARG4, #1 // (> FF_C) Calling a fast function? + | bhi >5 + |4: + | ins_callt + | + |5: // Tailcall to a fast function with a Lua frame below. + | ldr INS, [PC, #-4] + | decode_RA8 RA, INS + | sub CARG1, BASE, RA + | ldr LFUNC:CARG1, [CARG1, #-16] + | ldr CARG1, LFUNC:CARG1->field_pc + | ldr KBASE, [CARG1, #PC2PROTO(k)] + | b <4 + | + |7: // Tailcall from a vararg function. + | eor PC, PC, #FRAME_VARG + | tst PC, #FRAME_TYPEP // Vararg frame below? + | movne CARG4, #0 // Clear ffid if no Lua function below. + | bne <1 + | sub BASE, BASE, PC + | ldr PC, [BASE, FRAME_PC] + | tst PC, #FRAME_TYPE + | movne CARG4, #0 // Clear ffid if no Lua function below. 
+ | b <1 + break; + + case BC_ITERC: + | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1)) + | add RA, BASE, RA + | mov RB, BASE // Save old BASE for vmeta_call. + | ldrd CARG34, [RA, #-16] + | ldrd CARG12, [RA, #-8] + | add BASE, RA, #8 + | strd CARG34, [RA, #8] // Copy state. + | strd CARG12, [RA, #16] // Copy control var. + | // STALL: locked CARG34. + | ldrd LFUNC:CARG34, [RA, #-24] + | mov NARGS8:RC, #16 // Iterators get 2 arguments. + | // STALL: load CARG34. + | strd LFUNC:CARG34, [RA] // Copy callable. + | checkfunc CARG4, ->vmeta_call + | ins_call + break; + + case BC_ITERN: + | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1)) +#if LJ_HASJIT + | // NYI: add hotloop, record BC_ITERN. +#endif + | add RA, BASE, RA + | ldr TAB:RB, [RA, #-16] + | ldr CARG1, [RA, #-8] // Get index from control var. + | ldr INS, TAB:RB->asize + | ldr CARG2, TAB:RB->array + | add PC, PC, #4 + |1: // Traverse array part. + | subs RC, CARG1, INS + | add CARG3, CARG2, CARG1, lsl #3 + | bhs >5 // Index points after array part? + | ldrd CARG34, [CARG3] + | checktp CARG4, LJ_TNIL + | addeq CARG1, CARG1, #1 // Skip holes in array part. + | beq <1 + | ldrh RC, [PC, #-2] + | mvn CARG2, #~LJ_TISNUM + | strd CARG34, [RA, #8] + | add RC, PC, RC, lsl #2 + | add RB, CARG1, #1 + | strd CARG12, [RA] + | sub PC, RC, #0x20000 + | str RB, [RA, #-8] // Update control var. + |3: + | ins_next + | + |5: // Traverse hash part. + | ldr CARG4, TAB:RB->hmask + | ldr NODE:RB, TAB:RB->node + |6: + | add CARG1, RC, RC, lsl #1 + | cmp RC, CARG4 // End of iteration? Branch to ITERL+1. + | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8 + | bhi <3 + | ldrd CARG12, NODE:CARG3->val + | checktp CARG2, LJ_TNIL + | add RC, RC, #1 + | beq <6 // Skip holes in hash part. + | ldrh RB, [PC, #-2] + | add RC, RC, INS + | ldrd CARG34, NODE:CARG3->key + | str RC, [RA, #-8] // Update control var. + | strd CARG12, [RA, #8] + | add RC, PC, RB, lsl #2 + | sub PC, RC, #0x20000 + | strd CARG34, [RA] + | b <3 + break; + + case BC_ISNEXT: + | // RA = base*8, RC = target (points to ITERN) + | add RA, BASE, RA + | add RC, PC, RC, lsl #2 + | ldrd CFUNC:CARG12, [RA, #-24] + | ldr CARG3, [RA, #-12] + | ldr CARG4, [RA, #-4] + | checktp CARG2, LJ_TFUNC + | ldrbeq CARG1, CFUNC:CARG1->ffid + | checktpeq CARG3, LJ_TTAB + | checktpeq CARG4, LJ_TNIL + | cmpeq CARG1, #FF_next_N + | subeq PC, RC, #0x20000 + | bne >5 + | ins_next1 + | ins_next2 + | mov CARG1, #0 + | str CARG1, [RA, #-8] // Initialize control var. + |1: + | ins_next3 + |5: // Despecialize bytecode if any of the checks fail. + | mov CARG1, #BC_JMP + | mov OP, #BC_ITERC + | strb CARG1, [PC, #-4] + | sub PC, RC, #0x20000 + | strb OP, [PC] // Subsumes ins_next1. + | ins_next2 + | b <1 + break; + + case BC_VARG: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8 + | ldr CARG1, [BASE, FRAME_PC] + | add RC, BASE, RC + | add RA, BASE, RA + | add RC, RC, #FRAME_VARG + | add CARG4, RA, RB + | sub CARG3, BASE, #8 // CARG3 = vtop + | sub RC, RC, CARG1 // RC = vbase + | // Note: RC may now be even _above_ BASE if nargs was < numparams. + | cmp RB, #0 + | sub CARG1, CARG3, RC + | beq >5 // Copy all varargs? + | sub CARG4, CARG4, #16 + |1: // Copy vararg slots to destination slots. + | cmp RC, CARG3 + | ldrdlo CARG12, [RC], #8 + | mvnhs CARG2, #~LJ_TNIL + | cmp RA, CARG4 + | strd CARG12, [RA], #8 + | blo <1 + |2: + | ins_next + | + |5: // Copy all varargs. 
+ | ldr CARG4, L->maxstack + | cmp CARG1, #0 + | movle RB, #8 // MULTRES = (0+1)*8 + | addgt RB, CARG1, #8 + | add CARG2, RA, CARG1 + | str RB, SAVE_MULTRES + | ble <2 + | cmp CARG2, CARG4 + | bhi >7 + |6: + | ldrd CARG12, [RC], #8 + | strd CARG12, [RA], #8 + | cmp RC, CARG3 + | blo <6 + | b <2 + | + |7: // Grow stack for varargs. + | lsr CARG2, CARG1, #3 + | str RA, L->top + | mov CARG1, L + | str BASE, L->base + | sub RC, RC, BASE // Need delta, because BASE may change. + | str PC, SAVE_PC + | sub RA, RA, BASE + | bl extern lj_state_growstack // (lua_State *L, int n) + | ldr BASE, L->base + | add RA, BASE, RA + | add RC, BASE, RC + | sub CARG3, BASE, #8 + | b <6 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | // RA = results*8, RC = extra results + | ldr CARG1, SAVE_MULTRES + | ldr PC, [BASE, FRAME_PC] + | add RA, BASE, RA + | add RC, CARG1, RC, lsl #3 + | b ->BC_RETM_Z + break; + + case BC_RET: + | // RA = results*8, RC = nresults+1 + | ldr PC, [BASE, FRAME_PC] + | lsl RC, RC, #3 + | add RA, BASE, RA + |->BC_RETM_Z: + | str RC, SAVE_MULTRES + |1: + | ands CARG1, PC, #FRAME_TYPE + | eor CARG2, PC, #FRAME_VARG + | bne ->BC_RETV2_Z + | + |->BC_RET_Z: + | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return + | ldr INS, [PC, #-4] + | subs CARG4, RC, #8 + | sub CARG3, BASE, #8 + | beq >3 + |2: + | ldrd CARG12, [RA], #8 + | add BASE, BASE, #8 + | subs CARG4, CARG4, #8 + | strd CARG12, [BASE, #-16] + | bne <2 + |3: + | decode_RA8 RA, INS + | sub CARG4, CARG3, RA + | decode_RB8 RB, INS + | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC] + |5: + | cmp RB, RC // More results expected? + | bhi >6 + | mov BASE, CARG4 + | ldr CARG2, LFUNC:CARG1->field_pc + | ins_next1 + | ins_next2 + | ldr KBASE, [CARG2, #PC2PROTO(k)] + | ins_next3 + | + |6: // Fill up results with nil. + | mvn CARG2, #~LJ_TNIL + | add BASE, BASE, #8 + | add RC, RC, #8 + | str CARG2, [BASE, #-12] + | b <5 + | + |->BC_RETV1_Z: // Non-standard return case. + | add RA, BASE, RA + |->BC_RETV2_Z: + | tst CARG2, #FRAME_TYPEP + | bne ->vm_return + | // Return from vararg function: relocate BASE down. + | sub BASE, BASE, CARG2 + | ldr PC, [BASE, FRAME_PC] + | b <1 + break; + + case BC_RET0: case BC_RET1: + | // RA = results*8, RC = nresults+1 + | ldr PC, [BASE, FRAME_PC] + | lsl RC, RC, #3 + | str RC, SAVE_MULTRES + | ands CARG1, PC, #FRAME_TYPE + | eor CARG2, PC, #FRAME_VARG + | ldreq INS, [PC, #-4] + | bne ->BC_RETV1_Z + if (op == BC_RET1) { + | ldrd CARG12, [BASE, RA] + } + | sub CARG4, BASE, #8 + | decode_RA8 RA, INS + if (op == BC_RET1) { + | strd CARG12, [CARG4] + } + | sub BASE, CARG4, RA + | decode_RB8 RB, INS + | ldr LFUNC:CARG1, [BASE, FRAME_FUNC] + |5: + | cmp RB, RC + | bhi >6 + | ldr CARG2, LFUNC:CARG1->field_pc + | ins_next1 + | ins_next2 + | ldr KBASE, [CARG2, #PC2PROTO(k)] + | ins_next3 + | + |6: // Fill up results with nil. + | sub CARG2, CARG4, #4 + | mvn CARG3, #~LJ_TNIL + | str CARG3, [CARG2, RC] + | add RC, RC, #8 + | b <5 + break; + + /* -- Loops and branches ------------------------------------------------ */ + + |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4] + |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12] + |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20] + |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28] + + case BC_FORL: +#if LJ_HASJIT + | hotloop +#endif + | // Fall through. Assumes BC_IFORL follows. 
+ break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + | // RA = base*8, RC = target (after end of loop or start of loop) + vk = (op == BC_IFORL || op == BC_JFORL); + | ldrd CARG12, [RA, BASE]! + if (op != BC_JFORL) { + | add RC, PC, RC, lsl #2 + } + if (!vk) { + | ldrd CARG34, FOR_STOP + | checktp CARG2, LJ_TISNUM + | ldr RB, FOR_TSTEP + | bne >5 + | checktp CARG4, LJ_TISNUM + | ldr CARG4, FOR_STEP + | checktpeq RB, LJ_TISNUM + | bne ->vmeta_for + | cmp CARG4, #0 + | blt >4 + | cmp CARG1, CARG3 + } else { + | ldrd CARG34, FOR_STEP + | checktp CARG2, LJ_TISNUM + | bne >5 + | adds CARG1, CARG1, CARG3 + | ldr CARG4, FOR_STOP + if (op == BC_IFORL) { + | addvs RC, PC, #0x20000 // Overflow: prevent branch. + } else { + | bvs >2 // Overflow: do not enter mcode. + } + | cmp CARG3, #0 + | blt >4 + | cmp CARG1, CARG4 + } + |1: + if (op == BC_FORI) { + | subgt PC, RC, #0x20000 + } else if (op == BC_JFORI) { + | sub PC, RC, #0x20000 + | ldrhle RC, [PC, #-2] + } else if (op == BC_IFORL) { + | suble PC, RC, #0x20000 + } + if (vk) { + | strd CARG12, FOR_IDX + } + |2: + | ins_next1 + | ins_next2 + | strd CARG12, FOR_EXT + if (op == BC_JFORI || op == BC_JFORL) { + | ble =>BC_JLOOP + } + |3: + | ins_next3 + | + |4: // Invert check for negative step. + if (!vk) { + | cmp CARG3, CARG1 + } else { + | cmp CARG4, CARG1 + } + | b <1 + | + |5: // FP loop. + if (!vk) { + | cmnlo CARG4, #-LJ_TISNUM + | cmnlo RB, #-LJ_TISNUM + | bhs ->vmeta_for + | cmp RB, #0 + | strd CARG12, FOR_IDX + | strd CARG12, FOR_EXT + | blt >8 + } else { + | cmp CARG4, #0 + | blt >8 + | bl extern __aeabi_dadd + | strd CARG12, FOR_IDX + | ldrd CARG34, FOR_STOP + | strd CARG12, FOR_EXT + } + |6: + | bl extern __aeabi_cdcmple + if (op == BC_FORI) { + | subhi PC, RC, #0x20000 + } else if (op == BC_JFORI) { + | sub PC, RC, #0x20000 + | ldrhls RC, [PC, #-2] + | bls =>BC_JLOOP + } else if (op == BC_IFORL) { + | subls PC, RC, #0x20000 + } else { + | bls =>BC_JLOOP + } + | ins_next1 + | ins_next2 + | b <3 + | + |8: // Invert check for negative step. + if (vk) { + | bl extern __aeabi_dadd + | strd CARG12, FOR_IDX + | strd CARG12, FOR_EXT + } + | mov CARG3, CARG1 + | mov CARG4, CARG2 + | ldrd CARG12, FOR_STOP + | b <6 + break; + + case BC_ITERL: +#if LJ_HASJIT + | hotloop +#endif + | // Fall through. Assumes BC_IITERL follows. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | // RA = base*8, RC = target + | ldrd CARG12, [RA, BASE]! + if (op == BC_JITERL) { + | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil. + | strdne CARG12, [RA, #-8] + | bne =>BC_JLOOP + } else { + | add RC, PC, RC, lsl #2 + | // STALL: load CARG12. + | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil. + | subne PC, RC, #0x20000 // Otherwise save control var + branch. + | strdne CARG12, [RA, #-8] + } + | ins_next + break; + + case BC_LOOP: + | // RA = base*8, RC = target (loop extent) + | // Note: RA/RC is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. +#if LJ_HASJIT + | hotloop +#endif + | // Fall through. Assumes BC_ILOOP follows. + break; + + case BC_ILOOP: + | // RA = base*8, RC = target (loop extent) + | ins_next + break; + + case BC_JLOOP: +#if LJ_HASJIT + | // RA = base (ignored), RC = traceno + | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)] + | mov CARG2, #0 // Traces on ARM don't store the trace number, so use 0. 
+ | ldr TRACE:RC, [CARG1, RC, lsl #2] + | st_vmstate CARG2 + | ldr RA, TRACE:RC->mcode + | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)] + | str L, [DISPATCH, #DISPATCH_GL(jit_L)] + | bx RA +#endif + break; + + case BC_JMP: + | // RA = base*8 (only used by trace recorder), RC = target + | add RC, PC, RC, lsl #2 + | sub PC, RC, #0x20000 + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: +#if LJ_HASJIT + | hotcall +#endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 + | ldr CARG1, L->maxstack + | ldrb CARG2, [PC, #-4+PC2PROTO(numparams)] + | ldr KBASE, [PC, #-4+PC2PROTO(k)] + | cmp RA, CARG1 + | bhi ->vm_growstack_l + if (op != BC_JFUNCF) { + | ins_next1 + | ins_next2 + } + |2: + | cmp NARGS8:RC, CARG2, lsl #3 // Check for missing parameters. + | mvn CARG4, #~LJ_TNIL + | ble >3 + if (op == BC_JFUNCF) { + | decode_RD RC, INS + | b =>BC_JLOOP + } else { + | ins_next3 + } + | + |3: // Clear missing parameters. + | strd CARG34, [BASE, NARGS8:RC] + | add NARGS8:RC, NARGS8:RC, #8 + | b <2 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | NYI // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8 + | ldr CARG1, L->maxstack + | add CARG4, BASE, RC + | add RA, RA, RC + | str LFUNC:CARG3, [CARG4] // Store copy of LFUNC. + | add CARG2, RC, #8+FRAME_VARG + | ldr KBASE, [PC, #-4+PC2PROTO(k)] + | cmp RA, CARG1 + | str CARG2, [CARG4, #4] // Store delta + FRAME_VARG. + | bhs ->vm_growstack_l + | ldrb RB, [PC, #-4+PC2PROTO(numparams)] + | mov RA, BASE + | mov RC, CARG4 + | cmp RB, #0 + | add BASE, CARG4, #8 + | beq >3 + | mvn CARG3, #~LJ_TNIL + |1: + | cmp RA, RC // Less args than parameters? + | ldrdlo CARG12, [RA], #8 + | movhs CARG2, CARG3 + | strlo CARG3, [RA, #-4] // Clear old fixarg slot (help the GC). + |2: + | subs RB, RB, #1 + | strd CARG12, [CARG4, #8]! + | bne <1 + |3: + | ins_next + break; + + case BC_FUNCC: + case BC_FUNCCW: + | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8 + if (op == BC_FUNCC) { + | ldr CARG4, CFUNC:CARG3->f + } else { + | ldr CARG4, [DISPATCH, #DISPATCH_GL(wrapf)] + } + | add CARG2, RA, NARGS8:RC + | ldr CARG1, L->maxstack + | add RC, BASE, NARGS8:RC + | str BASE, L->base + | cmp CARG2, CARG1 + | str RC, L->top + if (op == BC_FUNCCW) { + | ldr CARG2, CFUNC:CARG3->f + } + | mv_vmstate CARG3, C + | mov CARG1, L + | bhi ->vm_growstack_c // Need to grow stack. + | st_vmstate CARG3 + | blx CARG4 // (lua_State *L [, lua_CFunction f]) + | // Returns nresults. + | ldr BASE, L->base + | mv_vmstate CARG3, INTERP + | ldr CRET2, L->top + | lsl RC, CRET1, #3 + | st_vmstate CARG3 + | ldr PC, [BASE, FRAME_PC] + | sub RA, CRET2, RC // RA = L->top - nresults*8 + | b ->vm_returnc + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx); + + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ + int i; + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 0xe\n" /* Return address is in lr. */ + "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */ + "\t.align 2\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x8e\n\t.uleb128 1\n", /* Restore lr. */ + (int)ctx->codesz, CFRAME_SIZE); + for (i = 11; i >= 4; i--) /* Restore r4-r11. */ + fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i)); + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE0:\n\n"); + break; + default: + break; + } +} + diff --git a/src/luajit2/src/buildvm_arm.h b/src/luajit2/src/buildvm_arm.h new file mode 100644 index 0000000000..505048bd91 --- /dev/null +++ b/src/luajit2/src/buildvm_arm.h @@ -0,0 +1,7368 @@ +/* +** This file has been pre-processed with DynASM. +** http://luajit.org/dynasm.html +** DynASM version 1.3.0, DynASM arm version 1.3.0 +** DO NOT EDIT! The original file is in "buildvm_arm.dasc". +*/ + +#if DASM_VERSION != 10300 +#error "Version mismatch between DynASM and included encoding engine" +#endif + +#define DASM_SECTION_CODE_OP 0 +#define DASM_SECTION_CODE_SUB 1 +#define DASM_MAXSECTION 2 +static const unsigned int build_actionlist[5702] = { +0x00010001, +0x00060014, +0xe3160000, +0x000a0000, +0x0a000000, +0x00050015, +0xe51c6004, +0xe3e01000, +0x000a0000, +0xe1a0900c, +0xe50a1004, +0xe24aa008, +0x00060016, +0xe28bb008, +0xe2160000, +0x000a0000, +0xe58db004, +0x0a000000, +0x00050017, +0x00060018, +0xe3c6c000, +0x000a0000, +0xe3500000, +0x000a0000, +0xe049c00c, +0x1a000000, +0x00050014, +0xe508c000, +0x000d8180, +0xe59d5014, +0xe3e03000, +0x000a0000, +0xe2499008, +0xe25b2008, +0xe1a05185, +0xe5073000, +0x000d8180, +0x0a000000, +0x00050002, +0x0006000b, +0xe2522008, +0xe0ca00d8, +0xe0c900f8, +0x1a000000, +0x0005000b, +0x0006000c, +0xe155000b, +0x1a000000, +0x00050006, +0x0006000d, +0xe5089000, +0x000d8180, +0x00060019, +0x00000000, +0xe59db010, +0xe3a00000, +0xe508b000, +0x000d8180, +0x0006001a, +0xe28dd01c, +0xe8bd8ff0, +0x00060010, +0xba000000, +0x00050007, +0xe5182000, +0x000d8180, +0xe3e01000, +0x000a0000, +0xe1590002, +0x2a000000, +0x00050008, +0xe5891004, +0xe28bb008, +0xe2899008, +0xea000000, +0x0005000c, +0x00060011, +0xe04b0005, +0xe3550000, +0x10499000, +0xea000000, +0x0005000d, +0x00060012, +0xe5089000, +0x000d8180, +0xe1a01005, +0xe1a00008, +0xeb000000, +0x00030000, +0xe5189000, +0x000d8180, +0xea000000, +0x0005000c, +0x0006001b, +0xe1a0d000, +0xe1a00001, +0x0006001c, +0xe59d800c, +0xe3e03000, +0x000a0000, +0xe5182000, +0x000d8180, +0xe5023000, +0x000d8180, +0xea000000, +0x0005001a, +0x0006001d, +0x00000000, +0xe3c00000, +0x000a0000, +0xe1a0d000, +0x0006001e, +0xe59d800c, +0xe3a040ff, +0xe3a0b010, +0xe1a04184, +0xe5189000, +0x000d8180, +0xe5187000, +0x000d8180, +0xe3e00000, +0x000a0000, +0xe249a008, +0xe5196004, +0xe2877000, +0x000a0000, +0xe3e01000, +0x000a0000, +0xe5090004, +0xe5071000, +0x000d8180, +0xea000000, +0x00050016, +0x0006001f, +0xe3a01000, +0x000a0000, +0xea000000, +0x00050002, +0x00060020, +0xe089b00b, +0xe04aa009, +0xe1a00008, +0xe5089000, +0x000d8180, +0xe2866004, +0xe508b000, 
+0x000d8180, +0xe1a021aa, +0x0006000c, +0xe58d6008, +0xeb000000, +0x00030000, +0xe5189000, +0x000d8180, +0xe518b000, +0x000d8180, +0xe5192008, +0xe04bb009, +0xe5126000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe08aa009, +0xe12fff1c, +0x00060021, +0xe92d4ff0, +0xe24dd01c, +0xe1a08000, +0xe5107000, +0x000d8180, +0x00000000, +0xe1a09001, +0xe2877000, +0x000a0000, +0xe58d800c, +0xe3a06000, +0x000a0000, +0xe58d2014, +0xe28d1000, +0x000a0000, +0xe5580000, +0x000d8180, +0xe58d2018, +0xe5081000, +0x000d8180, +0xe58d2010, +0xe3500000, +0xe58d8008, +0x0a000000, +0x00050003, +0xe1a0a009, +0xe5189000, +0x000d8180, +0xe5180000, +0x000d8180, +0xe3a040ff, +0xe5482000, +0x000d8180, +0xe040b009, +0xe5196004, +0xe1a04184, +0xe3e01000, +0x000a0000, +0xe28bb008, +0xe2160000, +0x000a0000, +0xe5071000, +0x000d8180, +0xe58db004, +0x0a000000, +0x00050017, +0xea000000, +0x00050018, +0x00060022, +0xe92d4ff0, +0xe24dd01c, +0xe3a06000, +0x000a0000, +0xe58d3018, +0xea000000, +0x00050001, +0x00060023, +0xe92d4ff0, +0xe24dd01c, +0xe3a06000, +0x000a0000, +0x0006000b, +0xe510b000, +0x000d8180, +0xe58d2014, +0xe1a08000, +0xe58d000c, +0xe1a09001, +0xe508d000, +0x000d8180, +0x00000000, +0xe5187000, +0x000d8180, +0xe58d0008, +0xe58db010, +0xe2877000, +0x000a0000, +0x0006000d, +0xe518c000, +0x000d8180, +0xe5180000, +0x000d8180, +0xe3a040ff, +0xe0866009, +0xe1a04184, +0xe046600c, +0xe3e01000, +0x000a0000, +0xe040b009, +0xe5071000, +0x000d8180, +0x00060024, +0xe14920d8, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050025, +0x00060026, +0xe5096004, +0xe5126000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe08aa009, +0xe12fff1c, +0x00060027, +0xe92d4ff0, +0xe24dd01c, +0xe1a08000, +0xe510a000, +0x000d8180, +0xe58d000c, +0xe518c000, +0x000d8180, +0xe58d0008, +0xe518b000, +0x000d8180, +0xe04aa00c, +0xe508d000, +0x000d8180, +0xe3a0c000, +0xe58da014, +0xe58dc018, +0xe58db010, +0xe12fff33, +0xe5187000, +0x000d8180, +0xe1b09000, +0xe3a06000, +0x000a0000, +0xe2877000, +0x000a0000, +0x1a000000, +0x0005000d, +0xea000000, +0x00050019, +0x00060015, +0x00000000, +0xe51c2008, +0xe5190010, +0xe1a03009, +0xe1a0900c, +0xe3500000, +0xe513600c, +0x0a000000, +0x00050001, +0xe5122000, +0x000d8180, +0xe3e0e000, +0x000a0000, +0xe08a100b, +0xe501e004, +0xe5125000, +0x000d8180, +0xe12fff10, +0x0006000b, +0xe5192008, +0xe2433010, +0xe043b009, +0xea000000, +0x00050028, +0x00060029, +0xe516e004, +0xe2431010, +0xe1ca20d0, +0xe5089000, +0x000d8180, +0xe004baae, +0xe004a2ae, +0xe089000b, +0xe0510000, +0x11c120f0, +0x11a02000, +0x1a000000, +0x0005002a, +0xe18920fa, +0xea000000, +0x0005002b, +0x0006002c, +0xe089100c, +0xea000000, +0x00050002, +0x0006002d, +0xe2471000, +0x000a0000, +0xe3e03000, +0x000a0000, +0xe581c000, +0xe5813004, +0x0006000c, +0xe3e03000, +0x000a0000, +0xe58db000, +0xe58d3004, +0xe1a0200d, +0xea000000, +0x00050001, +0x0006002e, +0xe004caae, +0xe58db000, +0xe3e03000, +0x000a0000, +0xe089100c, +0xe58d3004, +0xe1a0200d, +0xea000000, +0x00050001, +0x0006002f, +0x00000000, +0xe089100c, +0xe089200b, +0x0006000b, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe58d6008, +0xeb000000, +0x00030001, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xe3500000, +0x0a000000, +0x00050003, +0xe1c020d0, +0xe5d6c000, +0xe496e004, +0xe18920fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000d, +0xe2690000, +0x000a0000, +0xe5189000, +0x000d8180, +0xe3a0b010, +0xe509600c, +0xe0806009, +0xe5192008, +0xea000000, +0x00050026, +0x00060030, +0xe089100c, +0xea000000, +0x00050002, +0x00060031, +0xe2471000, 
+0x000a0000, +0xe3e03000, +0x000a0000, +0xe581c000, +0xe5813004, +0x0006000c, +0xe3e03000, +0x000a0000, +0xe58db000, +0xe58d3004, +0xe1a0200d, +0xea000000, +0x00050001, +0x00060032, +0xe004caae, +0xe58db000, +0xe3e03000, +0x000a0000, +0xe089100c, +0xe58d3004, +0xe1a0200d, +0xea000000, +0x00050001, +0x00060033, +0xe089100c, +0xe089200b, +0x0006000b, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe58d6008, +0xeb000000, +0x00030002, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xe3500000, +0xe18920da, +0x0a000000, +0x00050003, +0xe5d6c000, +0xe1c020f0, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000d, +0xe2690000, +0x000a0000, +0xe5189000, +0x000d8180, +0xe3a0b018, +0xe1c921f0, +0xe509600c, +0xe0806009, +0xe5192008, +0xea000000, +0x00050026, +0x00060034, +0xe1a00008, +0xe2466004, +0xe1a0100a, +0xe5089000, +0x000d8180, +0xe1a0200b, +0xe58d6008, +0xe20e30ff, +0xeb000000, +0x00030003, +0x0006000d, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xe3500001, +0x8a000000, +0x00050035, +0x0006000e, +0xe1d6c0b2, +0xe2866004, +0xe086c10c, +0x224c6b80, +0x0006002b, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00060036, +0xe516e004, +0xe1ca00d0, +0xe00422ae, +0xe18900f2, +0xea000000, +0x0005002b, +0x00060037, +0xe59a1004, +0xe3e00000, +0x000a0000, +0xe1500001, +0xea000000, +0x0005000e, +0x00060038, +0xe59a1004, +0xe3710000, +0x000a0000, +0xea000000, +0x0005000e, +0x00060039, +0xe2466004, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe58d6008, +0xeb000000, +0x00030004, +0xea000000, +0x0005000d, +0x0006003a, +0x00000000, +0xe2466004, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe1a0100e, +0xe58d6008, +0xeb000000, +0x00030005, +0xea000000, +0x0005000d, +0x00000000, +0x0006003b, +0xe004caae, +0xe004b6ae, +0xe089200c, +0xe085300b, +0xea000000, +0x00050001, +0x0006003c, +0xe004caae, +0xe004b6ae, +0xe089300c, +0xe085200b, +0xea000000, +0x00050001, +0x0006003d, +0xe516e008, +0xe2466004, +0xe089200b, +0xe089300b, +0xea000000, +0x00050001, +0x0006003e, +0xe004caae, +0xe004b6ae, +0xe089200c, +0xe089300b, +0x0006000b, +0xe20ec0ff, +0xe089100a, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe58d6008, +0xe58dc000, +0xeb000000, +0x00030006, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xe3500000, +0x0a000000, +0x0005002b, +0x00060035, +0xe0401009, +0xe500600c, +0xe2816000, +0x000a0000, +0xe1a09000, +0xe3a0b010, +0xea000000, +0x00050024, +0x0006003f, +0xe089100b, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe58d6008, +0xeb000000, +0x00030007, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xe3500000, +0x1a000000, +0x00050035, +0xe799000b, +0xea000000, +0x00050040, +0x00000000, +0xea000000, +0x00050035, +0x00000000, +0x00060025, +0xe1a00008, +0xe508c000, +0x000d8180, +0xe2491008, +0xe58d6008, +0xe089200b, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030008, +0x00000000, +0xe1a0900a, +0x00000000, +0xe5192008, +0xe28bb008, +0xe5096004, +0xe5126000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe08aa009, +0xe12fff1c, +0x00060041, +0xe1a00008, +0xe5089000, +0x000d8180, +0xe24a1008, +0xe58d6008, +0xe08a200b, +0xeb000000, +0x00030008, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xe51a2008, +0xe5196004, +0xe28bb008, +0xea000000, +0x00050042, +0x00060043, +0xe1a00008, +0xe5089000, +0x000d8180, +0xe1a0100a, +0xe58d6008, +0xeb000000, +0x00030009, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xe556c004, +0x00000000, +0xe516e004, +0x00000000, +0xe35c0000, +0x000a0000, +0x00000000, +0xe004a2ae, +0xe1a0b82e, 
+0x00000000, +0x0a000000, +0x00070000, +0x00000000, +0xea000000, +0x00070000, +0x00060044, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x8a000000, +0x00050045, +0xe5196004, +0xe14900f8, +0xe1a0c009, +0xe25ba008, +0xe28bb008, +0x0a000000, +0x00050046, +0x0006000b, +0xe1cc00d8, +0xe25aa008, +0xe0cc00f8, +0x1a000000, +0x0005000b, +0xea000000, +0x00050046, +0x00060047, +0xe5991004, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x33e01000, +0x000a0000, +0xe2613000, +0x000a0000, +0xe1a03183, +0xe18200d3, +0xea000000, +0x00050048, +0x00060049, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x13710000, +0x000a0000, +0x1a000000, +0x00050006, +0x0006000b, +0xe510c000, +0x000d8180, +0x0006000c, +0x00000000, +0xe3e01000, +0x000a0000, +0xe517b000, +0x000d8180, +0xe35c0000, +0x0a000000, +0x00050048, +0xe51c2000, +0x000d8180, +0xe51b3000, +0x000d8180, +0xe51ce000, +0x000d8180, +0xe0022003, +0xe0822082, +0xe08ee182, +0x0006000d, +0xe14e20d0, +0x000c8100, +0xe14e00d0, +0x000c8100, +0xe51ee000, +0x000d8180, +0xe152000b, +0x03730000, +0x000a0000, +0x0a000000, +0x00050005, +0xe35e0000, +0x1a000000, +0x0005000d, +0x0006000e, +0xe1a0000c, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006000f, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050048, +0xea000000, +0x0005000e, +0x00060010, +0xe3710000, +0x000a0000, +0x00000000, +0x21e01001, +0x33a01000, +0x000a0000, +0xe0873101, +0xe513c000, +0x000d8180, +0xea000000, +0x0005000c, +0x0006004a, +0xe1c900d0, +0xe1c920d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x0510c000, +0x000d8180, +0x03730000, +0x000a0000, +0x05503000, +0x000d8180, +0x035c0000, +0x1a000000, +0x00050045, +0xe3130000, +0x000a0000, +0xe5002000, +0x000d8180, +0x0a000000, +0x00050048, +0xe5172000, +0x000d8180, +0xe3c33000, +0x000a0000, +0xe5070000, +0x000d8180, +0xe5403000, +0x000d8180, +0xe5002000, +0x000d8180, +0xea000000, +0x00050048, +0x0006004b, +0xe1c920d0, +0xe35b0010, +0x3a000000, +0x00050045, +0xe1a01002, +0xe3730000, +0x000a0000, +0x00000000, +0x1a000000, +0x00050045, +0xe1a00008, +0xe2892008, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x0003000a, +0x00000000, +0xe1a0900a, +0x00000000, +0xe1c000d0, +0xea000000, +0x00050048, +0x0006004c, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x9a000000, +0x00050048, +0xea000000, +0x00050045, +0x0006004d, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x0a000000, +0x00050048, +0xe5173000, +0x000d8180, +0xe5089000, +0x000d8180, +0xe3710000, +0x000a0000, +0x93530000, +0xe58d6008, +0x8a000000, +0x00050045, +0xe5170000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe1500001, +0xab000000, +0x0005004e, +0xe1a00008, +0xe1a01009, +0xeb000000, +0x0003000b, +0xe5189000, +0x000d8180, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006004f, +0x00000000, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3e03000, +0x000a0000, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050045, +0xe18920fb, +0xe5196004, +0xe1a01000, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe5089000, +0x000d8180, +0xe2892008, +0xe58d6008, +0xeb000000, +0x0003000c, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xe3500000, +0x03e01000, +0x000a0000, +0x0a000000, +0x00050048, +0xe1c900d8, +0xe1c921d0, +0xe3a0b000, +0x000a0000, +0xe14900f8, +0xe1c920f0, +0xea000000, +0x00050046, +0x00060050, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050045, 
+0x00000000, +0xe510c000, +0x000d8180, +0x00000000, +0xe14220d0, +0x000c8100, +0xe5196004, +0x00000000, +0xe35c0000, +0x1a000000, +0x00050045, +0x00000000, +0xe3e01000, +0x000a0000, +0xe3a0b000, +0x000a0000, +0xe14920f8, +0xe589100c, +0xea000000, +0x00050046, +0x00060051, +0xe1c900d0, +0xe1c920d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x03730000, +0x000a0000, +0x1a000000, +0x00050045, +0xe510c000, +0x000d8180, +0xe510b000, +0x000d8180, +0xe2822001, +0xe5196004, +0xe152000c, +0xe08bb182, +0xe14920f8, +0x31cb00d0, +0xe3a0b000, +0x000a0000, +0x2a000000, +0x00050002, +0x0006000b, +0xe3710000, +0x000a0000, +0x13a0b000, +0x000a0000, +0x11c900f0, +0xea000000, +0x00050046, +0x0006000c, +0xe510c000, +0x000d8180, +0xe1a01002, +0xe35c0000, +0x0a000000, +0x00050046, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x0003000d, +0x00000000, +0xe1a0900a, +0x00000000, +0xe3500000, +0x0a000000, +0x00050046, +0xe1c000d0, +0xea000000, +0x0005000b, +0x00060052, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050045, +0x00000000, +0xe510c000, +0x000d8180, +0x00000000, +0xe14220d0, +0x000c8100, +0xe5196004, +0x00000000, +0xe35c0000, +0x1a000000, +0x00050045, +0x00000000, +0xe3a00000, +0xe3e01000, +0x000a0000, +0xe3a0b000, +0x000a0000, +0xe14920f8, +0xe1c900f8, +0xea000000, +0x00050046, +0x00060053, +0xe557a000, +0x000d8180, +0xe35b0008, +0x3a000000, +0x00050045, +0xe31a0000, +0x000a0000, +0xe1a0c009, +0xe2899008, +0x03a06000, +0x000a0000, +0x13a06000, +0x000a0000, +0xe24bb008, +0xea000000, +0x00050024, +0x00060054, +0xe1c900d0, +0xe1c920d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe557a000, +0x000d8180, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050045, +0xe1a0c009, +0xe1c900f8, +0xe1c920f0, +0xe31a0000, +0x000a0000, +0xe2899010, +0x03a06000, +0x000a0000, +0x13a06000, +0x000a0000, +0xe24bb010, +0xea000000, +0x00050024, +0x00060055, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x00000000, +0x1a000000, +0x00050045, +0xe5196004, +0xe5089000, +0x000d8180, +0xe5101000, +0x000d8180, +0xe550a000, +0x000d8180, +0xe510c000, +0x000d8180, +0xe081200b, +0xe081300a, +0xe58d6008, +0xe153000c, +0x0a000000, +0x00050045, +0xe5103000, +0x000d8180, +0xe510c000, +0x000d8180, +0xe35a0000, +0x000a0000, +0x91520003, +0x935c0000, +0x8a000000, +0x00050045, +0x0006000b, +0xe2422008, +0xe2899008, +0xe24bb008, +0xe5002000, +0x000d8180, +0xe5089000, +0x000d8180, +0x0006000c, +0xe18920dc, +0xe15c000b, +0x118120fc, +0xe28cc008, +0x1a000000, +0x0005000c, +0xe3a02000, +0xe1a0a000, +0xe3a03000, +0xeb000000, +0x00050021, +0x0006000e, +0xe51a2000, +0x000d8180, +0xe3e01000, +0x000a0000, +0xe51a3000, +0x000d8180, +0xe5071000, +0x000d8180, +0xe3500000, +0x000a0000, +0x00000000, +0xe5189000, +0x000d8180, +0x8a000000, +0x00050008, +0xe053b002, +0xe5180000, +0x000d8180, +0xe089100b, +0x0a000000, +0x00050006, +0xe1510000, +0xe3a0c000, +0x8a000000, +0x00050009, +0xe24b3008, +0xe50a2000, +0x000d8180, +0x0006000f, +0xe18200dc, +0xe15c0003, +0xe18900fc, +0xe28cc008, +0x1a000000, +0x0005000f, +0x00060010, +0xe3e02000, +0x000a0000, +0xe28bb010, +0x00060011, +0xe5092004, +0xe249a008, +0xe2160000, +0x000a0000, +0xe58d6008, +0xe58db004, +0x0a000000, +0x00050017, +0xea000000, +0x00050018, +0x00060012, +0xe16300d8, +0xe3e02000, +0x000a0000, +0xe3a0b000, +0x000a0000, +0xe50a3000, +0x000d8180, +0xe1c900f0, +0xea000000, +0x00050011, +0x00060013, +0xe1a00008, +0xe1a011ab, +0xeb000000, +0x00030000, +0xe3a00000, +0xea000000, +0x0005000e, +0x00060056, 
+0x00000000, +0xe5120000, +0x000d8180, +0xe5196004, +0xe5089000, +0x000d8180, +0xe5101000, +0x000d8180, +0xe550a000, +0x000d8180, +0xe510c000, +0x000d8180, +0xe081200b, +0xe081300a, +0xe58d6008, +0xe153000c, +0x0a000000, +0x00050045, +0xe5103000, +0x000d8180, +0xe510c000, +0x000d8180, +0xe35a0000, +0x000a0000, +0x91520003, +0x935c0000, +0x8a000000, +0x00050045, +0x0006000b, +0xe5002000, +0x000d8180, +0xe5089000, +0x000d8180, +0x0006000c, +0xe18920dc, +0xe15c000b, +0x118120fc, +0xe28cc008, +0x1a000000, +0x0005000c, +0xe3a02000, +0xe1a0a000, +0xe3a03000, +0xeb000000, +0x00050021, +0x0006000e, +0xe51a2000, +0x000d8180, +0xe3e01000, +0x000a0000, +0xe51a3000, +0x000d8180, +0xe5071000, +0x000d8180, +0xe3500000, +0x000a0000, +0x00000000, +0xe5189000, +0x000d8180, +0x8a000000, +0x00050008, +0xe053b002, +0xe5180000, +0x000d8180, +0xe089100b, +0x0a000000, +0x00050006, +0xe1510000, +0xe3a0c000, +0x8a000000, +0x00050009, +0xe24b3008, +0xe50a2000, +0x000d8180, +0x0006000f, +0xe18200dc, +0xe15c0003, +0xe18900fc, +0xe28cc008, +0x1a000000, +0x0005000f, +0x00060010, +0xe1a0a009, +0xe28bb008, +0xe2160000, +0x000a0000, +0xe58d6008, +0xe58db004, +0x0a000000, +0x00050017, +0xea000000, +0x00050018, +0x00060012, +0xe1a00008, +0xe1a0100a, +0xeb000000, +0x0003000e, +0x00060013, +0xe1a00008, +0xe1a011ab, +0xeb000000, +0x00030000, +0xe3a00000, +0xea000000, +0x0005000e, +0x00060057, +0xe5180000, +0x000d8180, +0xe089100b, +0xe5089000, +0x000d8180, +0xe3100000, +0x000a0000, +0xe5081000, +0x000d8180, +0x00000000, +0xe3a00000, +0x000a0000, +0xe3a02000, +0x0a000000, +0x00050045, +0xe5082000, +0x000d8180, +0xe5480000, +0x000d8180, +0xea000000, +0x0005001a, +0x00060058, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x0a000000, +0x00050048, +0x8a000000, +0x00050045, +0xe1a02081, +0xe292c980, +0x5a000000, +0x00050002, +0xe3e03ff8, +0xe053cacc, +0xe1a03581, +0xe1a02580, +0xe3833480, +0xe26ce020, +0xe1833aa0, +0x9a000000, +0x00050003, +0xe1822e13, +0xe1a00c33, +0xe1120fc1, +0x12800001, +0xe3510000, +0xb2600000, +0x0006000b, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006000c, +0xe1822000, +0xe1120fc1, +0x03a00000, +0x13e00000, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006000d, +0x03530480, +0x03520000, +0x1a000000, +0x00050004, +0xe3510000, +0x43a00480, +0x4a000000, +0x0005000b, +0x0006000e, +0x00000000, +0xeb000000, +0x00050059, +0xea000000, +0x00050048, +0x0006005a, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x0a000000, +0x00050048, +0x8a000000, +0x00050045, +0xe1a02081, +0xe292c980, +0x5a000000, +0x00050002, +0xe3e03ff8, +0xe053cacc, +0xe1a03581, +0xe1a02580, +0xe3833480, +0xe26ce020, +0xe1833aa0, +0x9a000000, +0x00050003, +0xe1822e13, +0xe1a00c33, +0xe1d22fc1, +0x12900001, +0x614f00d0, +0x00051809, +0x6a000000, +0x00050048, +0xe3510000, +0xb2600000, +0x0006000b, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006000c, +0xe1822000, +0xe1d22fc1, +0x03a00000, +0x13a00001, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006000d, +0x03530480, +0x1a000000, +0x00050004, +0xe3510000, +0x43a00480, +0x4a000000, +0x0005000b, +0x0006000e, +0xeb000000, +0x0005005b, +0x00000000, +0xea000000, +0x00050048, +0x00040007, +0x00060013, +0x00020000, +0x00000000, +0x41e00000, +0x0006005c, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x8a000000, +0x00050045, +0x13c11480, +0x1a000000, +0x00050048, +0xe3500000, +0xb2700000, +0x614f00d0, +0x00051813, +0x00060048, +0xe5196004, +0xe14900f8, +0x0006005d, 
+0xe3a0b000, +0x000a0000, +0x00060046, +0xe2160000, +0x000a0000, +0x0516e004, +0xe58db004, +0xe249a008, +0x1a000000, +0x00050018, +0xe004caae, +0x0006000f, +0xe15c000b, +0x8a000000, +0x00050006, +0xe00402ae, +0xe5d6c000, +0xe496e004, +0xe04a9000, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00060010, +0xe08a100b, +0xe3e00000, +0x000a0000, +0xe28bb008, +0xe5010004, +0xea000000, +0x0005000f, +0x0006005e, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0x00000000, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x0003000f, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x0006005f, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030010, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060060, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030011, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060061, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030012, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060062, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030013, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060063, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030014, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060064, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030015, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060065, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030016, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060066, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030017, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060067, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030018, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060068, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030019, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060069, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x0003001a, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x0006006a, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, 
+0x0003001b, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x0006006b, +0xe1c900d0, +0xe1c920d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x0003001c, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x0006006c, +0xe1c900d0, +0xe1c920d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x0003001d, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x0006006d, +0xe1c900d0, +0xe1c920d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x2a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x0003001e, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x0006006e, +0x0006006f, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0xe14220d0, +0x000c8100, +0xeb000000, +0x0003001f, +0xea000000, +0x00050048, +0x00060070, +0xe1c900d0, +0xe1c920d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030020, +0x00000000, +0xe1a0900a, +0x00000000, +0xea000000, +0x00050048, +0x00060071, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0xe1a0200d, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030021, +0x00000000, +0xe1a0900a, +0x00000000, +0xe59d2000, +0xe3e03000, +0x000a0000, +0xe5196004, +0xe14900f8, +0xe3a0b000, +0x000a0000, +0xe1c920f0, +0xea000000, +0x00050046, +0x00060072, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050045, +0xe2492008, +0xe5196004, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030022, +0x00000000, +0xe1a0900a, +0x00000000, +0xe3a0b000, +0x000a0000, +0xe1c900f0, +0xea000000, +0x00050046, +0x00060073, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0xe3a0a008, +0x1a000000, +0x00050004, +0x0006000b, +0xe18920da, +0xe15a000b, +0x2a000000, +0x00050048, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050003, +0xe1500002, +0xe28aa008, +0xc1a00002, +0xea000000, +0x0005000b, +0x0006000d, +0x8a000000, +0x00050045, +0xeb000000, +0x00030023, +0xe18920da, +0xea000000, +0x00050006, +0x0006000e, +0x8a000000, +0x00050045, +0x0006000f, +0xe18920da, +0xe15a000b, +0x2a000000, +0x00050048, +0xe3730000, +0x000a0000, +0x2a000000, +0x00050007, +0x00060010, +0x00000000, +0xeb000000, +0x00030024, +0xe28aa008, +0x81a00002, +0x81a01003, +0xea000000, +0x0005000f, +0x00060011, +0x8a000000, +0x00050045, +0xe1cd00f0, +0xe1a00002, +0xeb000000, +0x00030023, +0xe1cd20d0, +0xea000000, +0x00050010, +0x00060074, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0xe3a0a008, +0x1a000000, +0x00050004, +0x0006000b, +0xe18920da, +0xe15a000b, +0x2a000000, +0x00050048, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050003, +0xe1500002, +0xe28aa008, +0xb1a00002, +0xea000000, +0x0005000b, +0x0006000d, +0x8a000000, +0x00050045, +0xeb000000, +0x00030023, +0xe18920da, +0xea000000, +0x00050006, +0x0006000e, +0x8a000000, +0x00050045, +0x0006000f, +0x00000000, +0xe18920da, +0xe15a000b, +0x2a000000, +0x00050048, +0xe3730000, +0x000a0000, +0x2a000000, +0x00050007, +0x00060010, 
+0xeb000000, +0x00030024, +0xe28aa008, +0x31a00002, +0x31a01003, +0xea000000, +0x0005000f, +0x00060011, +0x8a000000, +0x00050045, +0xe1cd00f0, +0xe1a00002, +0xeb000000, +0x00030023, +0xe1cd20d0, +0xea000000, +0x00050010, +0x00060075, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050045, +0xe5100000, +0x000d8180, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x00060076, +0xe1c900d0, +0xe5196004, +0xe35b0008, +0x03710000, +0x000a0000, +0x1a000000, +0x00050045, +0xe5102000, +0x000d8180, +0xe5500000, +0x000d8180, +0x00000000, +0xe3e01000, +0x000a0000, +0xe3520000, +0x03a0b000, +0x000a0000, +0x13a0b000, +0x000a0000, +0xe14900f8, +0xea000000, +0x00050046, +0x00060077, +0xe5170000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe1500001, +0xab000000, +0x0005004e, +0xe1c900d0, +0xe5196004, +0xe35b0008, +0x03710000, +0x000a0000, +0x03d030ff, +0xe3a02001, +0x1a000000, +0x00050045, +0xe58d0000, +0xe1a0100d, +0x00060078, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe58d6008, +0xeb000000, +0x00030025, +0xe5189000, +0x000d8180, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x00060079, +0xe5170000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe1500001, +0xab000000, +0x0005004e, +0xe1c900d0, +0xe1c921d0, +0xe35b0010, +0xe3e0c000, +0x0a000000, +0x00050001, +0x3a000000, +0x00050045, +0x00000000, +0xe3730000, +0x000a0000, +0xe1a0c002, +0x1a000000, +0x00050045, +0x0006000b, +0xe1c920d8, +0xe3710000, +0x000a0000, +0x05101000, +0x000d8180, +0x03730000, +0x000a0000, +0x1a000000, +0x00050045, +0xe2813001, +0xe3520000, +0xb0822003, +0xe3520001, +0xb3a02001, +0xe35c0000, +0xb08cc003, +0xe1cccfcc, +0xe15c0001, +0xe2800000, +0x000a0000, +0xc1a0c001, +0xe0801002, +0xe05c2002, +0xe2822001, +0xaa000000, +0x00050078, +0x0006007a, +0xe2470000, +0x000a0000, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006007b, +0xe5170000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe1500001, +0xab000000, +0x0005004e, +0xe1c900d0, +0xe1c920d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x03730000, +0x000a0000, +0x1a000000, +0x00050045, +0xe2523001, +0xe5101000, +0x000d8180, +0x00000000, +0xba000000, +0x0005007a, +0xe3510001, +0x3a000000, +0x0005007a, +0x1a000000, +0x00050045, +0xe517c000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe5100000, +0x000d8180, +0xe15c0002, +0x3a000000, +0x00050045, +0x0006000b, +0xe7c10003, +0xe2533001, +0xaa000000, +0x0005000b, +0xea000000, +0x00050078, +0x0006007c, +0xe5170000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe1500001, +0xab000000, +0x0005004e, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050045, +0xe5102000, +0x000d8180, +0xe517c000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe1a03002, +0xe2800000, +0x000a0000, +0xe15c0002, +0x3a000000, +0x00050045, +0x0006000b, +0x00000000, +0xe4d0c001, +0xe2533001, +0xba000000, +0x00050078, +0xe7c1c003, +0xea000000, +0x0005000b, +0x0006007d, +0xe5170000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe1500001, +0xab000000, +0x0005004e, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050045, +0xe5102000, +0x000d8180, +0xe517c000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe3a03000, +0xe2800000, +0x000a0000, +0xe15c0002, +0x3a000000, +0x00050045, +0x0006000b, +0xe7d0c003, +0xe1530002, +0x2a000000, +0x00050078, +0xe24cb041, +0xe35b001a, +0x322cc020, +0xe7c1c003, +0xe2833001, +0xea000000, +0x0005000b, +0x0006007e, +0xe5170000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe1500001, +0xab000000, 
+0x0005004e, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0x00000000, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050045, +0xe5102000, +0x000d8180, +0xe517c000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe3a03000, +0xe2800000, +0x000a0000, +0xe15c0002, +0x3a000000, +0x00050045, +0x0006000b, +0xe7d0c003, +0xe1530002, +0x2a000000, +0x00050078, +0xe24cb061, +0xe35b001a, +0x322cc020, +0xe7c1c003, +0xe2833001, +0xea000000, +0x0005000b, +0x0006007f, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050045, +0x00000000, +0xe1a0a009, +0x00000000, +0xeb000000, +0x00030026, +0x00000000, +0xe1a0900a, +0x00000000, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x00060080, +0x8a000000, +0x00050045, +0x00060081, +0xe1a0c081, +0xe29cc980, +0x53a00000, +0x512fff1e, +0xe3e03ff8, +0xe053cacc, +0x4a000000, +0x00050001, +0xe1a03581, +0xe3833480, +0xe1833aa0, +0xe3510000, +0xe1a00c33, +0xb2600000, +0xe12fff1e, +0x0006000b, +0xe28cc015, +0xe1a03c30, +0xe26cc014, +0xe1a00601, +0xe3510000, +0xe1830c10, +0xb2600000, +0xe12fff1e, +0x00060082, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x00060083, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1a02000, +0xe3a0a008, +0x0006000b, +0xe18900da, +0xe15a000b, +0xe28aa008, +0xaa000000, +0x00050002, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe0022000, +0xea000000, +0x0005000b, +0x00060084, +0x00000000, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1a02000, +0xe3a0a008, +0x0006000b, +0xe18900da, +0xe15a000b, +0xe28aa008, +0xaa000000, +0x00050002, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1822000, +0xea000000, +0x0005000b, +0x00060085, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1a02000, +0xe3a0a008, +0x0006000b, +0xe18900da, +0xe15a000b, +0xe28aa008, +0xaa000000, +0x00050002, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe0222000, +0xea000000, +0x0005000b, +0x0006000c, +0xe3e03000, +0x000a0000, +0xe5196004, +0xe14920f8, +0xea000000, +0x0005005d, +0x00060086, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0x00000000, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe0202860, +0xe3c228ff, +0xe1a00460, +0xe3e01000, +0x000a0000, +0xe0200422, +0xea000000, +0x00050048, +0x00060087, +0xe1c900d0, +0xe35b0008, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1e00000, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x00060088, +0xe1c900d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe200a01f, +0xe1c900d0, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1a00a10, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x00060089, +0xe1c900d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0x00000000, +0xe200a01f, +0xe1c900d0, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1a00a30, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006008a, +0xe1c900d8, +0xe35b0010, +0x3a000000, +0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe200a01f, +0xe1c900d0, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1a00a50, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006008b, +0xe1c900d8, +0xe35b0010, +0x3a000000, 
+0x00050045, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe260a000, +0xe1c900d0, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1a00a70, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x0006008c, +0xe1c900d8, +0xe35b0010, +0x3a000000, +0x00050045, +0x00000000, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe200a01f, +0xe1c900d0, +0xe3710000, +0x000a0000, +0x1b000000, +0x00050080, +0xe1a00a70, +0xe3e01000, +0x000a0000, +0xea000000, +0x00050048, +0x00060045, +0xe5192008, +0xe5181000, +0x000d8180, +0xe089000b, +0xe5196004, +0xe5080000, +0x000d8180, +0xe5122000, +0x000d8180, +0xe5089000, +0x000d8180, +0xe2800000, +0x000a0000, +0xe58d6008, +0xe1500001, +0xe1a00008, +0x8a000000, +0x00050005, +0xe12fff32, +0xe5189000, +0x000d8180, +0xe3500000, +0xe1a0b180, +0xe249a008, +0xca000000, +0x00050046, +0x0006000b, +0xe5180000, +0x000d8180, +0xe5192008, +0xe040b009, +0x1a000000, +0x00050028, +0xe5126000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe08aa009, +0xe12fff1c, +0x00060028, +0xe2160000, +0x000a0000, +0xe3c61000, +0x000a0000, +0x00000000, +0x0516e004, +0x00020000, +0x000412ae, +0xe049c001, +0xea000000, +0x00050024, +0x0006000f, +0xe3a01000, +0x000a0000, +0xeb000000, +0x00030000, +0xe5189000, +0x000d8180, +0xe1500000, +0xea000000, +0x0005000b, +0x0006004e, +0xe1a0a00e, +0xe5089000, +0x000d8180, +0xe089100b, +0xe58d6008, +0xe5081000, +0x000d8180, +0xe1a00008, +0xeb000000, +0x00030027, +0xe5189000, +0x000d8180, +0xe1a0e00a, +0xe5192008, +0xe12fff1e, +0x0006008d, +0x00000000, +0xe5570000, +0x000d8180, +0xe3100000, +0x000a0000, +0x1a000000, +0x00050005, +0xe5171000, +0x000d8180, +0xe3100000, +0x000a0000, +0x1a000000, +0x00050001, +0xe2411001, +0xe3100000, +0x000a0000, +0x15071000, +0x000d8180, +0xea000000, +0x00050001, +0x00000000, +0x0006008e, +0xe5570000, +0x000d8180, +0xe3100000, +0x000a0000, +0x0a000000, +0x00050001, +0x0006000f, +0xe20ec0ff, +0xe087c10c, +0xe51cf000, +0x000d8180, +0x0006008f, +0xe5570000, +0x000d8180, +0xe5171000, +0x000d8180, +0xe3100000, +0x000a0000, +0x1a000000, +0x0005000f, +0xe3100000, +0x000a0000, +0x0a000000, +0x0005000f, +0xe2511001, +0xe5071000, +0x000d8180, +0x0a000000, +0x00050001, +0xe3100000, +0x000a0000, +0x0a000000, +0x0005000f, +0x0006000b, +0xe1a00008, +0xe5089000, +0x000d8180, +0xe1a01006, +0xeb000000, +0x00030028, +0x0006000d, +0xe5189000, +0x000d8180, +0x0006000e, +0x00000000, +0xe556c004, +0xe516e004, +0xe087c10c, +0xe51cc000, +0x000d8180, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00060090, +0xe5130018, +0xe2866004, +0xe58d0004, +0xea000000, +0x0005000e, +0x00060091, +0x00000000, +0xe5192008, +0xe2470000, +0x000a0000, +0xe58d6008, +0xe5122000, +0x000d8180, +0xe1a01006, +0xe5078000, +0x000d8180, +0xe5522000, +0x000d8180, +0xe5089000, +0x000d8180, +0xe0892182, +0xe5082000, +0x000d8180, +0xeb000000, +0x00030029, +0xea000000, +0x0005000d, +0x00000000, +0x00060092, +0xe1a01006, +0x00000000, +0xea000000, +0x00050001, +0x00000000, +0x00060093, +0x00000000, +0xe3861001, +0x0006000b, +0x00000000, +0xe089300b, +0xe58d6008, +0xe1a00008, +0xe5089000, +0x000d8180, +0xe04aa009, +0xe5083000, +0x000d8180, +0xeb000000, +0x0003002a, +0xe5189000, +0x000d8180, +0xe5183000, +0x000d8180, +0xe3a01000, +0xe089a00a, +0xe043b009, +0xe58d1008, +0xe5192008, +0xe516e004, +0xe12fff10, +0x00060094, +0x00000000, +0xe24dd00c, +0xe92d1fff, +0xe59d0040, +0xe59e7000, +0xe28d2040, +0xe3e03000, +0x000a0000, +0xe58d2034, +0xe5073000, +0x000d8180, +0xe5301004, +0xe58d0038, +0xe58d003c, +0xe1a01401, +0xe0800341, +0xe59e1004, +0xe040000e, 
+0xe5178000, +0x000d8180, +0xe0810120, +0xe5179000, +0x000d8180, +0xe5070000, +0x000d8180, +0xe3a03000, +0xe5078000, +0x000d8180, +0xe5089000, +0x000d8180, +0xe5073000, +0x000d8180, +0xe2470000, +0x000a0000, +0xe1a0100d, +0xeb000000, +0x0003002b, +0xe5181000, +0x000d8180, +0xe5189000, +0x000d8180, +0xe3c11000, +0x000a0000, +0xe1a0d001, +0xe59d6008, +0xe58d800c, +0xea000000, +0x00050001, +0x00000000, +0x00060095, +0x00000000, +0xe59d800c, +0x0006000b, +0xe3500000, +0xba000000, +0x00050003, +0xe1a0b180, +0xe5191008, +0xe58db004, +0xe3a02000, +0xe5111000, +0x000d8180, +0xe5072000, +0x000d8180, +0xe3e03000, +0x000a0000, +0xe5115000, +0x000d8180, +0xe5d6c000, +0xe3a040ff, +0xe496e004, +0xe1a04184, +0xe5073000, +0x000d8180, +0xe35c0000, +0x000a0000, +0xe797c10c, +0xe004a2ae, +0x31a0b82e, +0x224bb008, +0x208aa009, +0xe12fff1c, +0x0006000d, +0xe2601000, +0xe1a00008, +0xeb000000, +0x0003002c, +0x00000000, +0x00060013, +0x3ff00000, +0x00060059, +0xe1a02081, +0xe292c980, +0x5a000000, +0x00050002, +0xe3e03ff3, +0xe053cacc, +0x312fff1e, +0xe3e03001, +0xe1c02c13, +0xe0000c13, +0xe25cc020, +0x51c13c13, +0x51822003, +0x53e03001, +0x50011c13, +0xe1120fc1, +0x012fff1e, +0xe3e03001, +0xe35c0000, +0x51a02c13, +0x43e02000, +0xe28cc020, +0xe0500c13, +0xe0c11002, +0xe12fff1e, +0x0006000c, +0xe1822000, +0xe1120fc1, +0xe3a00000, +0xe2011480, +0x151f3000, +0x00050813, +0x11811003, +0xe12fff1e, +0x0006005b, +0xe1a02081, +0xe292c980, +0x5a000000, +0x00050002, +0xe3e03ff3, +0xe053cacc, +0x312fff1e, +0xe3e03001, +0xe1c02c13, +0xe0000c13, +0xe25cc020, +0x51c13c13, +0x51822003, +0x53e03001, +0x50011c13, +0xe1d22fc1, +0x012fff1e, +0xe3e03001, +0xe35c0000, +0x51a02c13, +0x43e02000, +0xe28cc020, +0xe0500c13, +0xe0c11002, +0xe12fff1e, +0x0006000c, +0xe1822000, +0xe1d22fc1, +0xe3a00000, +0xe2011480, +0x151f3000, +0x00050813, +0x11811003, +0xe12fff1e, +0x00060096, +0x00000000, +0xe1a02081, +0xe292c980, +0x52011480, +0x53a00000, +0x512fff1e, +0xe3e03ff3, +0xe053cacc, +0x312fff1e, +0xe3e03001, +0xe0000c13, +0xe25cc020, +0x50011c13, +0xe12fff1e, +0x00000000, +0x00060097, +0xe92d401f, +0xeb000000, +0x0003002d, +0xeb000000, +0x00050059, +0xe1cd20d8, +0xeb000000, +0x0003001f, +0xe1cd20d0, +0xe2211480, +0xeb000000, +0x0003002e, +0xe28dd014, +0xe8bd8000, +0x00060098, +0xe210c480, +0x42600000, +0xe02cc0c1, +0xe3510000, +0x42611000, +0xe2513001, +0x11500001, +0x03a00000, +0x81110003, +0x00020000, +0x00000003, +0x9a000000, +0x00050001, +0xe16f2f10, +0xe16f3f11, +0xe0433002, +0xe273201f, +0x108ff182, +0xe1a00000, +0x00000000, +0xe1500001, +0x000900a7, +0x20400001, +0x000900a7, +0x00000000, +0x0006000b, +0xe3500000, +0x135c0000, +0x40400001, +0xe030108c, +0x42600000, +0xe12fff1e, +0x00060099, +0xe59dc000, +0xe35c0001, +0x3a000000, +0x0003002e, +0x0a000000, +0x0003002f, +0xe35c0003, +0x3a000000, +0x0003001f, +0x0a000000, +0x0003002d, +0xe35c0005, +0x3a000000, +0x00050097, +0x0a000000, +0x0003001c, +0xe35c0007, +0x32211480, +0x03c11480, +0x912fff1e, +0x00000000, +0xe35c0009, +0x3a000000, +0x0003001d, +0x0a000000, +0x00050009, +0xe35c000b, +0x8a000000, +0x00050009, +0xe92d4010, +0x0a000000, +0x00050001, +0xeb000000, +0x00030024, +0x81a00002, +0x81a01003, +0xe8bd8010, +0x00060013, +0xe7f001f0, +0x0006000b, +0xeb000000, +0x00030024, +0x31a00002, +0x31a01003, +0xe8bd8010, +0x00000000, +0xe7f001f0, +0x00000000, +0x0006009a, +0x00000000, +0xe92d4830, +0xe1a04000, +0xe5100000, +0x000d8180, +0xe5541000, +0x000d8180, +0xe2842000, +0x000a0000, +0xe1a0b00d, +0xe04dd000, +0xe2511001, +0xe514c000, +0x000d8180, +0x4a000000, +0x00050002, +0x0006000b, 
+0xe7923101, +0xe78d3101, +0xe2511001, +0x5a000000, +0x0005000b, +0x0006000c, +0xe5140000, +0x000d8180, +0xe5141000, +0x000d8180, +0xe5142000, +0x000d8180, +0xe5143000, +0x000d8180, +0xe12fff3c, +0xe1a0d00b, +0xe5040000, +0x000d8180, +0xe5041000, +0x000d8180, +0xe8bd8830, +0x00000000, +0x00080000, +0x00000000, +0xe1a0b18b, +0xe1aa00d9, +0xe1d6c0b2, +0xe1ab20d9, +0xe2866004, +0xe086c10c, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050003, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050004, +0xe1500002, +0x00000000, +0xb24c6b80, +0x00000000, +0xa24c6b80, +0x00000000, +0xd24c6b80, +0x00000000, +0xc24c6b80, +0x00000000, +0x0006000b, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000d, +0x8a000000, +0x00050034, +0xe3730000, +0x000a0000, +0x31a0a00c, +0x3a000000, +0x00050005, +0xe1a00002, +0xe1a0b00a, +0xe1a0a00c, +0xeb000000, +0x00030023, +0xe1a02000, +0xe1a03001, +0xe1cb00d0, +0xea000000, +0x00050005, +0x0006000e, +0x8a000000, +0x00050034, +0xe1a0a00c, +0xeb000000, +0x00030023, +0xe1cb20d0, +0x0006000f, +0xeb000000, +0x00030024, +0x00000000, +0x324a6b80, +0x00000000, +0x224a6b80, +0x00000000, +0x924a6b80, +0x00000000, +0x824a6b80, +0x00000000, +0xea000000, +0x0005000b, +0x00000000, +0xe1a0b18b, +0xe1aa00d9, +0xe1d6c0b2, +0xe1ab20d9, +0xe2866004, +0xe086c10c, +0xe3710000, +0x000a0000, +0x93730000, +0x000a0000, +0x00000000, +0x9a000000, +0x0005009b, +0x00000000, +0x9a000000, +0x0005009c, +0x00000000, +0xe3710000, +0x000a0000, +0x13730000, +0x000a0000, +0x0a000000, +0x0005003a, +0x00000000, +0xe1510003, +0x1a000000, +0x00050002, +0xe3710000, +0x000a0000, +0x2a000000, +0x00050001, +0xe1500002, +0x00000000, +0x1a000000, +0x00050003, +0x0006000b, +0xe24c6b80, +0x0006000c, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000d, +0xe3710000, +0x000a0000, +0x8a000000, +0x0005000c, +0x00000000, +0x0a000000, +0x00050001, +0xe3710000, +0x000a0000, +0x8a000000, +0x00050002, +0x00000000, +0xe510a000, +0x000d8180, +0xe35a0000, +0x00000000, +0x0a000000, +0x0005000c, +0x00000000, +0x0a000000, +0x00050002, +0x00000000, +0xe55aa000, +0x000d8180, +0xe3a03000, +0x000a0000, +0xe1a01000, +0xe31a0000, +0x000a0000, +0x0a000000, +0x00050039, +0x00000000, +0xea000000, +0x0005000c, +0x00000000, +0x0006000c, +0xe24c6b80, +0x0006000b, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1e0b00b, +0xe18900da, +0xe1d6c0b2, +0xe795210b, +0xe2866004, +0xe086c10c, +0xe3710000, +0x000a0000, +0x00000000, +0x1a000000, +0x00050007, +0xe1500002, +0x00000000, +0x01500002, +0x00000000, +0x024c6b80, +0x0006000b, +0x00000000, +0x0006000b, +0x124c6b80, +0x00000000, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0x00060011, +0xe3710000, +0x000a0000, +0x1a000000, +0x0005000b, +0xea000000, +0x0005003a, +0x00000000, +0xe1a0b18b, +0xe1aa00d9, +0xe1d6c0b2, +0xe1ab20d5, +0xe2866004, +0xe086c10c, +0x00000000, +0x0006009b, +0x00000000, +0x0006009c, +0x00000000, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050003, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050004, +0xe1500002, +0x00000000, +0x024c6b80, +0x0006000b, +0x00000000, +0x0006000b, +0x124c6b80, +0x00000000, +0x0006000c, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000d, +0x00000000, +0x8a000000, +0x00050007, +0x00000000, +0x824c6b80, +0x00000000, +0x8a000000, +0x0005000c, +0x00000000, +0xe3730000, +0x000a0000, +0x31a0a00c, +0x3a000000, +0x00050005, +0xe1a00002, +0xe1a0b00a, +0x0006000e, 
+0xe1a0a00c, +0xeb000000, +0x00030023, +0xe1cb20d0, +0x0006000f, +0xeb000000, +0x00030030, +0x00000000, +0x024a6b80, +0x00000000, +0x124a6b80, +0x00000000, +0xea000000, +0x0005000c, +0x00000000, +0x00060011, +0xe3710000, +0x000a0000, +0x1a000000, +0x0005000b, +0xea000000, +0x0005003a, +0x00000000, +0xe18900da, +0xe1d6c0b2, +0xe2866004, +0xe1e0b00b, +0xe086c10c, +0x00000000, +0xe3710000, +0x000a0000, +0x0a000000, +0x0005003a, +0x00000000, +0xe151000b, +0x00000000, +0x024c6b80, +0x00000000, +0x124c6b80, +0x00000000, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe089b18b, +0xe1d6c0b2, +0xe1cb00d0, +0xe2866004, +0xe086c10c, +0xe3710000, +0x000a0000, +0x00000000, +0x924c6b80, +0x00000000, +0x918900fa, +0x00000000, +0x824c6b80, +0x00000000, +0x818900fa, +0x00000000, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1a0b18b, +0xe5d6c000, +0xe18900db, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe089b18b, +0xe5d6c000, +0xe59b0004, +0xe089a00a, +0xe496e004, +0xe3700000, +0x000a0000, +0x93e01000, +0x000a0000, +0x83e01000, +0x000a0000, +0xe58a1004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1a0b18b, +0xe18900db, +0xe5d6c000, +0xe496e004, +0xe3710000, +0x000a0000, +0x8a000000, +0x0005003d, +0x12211480, +0x1a000000, +0x00050005, +0x02700000, +0x614f00d0, +0x00051809, +0x0006000f, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00040007, +0x00060013, +0x00020000, +0x00000000, +0x41e00000, +0x00000000, +0xe1a0b18b, +0xe18900db, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050002, +0xe5100000, +0x000d8180, +0x0006000b, +0xe3e01000, +0x000a0000, +0xe5d6c000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000c, +0xe3710000, +0x000a0000, +0x1a000000, +0x0005003f, +0x00000000, +0xe5102000, +0x000d8180, +0xe3520000, +0x1a000000, +0x00050009, +0x0006000d, +0x00000000, +0x00060040, +0x00000000, +0xe1a0b009, +0x00000000, +0xeb000000, +0x00030026, +0x00000000, +0xe1a0900b, +0x00000000, +0xea000000, +0x0005000b, +0x00000000, +0x00060013, +0xe5523000, +0x000d8180, +0xe3130000, +0x000a0000, +0x1a000000, +0x0005000d, +0xea000000, +0x0005003f, +0x00000000, +0xe004caae, +0xe004b6ae, +0x00000000, +0xe18900dc, +0xe18520db, +0x00000000, +0xe18920dc, +0xe18500db, +0x00000000, +0xe18900dc, +0xe18920db, +0x00000000, +0xe5d6c000, +0x00000000, +0xe3730000, +0x000a0000, +0x03710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x03730000, +0x000a0000, +0x00000000, +0x1a000000, +0x00050005, +0xe0900002, +0x00000000, +0x6a000000, +0x0005003b, +0x00000000, +0x6a000000, +0x0005003c, +0x00000000, +0x6a000000, +0x0005003e, +0x00000000, +0x0006000e, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003b, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003c, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003e, +0x00000000, +0xeb000000, +0x0003002e, +0xe5d6c000, +0xea000000, +0x0005000e, +0x00000000, +0xe004caae, +0xe004b6ae, +0x00000000, +0xe18900dc, +0xe18520db, +0x00000000, 
+0xe18920dc, +0xe18500db, +0x00000000, +0xe18900dc, +0xe18920db, +0x00000000, +0xe5d6c000, +0x00000000, +0xe3730000, +0x000a0000, +0x03710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x03730000, +0x000a0000, +0x00000000, +0x1a000000, +0x00050005, +0xe0500002, +0x00000000, +0x6a000000, +0x0005003b, +0x00000000, +0x6a000000, +0x0005003c, +0x00000000, +0x6a000000, +0x0005003e, +0x00000000, +0x0006000e, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003b, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003c, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003e, +0x00000000, +0xeb000000, +0x0003002f, +0xe5d6c000, +0xea000000, +0x0005000e, +0x00000000, +0xe004caae, +0xe004b6ae, +0x00000000, +0xe18900dc, +0xe18520db, +0x00000000, +0xe18920dc, +0xe18500db, +0x00000000, +0xe18900dc, +0xe18920db, +0x00000000, +0xe5d6c000, +0x00000000, +0xe3730000, +0x000a0000, +0x03710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x03730000, +0x000a0000, +0x00000000, +0x1a000000, +0x00050005, +0xe0cb0092, +0xe15b0fc0, +0x00000000, +0x1a000000, +0x0005003b, +0x00000000, +0x1a000000, +0x0005003c, +0x00000000, +0x1a000000, +0x0005003e, +0x00000000, +0x0006000e, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003b, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003c, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003e, +0x00000000, +0xeb000000, +0x0003001f, +0xe5d6c000, +0xea000000, +0x0005000e, +0x00000000, +0xe004caae, +0xe004b6ae, +0x00000000, +0xe18900dc, +0xe18520db, +0x00000000, +0xe18920dc, +0xe18500db, +0x00000000, +0xe18900dc, +0xe18920db, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003b, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003c, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003e, +0x00000000, +0xeb000000, +0x0003002d, +0xe5d6c000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe004caae, +0xe004b6ae, +0x00000000, +0xe18900dc, +0xe18520db, +0x00000000, +0xe18920dc, +0xe18500db, +0x00000000, +0xe18900dc, +0xe18920db, +0x00000000, +0xe3730000, +0x000a0000, +0x03710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x03730000, +0x000a0000, +0x00000000, +0x1a000000, +0x00050005, +0xe1b01002, +0x00000000, +0x0a000000, +0x0005003b, +0x00000000, +0x0a000000, +0x0005003c, +0x00000000, +0x0a000000, +0x0005003e, +0x00000000, 
+0xeb000000, +0x00050098, +0xe3e01000, +0x000a0000, +0x0006000e, +0xe5d6c000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003b, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003c, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003e, +0x00000000, +0xeb000000, +0x00050097, +0xea000000, +0x0005000e, +0x00000000, +0xe004caae, +0xe004b6ae, +0x00000000, +0xe18900dc, +0xe18520db, +0x00000000, +0xe18920dc, +0xe18500db, +0x00000000, +0xe18900dc, +0xe18920db, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003b, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003c, +0x00000000, +0xe3730000, +0x000a0000, +0x33710000, +0x000a0000, +0x00000000, +0xe3710000, +0x000a0000, +0x33730000, +0x000a0000, +0x00000000, +0x2a000000, +0x0005003e, +0x00000000, +0xe1a0b009, +0x00000000, +0xeb000000, +0x0003001c, +0x00000000, +0xe1a0900b, +0x00000000, +0xe5d6c000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe004baae, +0xe004c6ae, +0xe04c200b, +0xe5089000, +0x000d8180, +0xe089100c, +0x0006002a, +0xe1a00008, +0xe58d6008, +0xe1a021a2, +0xeb000000, +0x00030031, +0xe5189000, +0x000d8180, +0xe3500000, +0x1a000000, +0x00050035, +0xe18920db, +0xe5d6c000, +0xe496e004, +0xe18920fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1e0b00b, +0xe5d6c000, +0xe795010b, +0xe3e01000, +0x000a0000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1e0b00b, +0xe5d6c000, +0xe795010b, +0xe3e01000, +0x000a0000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1a0084e, +0xe3e01000, +0x000a0000, +0xe5d6c000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1a0b18b, +0xe5d6c000, +0xe18500db, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe089a00a, +0xe1e0b00b, +0xe5d6c000, +0xe496e004, +0xe58ab004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe089a00a, +0xe089b18b, +0xe3e00000, +0x000a0000, +0xe58a0004, +0xe28aa008, +0x0006000b, +0xe58a0004, +0xe15a000b, +0xe28aa008, +0xba000000, +0x0005000b, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe5191008, +0xe1a0b10b, +0xe28bb000, +0x000a0000, +0xe791100b, +0xe5111000, +0x000d8180, +0xe1c120d0, +0xe5d6c000, +0xe496e004, +0xe18920fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe5191008, +0xe1a0a0aa, +0xe28aa000, +0x000a0000, +0xe1a0b18b, +0xe791100a, +0xe18920db, +0xe551c000, +0x000d8180, +0xe551b000, +0x000d8180, +0xe5111000, +0x000d8180, +0xe31c0000, +0x000a0000, +0xe283c000, +0x000a0000, +0x135b0000, +0xe1c120f0, +0x1a000000, +0x00050002, +0x0006000b, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000c, +0xe37c0000, +0x000a0000, +0x8552b000, +0x000d8180, 
+0x9a000000, +0x0005000b, +0xe2470000, +0x000a0000, +0xe31b0000, +0x000a0000, +0x00000000, +0x0a000000, +0x0005000b, +0xe1a0b009, +0xeb000000, +0x00030032, +0xe1a0900b, +0x00000000, +0x1b000000, +0x00030032, +0x00000000, +0xea000000, +0x0005000b, +0x00000000, +0xe5191008, +0xe1a0a0aa, +0xe28aa000, +0x000a0000, +0xe1e0b00b, +0xe791100a, +0xe795210b, +0xe3e03000, +0x000a0000, +0xe551c000, +0x000d8180, +0xe5111000, +0x000d8180, +0xe551b000, +0x000d8180, +0xe31c0000, +0x000a0000, +0xe552c000, +0x000d8180, +0xe1c120f0, +0x1a000000, +0x00050002, +0x0006000b, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000c, +0xe31c0000, +0x000a0000, +0x135b0000, +0xe2470000, +0x000a0000, +0x00000000, +0x0a000000, +0x0005000b, +0xe1a0b009, +0xeb000000, +0x00030032, +0xe1a0900b, +0x00000000, +0x1b000000, +0x00030032, +0x00000000, +0xea000000, +0x0005000b, +0x00000000, +0xe5191008, +0xe1a0a0aa, +0xe28aa000, +0x000a0000, +0xe1a0b18b, +0xe791100a, +0xe18520db, +0xe5111000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe1c120f0, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe5191008, +0xe1a0a0aa, +0xe28aa000, +0x000a0000, +0xe791100a, +0xe1e0b00b, +0xe5111000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe581b004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe5182000, +0x000d8180, +0xe086b10b, +0xe5089000, +0x000d8180, +0xe3520000, +0xe24b6b80, +0x0a000000, +0x00050001, +0xe1a00008, +0xe089100a, +0xeb000000, +0x00030033, +0xe5189000, +0x000d8180, +0x0006000b, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1e0b00b, +0xe5089000, +0x000d8180, +0xe795110b, +0xe58d6008, +0xe5192008, +0xe1a00008, +0xeb000000, +0x00030034, +0xe5189000, +0x000d8180, +0xe3e01000, +0x000a0000, +0xe5d6c000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1e0b00b, +0x00000000, +0xe5172000, +0x000d8180, +0xe5173000, +0x000d8180, +0xe5089000, +0x000d8180, +0xe58d6008, +0xe1520003, +0xe1a00008, +0x2a000000, +0x00050005, +0x0006000b, +0x00000000, +0xe1a01a8b, +0xe1a025ab, +0xe1a0bac1, +0xe1a01aa1, +0xe37b0001, +0x02811002, +0xeb000000, +0x00030035, +0x00000000, +0xe795110b, +0xeb000000, +0x00030036, +0x00000000, +0xe5189000, +0x000d8180, +0xe3e01000, +0x000a0000, +0xe5d6c000, +0xe496e004, +0xe18900fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xeb000000, +0x00030037, +0xe1a00008, +0xea000000, +0x0005000b, +0x00000000, +0xe5191008, +0xe1e0b00b, +0xe5110000, +0x000d8180, +0xe795b10b, +0x00000000, +0xea000000, +0x0005009d, +0x00000000, +0xea000000, +0x0005009e, +0x00000000, +0xe004caae, +0xe004b6ae, +0xe18900dc, +0xe18920db, +0xe3710000, +0x000a0000, +0x1a000000, +0x0005002f, +0xe3730000, +0x000a0000, +0x05103000, +0x000d8180, +0x05101000, +0x000d8180, +0x1a000000, +0x00050009, +0xe0833182, +0xe1520001, +0x31c320d0, +0x2a000000, +0x0005002f, +0xe5d6c000, +0xe3730000, +0x000a0000, +0x0a000000, +0x00050005, +0x0006000b, +0xe496e004, +0xe18920fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xe5101000, +0x000d8180, +0xe3510000, +0x0a000000, +0x0005000b, +0xe5511000, +0x000d8180, +0xe3110000, +0x000a0000, +0x1a000000, +0x0005000b, +0xe004caae, +0xea000000, +0x0005002f, +0x00060013, +0xe3730000, +0x000a0000, +0x01a0b002, +0x0a000000, +0x0005009d, +0xea000000, +0x0005002f, +0x00000000, +0xe004caae, +0xe20bb0ff, +0xe18900dc, +0xe1e0b00b, +0xe795b10b, +0xe3710000, +0x000a0000, +0x1a000000, +0x0005002c, +0x0006009d, +0xe5102000, +0x000d8180, 
+0xe51b3000, +0x000d8180, +0xe510e000, +0x000d8180, +0xe1a0c000, +0xe0022003, +0xe0822082, +0xe08ee182, +0x0006000b, +0xe14e00d0, +0x000c8100, +0xe14e20d0, +0x000c8100, +0xe51ee000, +0x000d8180, +0xe150000b, +0x03710000, +0x000a0000, +0x1a000000, +0x00050004, +0xe3730000, +0x000a0000, +0x0a000000, +0x00050005, +0x0006000d, +0xe5d6c000, +0xe496e004, +0xe18920fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000e, +0xe35e0000, +0x1a000000, +0x0005000b, +0x0006000f, +0xe51c0000, +0x000d8180, +0xe3a02000, +0xe3e03000, +0x000a0000, +0xe3500000, +0x0a000000, +0x0005000d, +0xe5501000, +0x000d8180, +0x00000000, +0xe3110000, +0x000a0000, +0x1a000000, +0x0005000d, +0xea000000, +0x0005002d, +0x00000000, +0xe004caae, +0xe20bb0ff, +0xe18900dc, +0xe3710000, +0x000a0000, +0x1a000000, +0x0005002e, +0xe5102000, +0x000d8180, +0xe5103000, +0x000d8180, +0xe1a0118b, +0xe15b0002, +0x318320d1, +0x2a000000, +0x0005002e, +0xe5d6c000, +0xe3730000, +0x000a0000, +0x0a000000, +0x00050005, +0x0006000b, +0xe496e004, +0xe18920fa, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xe5101000, +0x000d8180, +0xe3510000, +0x0a000000, +0x0005000b, +0xe5511000, +0x000d8180, +0xe3110000, +0x000a0000, +0x1a000000, +0x0005000b, +0xea000000, +0x0005002e, +0x00000000, +0xe004caae, +0xe004b6ae, +0xe18900dc, +0xe18920db, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050033, +0xe3730000, +0x000a0000, +0x05101000, +0x000d8180, +0x05103000, +0x000d8180, +0x1a000000, +0x00050009, +0xe0811182, +0xe1520003, +0x3591e004, +0x2a000000, +0x00050033, +0xe5d6c000, +0xe37e0000, +0x000a0000, +0xe550e000, +0x000d8180, +0xe18920da, +0x0a000000, +0x00050005, +0x0006000b, +0xe31e0000, +0x000a0000, +0xe1c120f0, +0x1a000000, +0x00050007, +0x0006000c, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xe510a000, +0x000d8180, +0xe35a0000, +0x0a000000, +0x0005000b, +0xe55aa000, +0x000d8180, +0xe31a0000, +0x000a0000, +0x1a000000, +0x0005000b, +0xe516e004, +0xe004caae, +0xe004a2ae, +0xea000000, +0x00050033, +0x00060011, +0x00000000, +0xe5172000, +0x000d8180, +0xe3cee000, +0x000a0000, +0xe5070000, +0x000d8180, +0xe540e000, +0x000d8180, +0xe5002000, +0x000d8180, +0xea000000, +0x0005000c, +0x00060013, +0xe3730000, +0x000a0000, +0x01a0b002, +0x0a000000, +0x0005009e, +0xea000000, +0x00050033, +0x00000000, +0xe004caae, +0xe20bb0ff, +0xe18900dc, +0xe1e0b00b, +0xe795b10b, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050030, +0x0006009e, +0xe5102000, +0x000d8180, +0xe51b3000, +0x000d8180, +0xe510e000, +0x000d8180, +0xe1a0c000, +0xe0022003, +0xe0822082, +0xe3a03000, +0xe08ee182, +0xe54c3000, +0x000d8180, +0x0006000b, +0xe14e00d0, +0x000c8100, +0xe51e3000, +0x000d8180, +0xe51e2000, +0x000d8180, +0xe150000b, +0x03710000, +0x000a0000, +0x1a000000, +0x00050005, +0xe55c1000, +0x000d8180, +0xe3730000, +0x000a0000, +0xe18920da, +0x0a000000, +0x00050004, +0x0006000c, +0xe3110000, +0x000a0000, +0xe14e20f0, +0x000c8100, +0x1a000000, +0x00050007, +0x0006000d, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000e, +0x00000000, +0xe51c0000, +0x000d8180, +0xe3500000, +0x0a000000, +0x0005000c, +0xe5500000, +0x000d8180, +0xe3100000, +0x000a0000, +0x1a000000, +0x0005000c, +0xea000000, +0x00050031, +0x0006000f, +0xe1b0e002, +0x1a000000, +0x0005000b, +0xe51c0000, +0x000d8180, +0xe1a0200d, +0xe58d6008, +0xe3500000, +0xe5089000, +0x000d8180, +0x15501000, +0x000d8180, +0xe1a00008, +0x0a000000, +0x00050006, +0xe3110000, +0x000a0000, +0x0a000000, +0x00050031, +0x00060010, +0xe3e03000, +0x000a0000, 
+0xe58db000, +0xe1a0100c, +0xe58d3004, +0xeb000000, +0x00030038, +0xe5189000, +0x000d8180, +0xe18920da, +0xe1c020f0, +0xea000000, +0x0005000d, +0x00060011, +0xe5172000, +0x000d8180, +0xe3c11000, +0x000a0000, +0x00000000, +0xe507c000, +0x000d8180, +0xe54c1000, +0x000d8180, +0xe50c2000, +0x000d8180, +0xea000000, +0x0005000d, +0x00000000, +0xe004caae, +0xe20bb0ff, +0xe18900dc, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050032, +0xe5102000, +0x000d8180, +0xe510c000, +0x000d8180, +0xe1a0118b, +0xe15b0002, +0x31a120dc, +0x2a000000, +0x00050032, +0xe5d6c000, +0xe3730000, +0x000a0000, +0xe550e000, +0x000d8180, +0xe18920da, +0x0a000000, +0x00050005, +0x0006000b, +0xe31e0000, +0x000a0000, +0xe1c120f0, +0x1a000000, +0x00050007, +0x0006000c, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xe510a000, +0x000d8180, +0xe35a0000, +0x0a000000, +0x0005000b, +0xe55aa000, +0x000d8180, +0xe31a0000, +0x000a0000, +0x1a000000, +0x0005000b, +0xe516e004, +0xe004a2ae, +0xea000000, +0x00050032, +0x00060011, +0xe5172000, +0x000d8180, +0xe3cee000, +0x000a0000, +0x00000000, +0xe5070000, +0x000d8180, +0xe540e000, +0x000d8180, +0xe5002000, +0x000d8180, +0xea000000, +0x0005000c, +0x00000000, +0xe089a00a, +0x0006000b, +0xe59dc004, +0xe51a1008, +0xe795018b, +0xe25cc008, +0xe5113000, +0x000d8180, +0x0a000000, +0x00050004, +0xe08021ac, +0xe1520003, +0xe5113000, +0x000d8180, +0xe08ac00c, +0x8a000000, +0x00050005, +0xe083e180, +0xe5510000, +0x000d8180, +0x0006000d, +0xe0ca20d8, +0xe0ce20f8, +0xe15a000c, +0x3a000000, +0x0005000d, +0xe3100000, +0x000a0000, +0x1a000000, +0x00050007, +0x0006000e, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xe5089000, +0x000d8180, +0xe1a00008, +0xe58d6008, +0xeb000000, +0x00030039, +0x00000000, +0xe5189000, +0x000d8180, +0x00000000, +0xea000000, +0x0005000b, +0x00060011, +0xe5171000, +0x000d8180, +0xe3c00000, +0x000a0000, +0xe507c000, +0x000d8180, +0xe54c0000, +0x000d8180, +0xe50c1000, +0x000d8180, +0xea000000, +0x0005000e, +0x00000000, +0xe59d0004, +0xe004b6ae, +0xe08bb000, +0xea000000, +0x0005009f, +0x00000000, +0xe004b6ae, +0x0006009f, +0xe1a0c009, +0xe1a920da, +0xe24bb008, +0xe2899008, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050025, +0xe5096004, +0xe5126000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe08aa009, +0xe12fff1c, +0x00000000, +0xe59d0004, +0xe080b18b, +0xea000000, +0x000500a0, +0x00000000, +0xe1a0b18b, +0x000600a0, +0xe1aa20d9, +0xe24bb008, +0xe28aa008, +0xe3730000, +0x000a0000, +0x1a000000, +0x00050041, +0xe5196004, +0x00060042, +0xe3a0c000, +0xe5523000, +0x000d8180, +0xe3160000, +0x000a0000, +0x1a000000, +0x00050007, +0x0006000b, +0xe5092008, +0xe35b0000, +0x0a000000, +0x00050003, +0x0006000c, +0xe18a00dc, +0xe28ce008, +0xe15e000b, +0xe18900fc, +0xe1a0c00e, +0x1a000000, +0x0005000c, +0x0006000d, +0xe3530001, +0x8a000000, +0x00050005, +0x0006000e, +0xe5126000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe08aa009, +0xe12fff1c, +0x0006000f, +0xe516e004, +0xe004a2ae, +0xe049000a, +0xe5100010, +0xe5100000, +0x000d8180, +0xe5105000, +0x000d8180, +0xea000000, +0x0005000e, +0x00060011, +0xe2266000, +0x000a0000, +0xe3160000, +0x000a0000, +0x00000000, +0x13a03000, +0x1a000000, +0x0005000b, +0xe0499006, +0xe5196004, +0xe3160000, +0x000a0000, +0x13a03000, +0xea000000, +0x0005000b, +0x00000000, +0xe089a00a, +0xe1a0c009, +0xe14a21d0, +0xe14a00d8, +0xe28a9008, +0xe1ca20f8, +0xe1ca01f0, +0xe14a21d8, +0xe3a0b010, +0xe1ca20f0, +0xe3730000, +0x000a0000, +0x1a000000, 
+0x00050025, +0xe5096004, +0xe5126000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe08aa009, +0xe12fff1c, +0x00000000, +0xe089a00a, +0xe51ac010, +0xe51a0008, +0xe51ce000, +0x000d8180, +0xe51c1000, +0x000d8180, +0xe2866004, +0x0006000b, +0xe050b00e, +0xe0812180, +0x2a000000, +0x00050005, +0xe1c220d0, +0xe3730000, +0x000a0000, +0x02800001, +0x0a000000, +0x0005000b, +0xe156b0b2, +0xe3e01000, +0x000a0000, +0xe1ca20f8, +0xe086b10b, +0xe280c001, +0xe1ca00f0, +0xe24b6b80, +0xe50ac008, +0x0006000d, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xe51c3000, +0x000d8180, +0xe51cc000, +0x000d8180, +0x00060010, +0xe08b008b, +0xe15b0003, +0xe08c2180, +0x8a000000, +0x0005000d, +0xe14200d0, +0x000c8100, +0xe3710000, +0x000a0000, +0xe28bb001, +0x0a000000, +0x00050010, +0xe156c0b2, +0xe08bb00e, +0xe14220d0, +0x000c8100, +0xe50ab008, +0xe1ca00f8, +0xe086b10c, +0xe24b6b80, +0xe1ca20f0, +0xea000000, +0x0005000d, +0x00000000, +0xe089a00a, +0xe086b10b, +0xe14a01d8, +0xe51a200c, +0xe51a3004, +0xe3710000, +0x000a0000, +0x05500000, +0x000d8180, +0x03720000, +0x000a0000, +0x03730000, +0x000a0000, +0x03500000, +0x000a0000, +0x024b6b80, +0x1a000000, +0x00050005, +0xe5d6c000, +0xe496e004, +0xe3a00000, +0xe50a0008, +0x0006000b, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xe3a00000, +0x000a0000, +0xe3a0c000, +0x000a0000, +0xe5460004, +0xe24b6b80, +0xe5c6c000, +0xe496e004, +0xea000000, +0x0005000b, +0x00000000, +0xe004caae, +0xe004b6ae, +0xe5190004, +0xe089b00b, +0xe089a00a, +0xe28bb000, +0x000a0000, +0xe08a300c, +0xe2492008, +0xe04bb000, +0xe35c0000, +0xe042000b, +0x0a000000, +0x00050005, +0xe2433010, +0x0006000b, +0xe15b0002, +0x30cb00d8, +0x23e01000, +0x000a0000, +0xe15a0003, +0xe0ca00f8, +0x3a000000, +0x0005000b, +0x0006000c, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000f, +0xe5183000, +0x000d8180, +0xe3500000, +0xd3a0c008, +0xc280c008, +0xe08a1000, +0xe58dc004, +0xda000000, +0x0005000c, +0xe1510003, +0x8a000000, +0x00050007, +0x00060010, +0xe0cb00d8, +0xe0ca00f8, +0xe15b0002, +0x3a000000, +0x00050010, +0xea000000, +0x0005000c, +0x00060011, +0xe1a011a0, +0xe508a000, +0x000d8180, +0xe1a00008, +0xe5089000, +0x000d8180, +0xe04bb009, +0xe58d6008, +0xe04aa009, +0xeb000000, +0x00030000, +0xe5189000, +0x000d8180, +0xe089a00a, +0xe089b00b, +0xe2492008, +0xea000000, +0x00050010, +0x00000000, +0xe59d0004, +0xe5196004, +0xe089a00a, +0xe080b18b, +0xea000000, +0x000500a1, +0x00000000, +0xe5196004, +0xe1a0b18b, +0xe089a00a, +0x000600a1, +0xe58db004, +0x0006000b, +0xe2160000, +0x000a0000, +0xe2261000, +0x000a0000, +0x1a000000, +0x000500a2, +0x00060017, +0xe516e004, +0xe25b3008, +0xe2492008, +0x0a000000, +0x00050003, +0x0006000c, +0xe0ca00d8, +0xe2899008, +0xe2533008, +0xe14901f0, +0x1a000000, +0x0005000c, +0x0006000d, +0xe004a2ae, +0xe042300a, +0xe004caae, +0xe5130008, +0x0006000f, +0xe15c000b, +0x8a000000, +0x00050006, +0xe1a09003, +0xe5101000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe5115000, +0x000d8180, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00060010, +0xe3e01000, +0x000a0000, +0xe2899008, +0xe28bb008, +0xe509100c, +0xea000000, +0x0005000f, +0x000600a3, +0xe089a00a, +0x000600a2, +0xe3110000, +0x000a0000, +0x1a000000, +0x00050018, +0xe0499001, +0xe5196004, +0xea000000, +0x0005000b, +0x00000000, +0xe5196004, +0xe1a0b18b, +0xe58db004, +0xe2160000, +0x000a0000, +0xe2261000, +0x000a0000, +0x0516e004, +0x1a000000, +0x000500a3, +0x00000000, +0xe18900da, +0x00000000, +0xe2493008, 
+0xe004a2ae, +0x00000000, +0xe1c300f0, +0x00000000, +0xe043900a, +0xe004caae, +0xe5190008, +0x0006000f, +0xe15c000b, +0x8a000000, +0x00050006, +0xe5101000, +0x000d8180, +0xe5d6c000, +0xe496e004, +0xe5115000, +0x000d8180, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00060010, +0xe2431004, +0xe3e02000, +0x000a0000, +0xe781200b, +0xe28bb008, +0xea000000, +0x0005000f, +0x00000000, +0xe1a000a6, +0xe200007e, +0xe2400000, +0x000a0000, +0xe19710b0, +0xe2511001, +0xe18710b0, +0x0a000000, +0x00050091, +0x00000000, +0xe1aa00d9, +0x00000000, +0xe086b10b, +0x00000000, +0xe1ca20d8, +0xe3710000, +0x000a0000, +0xe59ac014, +0x1a000000, +0x00050005, +0xe3730000, +0x000a0000, +0xe59a3010, +0x037c0000, +0x000a0000, +0x1a000000, +0x00050043, +0xe3530000, +0xba000000, +0x00050004, +0xe1500002, +0x00000000, +0xe1ca21d0, +0xe3710000, +0x000a0000, +0x1a000000, +0x00050005, +0xe0900002, +0xe59a3008, +0x00000000, +0x6286bb80, +0x00000000, +0x6a000000, +0x00050002, +0x00000000, +0xe3520000, +0xba000000, +0x00050004, +0xe1500003, +0x00000000, +0x0006000b, +0x00000000, +0xc24b6b80, +0x00000000, +0xe24b6b80, +0xd156b0b2, +0x00000000, +0xd24b6b80, +0x00000000, +0xe1ca00f0, +0x00000000, +0x0006000c, +0xe5d6c000, +0xe496e004, +0xe1ca01f8, +0x00000000, +0xda000000, +0x00070000, +0x00000000, +0x0006000d, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x0006000e, +0x00000000, +0xe1520000, +0x00000000, +0xe1530000, +0x00000000, +0xea000000, +0x0005000b, +0x0006000f, +0x00000000, +0x33730000, +0x000a0000, +0x337c0000, +0x000a0000, +0x2a000000, +0x00050043, +0xe35c0000, +0xe1ca00f0, +0xe1ca01f8, +0xba000000, +0x00050008, +0x00000000, +0xe3530000, +0xba000000, +0x00050008, +0xeb000000, +0x0003002e, +0xe1ca00f0, +0xe1ca20d8, +0xe1ca01f8, +0x00000000, +0x00060010, +0xeb000000, +0x00030024, +0x00000000, +0x824b6b80, +0x00000000, +0xe24b6b80, +0x9156b0b2, +0x9a000000, +0x00070000, +0x00000000, +0x924b6b80, +0x00000000, +0x9a000000, +0x00070000, +0x00000000, +0xe5d6c000, +0xe496e004, +0xea000000, +0x0005000d, +0x00060012, +0x00000000, +0xeb000000, +0x0003002e, +0xe1ca00f0, +0xe1ca01f8, +0x00000000, +0xe1a02000, +0xe1a03001, +0xe1ca00d8, +0xea000000, +0x00050010, +0x00000000, +0xe1a000a6, +0xe200007e, +0xe2400000, +0x000a0000, +0xe19710b0, +0xe2511001, +0xe18710b0, +0x0a000000, +0x00050091, +0x00000000, +0xe1aa00d9, +0x00000000, +0xe3710000, +0x000a0000, +0x114a00f8, +0x1a000000, +0x00070000, +0x00000000, +0xe086b10b, +0xe3710000, +0x000a0000, +0x124b6b80, +0x114a00f8, +0x00000000, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1a000a6, +0xe200007e, +0xe2400000, +0x000a0000, +0xe19710b0, +0xe2511001, +0xe18710b0, +0x0a000000, +0x00050091, +0x00000000, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe5170000, +0x000d8180, +0xe3a01000, +0xe790b10b, +0xe5071000, +0x000d8180, +0xe51ba000, +0x000d8180, +0xe5079000, +0x000d8180, +0xe5078000, +0x000d8180, +0xe12fff1a, +0x00000000, +0xe086b10b, +0xe24b6b80, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe1a000a6, +0xe200007e, +0xe2400000, +0x000a0000, +0xe19710b0, +0xe2511001, +0xe18710b0, +0x0a000000, +0x00050093, +0x00000000, +0xe5180000, +0x000d8180, +0xe5561000, +0x000d8180, +0xe5165000, +0x000d8180, +0xe15a0000, +0x8a000000, +0x00050020, +0x00000000, +0xe5d6c000, +0xe496e004, +0x00000000, +0x0006000c, +0xe15b0181, +0xe3e03000, +0x000a0000, +0xda000000, +0x00050003, +0x00000000, +0xe1a0b82e, +0xea000000, +0x00070000, +0x00000000, 
+0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0x0006000d, +0xe18920fb, +0xe28bb008, +0xea000000, +0x0005000c, +0x00000000, +0xe7f001f0, +0x00000000, +0xe5180000, +0x000d8180, +0xe089300b, +0xe08aa00b, +0xe5832000, +0xe28b1000, +0x000a0000, +0xe5165000, +0x000d8180, +0xe15a0000, +0xe5831004, +0x2a000000, +0x00050020, +0xe556c000, +0x000d8180, +0xe1a0a009, +0xe1a0b003, +0xe35c0000, +0xe2839008, +0x0a000000, +0x00050003, +0xe3e02000, +0x000a0000, +0x0006000b, +0xe15a000b, +0x30ca00d8, +0x21a01002, +0x350a2004, +0x0006000c, +0xe25cc001, +0xe1e300f8, +0x1a000000, +0x0005000b, +0x0006000d, +0xe5d6c000, +0xe496e004, +0xe797c10c, +0xe004a2ae, +0xe1a0b82e, +0xe12fff1c, +0x00000000, +0xe5123000, +0x000d8180, +0x00000000, +0xe5173000, +0x000d8180, +0x00000000, +0xe08a100b, +0xe5180000, +0x000d8180, +0xe089b00b, +0xe5089000, +0x000d8180, +0xe1510000, +0xe508b000, +0x000d8180, +0x00000000, +0xe5121000, +0x000d8180, +0x00000000, +0xe3e02000, +0x000a0000, +0xe1a00008, +0x8a000000, +0x0005001f, +0xe5072000, +0x000d8180, +0xe12fff33, +0xe5189000, +0x000d8180, +0xe3e02000, +0x000a0000, +0xe5181000, +0x000d8180, +0xe1a0b180, +0xe5072000, +0x000d8180, +0xe5196004, +0xe041a00b, +0xea000000, +0x00050016, +0x00000000, +0x00010000 +}; + +enum { + GLOB_vm_returnp, + GLOB_cont_dispatch, + GLOB_vm_returnc, + GLOB_BC_RET_Z, + GLOB_vm_return, + GLOB_vm_leave_cp, + GLOB_vm_leave_unw, + GLOB_vm_unwind_c, + GLOB_vm_unwind_c_eh, + GLOB_vm_unwind_ff, + GLOB_vm_unwind_ff_eh, + GLOB_vm_growstack_c, + GLOB_vm_growstack_l, + GLOB_vm_resume, + GLOB_vm_pcall, + GLOB_vm_call, + GLOB_vm_call_dispatch, + GLOB_vmeta_call, + GLOB_vm_call_dispatch_f, + GLOB_vm_cpcall, + GLOB_vm_call_tail, + GLOB_cont_cat, + GLOB_BC_CAT_Z, + GLOB_cont_nop, + GLOB_vmeta_tgets1, + GLOB_vmeta_tgets, + GLOB_vmeta_tgetb, + GLOB_vmeta_tgetv, + GLOB_vmeta_tsets1, + GLOB_vmeta_tsets, + GLOB_vmeta_tsetb, + GLOB_vmeta_tsetv, + GLOB_vmeta_comp, + GLOB_vmeta_binop, + GLOB_cont_ra, + GLOB_cont_condt, + GLOB_cont_condf, + GLOB_vmeta_equal, + GLOB_vmeta_equal_cd, + GLOB_vmeta_arith_vn, + GLOB_vmeta_arith_nv, + GLOB_vmeta_unm, + GLOB_vmeta_arith_vv, + GLOB_vmeta_len, + GLOB_BC_LEN_Z, + GLOB_vmeta_callt, + GLOB_BC_CALLT2_Z, + GLOB_vmeta_for, + GLOB_ff_assert, + GLOB_fff_fallback, + GLOB_fff_res, + GLOB_ff_type, + GLOB_fff_restv, + GLOB_ff_getmetatable, + GLOB_ff_setmetatable, + GLOB_ff_rawget, + GLOB_ff_tonumber, + GLOB_ff_tostring, + GLOB_fff_gcstep, + GLOB_ff_next, + GLOB_ff_pairs, + GLOB_ff_ipairs_aux, + GLOB_ff_ipairs, + GLOB_ff_pcall, + GLOB_ff_xpcall, + GLOB_ff_coroutine_resume, + GLOB_ff_coroutine_wrap_aux, + GLOB_ff_coroutine_yield, + GLOB_ff_math_floor, + GLOB_vm_floor, + GLOB_ff_math_ceil, + GLOB_vm_ceil, + GLOB_ff_math_abs, + GLOB_fff_res1, + GLOB_ff_math_sqrt, + GLOB_ff_math_log, + GLOB_ff_math_log10, + GLOB_ff_math_exp, + GLOB_ff_math_sin, + GLOB_ff_math_cos, + GLOB_ff_math_tan, + GLOB_ff_math_asin, + GLOB_ff_math_acos, + GLOB_ff_math_atan, + GLOB_ff_math_sinh, + GLOB_ff_math_cosh, + GLOB_ff_math_tanh, + GLOB_ff_math_pow, + GLOB_ff_math_atan2, + GLOB_ff_math_fmod, + GLOB_ff_math_deg, + GLOB_ff_math_rad, + GLOB_ff_math_ldexp, + GLOB_ff_math_frexp, + GLOB_ff_math_modf, + GLOB_ff_math_min, + GLOB_ff_math_max, + GLOB_ff_string_len, + GLOB_ff_string_byte, + GLOB_ff_string_char, + GLOB_fff_newstr, + GLOB_ff_string_sub, + GLOB_fff_emptystr, + GLOB_ff_string_rep, + GLOB_ff_string_reverse, + GLOB_ff_string_lower, + GLOB_ff_string_upper, + GLOB_ff_table_getn, + GLOB_vm_tobit_fb, + GLOB_vm_tobit, + GLOB_ff_bit_tobit, + GLOB_ff_bit_band, + 
GLOB_ff_bit_bor, + GLOB_ff_bit_bxor, + GLOB_ff_bit_bswap, + GLOB_ff_bit_bnot, + GLOB_ff_bit_lshift, + GLOB_ff_bit_rshift, + GLOB_ff_bit_arshift, + GLOB_ff_bit_rol, + GLOB_ff_bit_ror, + GLOB_vm_record, + GLOB_vm_rethook, + GLOB_vm_inshook, + GLOB_cont_hook, + GLOB_vm_hotloop, + GLOB_vm_callhook, + GLOB_vm_hotcall, + GLOB_vm_exit_handler, + GLOB_vm_exit_interp, + GLOB_vm_trunc, + GLOB_vm_mod, + GLOB_vm_modi, + GLOB_vm_foldarith, + GLOB_vm_ffi_call, + GLOB_BC_ISEQN_Z, + GLOB_BC_ISNEN_Z, + GLOB_BC_TGETS_Z, + GLOB_BC_TSETS_Z, + GLOB_BC_CALL_Z, + GLOB_BC_CALLT1_Z, + GLOB_BC_RETM_Z, + GLOB_BC_RETV2_Z, + GLOB_BC_RETV1_Z, + GLOB__MAX +}; +static const char *const globnames[] = { + "vm_returnp", + "cont_dispatch", + "vm_returnc", + "BC_RET_Z", + "vm_return", + "vm_leave_cp", + "vm_leave_unw", + "vm_unwind_c", + "vm_unwind_c_eh", + "vm_unwind_ff", + "vm_unwind_ff_eh", + "vm_growstack_c", + "vm_growstack_l", + "vm_resume", + "vm_pcall", + "vm_call", + "vm_call_dispatch", + "vmeta_call", + "vm_call_dispatch_f", + "vm_cpcall", + "vm_call_tail", + "cont_cat", + "BC_CAT_Z", + "cont_nop", + "vmeta_tgets1", + "vmeta_tgets", + "vmeta_tgetb", + "vmeta_tgetv", + "vmeta_tsets1", + "vmeta_tsets", + "vmeta_tsetb", + "vmeta_tsetv", + "vmeta_comp", + "vmeta_binop", + "cont_ra", + "cont_condt", + "cont_condf", + "vmeta_equal", + "vmeta_equal_cd", + "vmeta_arith_vn", + "vmeta_arith_nv", + "vmeta_unm", + "vmeta_arith_vv", + "vmeta_len", + "BC_LEN_Z", + "vmeta_callt", + "BC_CALLT2_Z", + "vmeta_for", + "ff_assert", + "fff_fallback", + "fff_res", + "ff_type", + "fff_restv", + "ff_getmetatable", + "ff_setmetatable", + "ff_rawget", + "ff_tonumber", + "ff_tostring", + "fff_gcstep", + "ff_next", + "ff_pairs", + "ff_ipairs_aux", + "ff_ipairs", + "ff_pcall", + "ff_xpcall", + "ff_coroutine_resume", + "ff_coroutine_wrap_aux", + "ff_coroutine_yield", + "ff_math_floor", + "vm_floor", + "ff_math_ceil", + "vm_ceil", + "ff_math_abs", + "fff_res1", + "ff_math_sqrt", + "ff_math_log", + "ff_math_log10", + "ff_math_exp", + "ff_math_sin", + "ff_math_cos", + "ff_math_tan", + "ff_math_asin", + "ff_math_acos", + "ff_math_atan", + "ff_math_sinh", + "ff_math_cosh", + "ff_math_tanh", + "ff_math_pow", + "ff_math_atan2", + "ff_math_fmod", + "ff_math_deg", + "ff_math_rad", + "ff_math_ldexp", + "ff_math_frexp", + "ff_math_modf", + "ff_math_min", + "ff_math_max", + "ff_string_len", + "ff_string_byte", + "ff_string_char", + "fff_newstr", + "ff_string_sub", + "fff_emptystr", + "ff_string_rep", + "ff_string_reverse", + "ff_string_lower", + "ff_string_upper", + "ff_table_getn", + "vm_tobit_fb", + "vm_tobit", + "ff_bit_tobit", + "ff_bit_band", + "ff_bit_bor", + "ff_bit_bxor", + "ff_bit_bswap", + "ff_bit_bnot", + "ff_bit_lshift", + "ff_bit_rshift", + "ff_bit_arshift", + "ff_bit_rol", + "ff_bit_ror", + "vm_record", + "vm_rethook", + "vm_inshook", + "cont_hook", + "vm_hotloop", + "vm_callhook", + "vm_hotcall", + "vm_exit_handler", + "vm_exit_interp", + "vm_trunc", + "vm_mod", + "vm_modi", + "vm_foldarith", + "vm_ffi_call", + "BC_ISEQN_Z", + "BC_ISNEN_Z", + "BC_TGETS_Z", + "BC_TSETS_Z", + "BC_CALL_Z", + "BC_CALLT1_Z", + "BC_RETM_Z", + "BC_RETV2_Z", + "BC_RETV1_Z", + (const char *)0 +}; +static const char *const extnames[] = { + "lj_state_growstack", + "lj_meta_tget", + "lj_meta_tset", + "lj_meta_comp", + "lj_meta_equal", + "lj_meta_equal_cd", + "lj_meta_arith", + "lj_meta_len", + "lj_meta_call", + "lj_meta_for", + "lj_tab_get", + "lj_str_fromnumber", + "lj_tab_next", + "lj_tab_getinth", + "lj_ffh_coroutine_wrap_err", + "sqrt", + "log", + "log10", + "exp", + 
"sin", + "cos", + "tan", + "asin", + "acos", + "atan", + "sinh", + "cosh", + "tanh", + "pow", + "atan2", + "fmod", + "__aeabi_dmul", + "ldexp", + "frexp", + "modf", + "__aeabi_i2d", + "__aeabi_cdcmple", + "lj_str_new", + "lj_tab_len", + "lj_gc_step", + "lj_dispatch_ins", + "lj_trace_hot", + "lj_dispatch_call", + "lj_trace_exit", + "lj_err_throw", + "__aeabi_ddiv", + "__aeabi_dadd", + "__aeabi_dsub", + "__aeabi_cdcmpeq", + "lj_meta_cat", + "lj_gc_barrieruv", + "lj_func_closeuv", + "lj_func_newL_gc", + "lj_tab_new", + "lj_tab_dup", + "lj_gc_step_fixtop", + "lj_tab_newkey", + "lj_tab_reasize", + (const char *)0 +}; +#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V) +#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V) +#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V) +#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V) +#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V) +#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V) +#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V) +#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V) +#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V) +#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V) +#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V) +#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V) +#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V) +#define field_pc pc +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) + +#if !LJ_DUALNUM +#error "Only dual-number mode supported for ARM target" +#endif + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. 
*/ +static void build_subroutines(BuildCtx *ctx) +{ + dasm_put(Dst, 0); + dasm_put(Dst, 1, FRAME_P, ~LJ_TTRUE, FRAME_TYPE, FRAME_TYPEP, FRAME_C, Dt1(->base), LJ_VMST_C, DISPATCH_GL(vmstate), Dt1(->top)); + dasm_put(Dst, 54, Dt1(->cframe), Dt1(->maxstack), ~LJ_TNIL, Dt1(->top), Dt1(->top), LJ_VMST_C, Dt1(->glref), Dt2(->vmstate)); + dasm_put(Dst, 108, ~CFRAME_RAWMASK, Dt1(->base), Dt1(->glref), ~LJ_TFALSE, GG_G2DISP, LJ_VMST_INTERP, DISPATCH_GL(vmstate), LUA_MINSTACK, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top), Dt7(->field_pc), Dt1(->glref)); + dasm_put(Dst, 173, GG_G2DISP, FRAME_CP, CFRAME_RESUME, Dt1(->status), Dt1(->cframe), Dt1(->base), Dt1(->top), Dt1(->status), LJ_VMST_INTERP, FRAME_TYPE, DISPATCH_GL(vmstate), FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe)); + dasm_put(Dst, 238, Dt1(->glref), GG_G2DISP, Dt1(->base), Dt1(->top), LJ_VMST_INTERP, DISPATCH_GL(vmstate), -LJ_TFUNC, Dt7(->field_pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), Dt1(->glref), FRAME_CP, GG_G2DISP); + dasm_put(Dst, 307, Dt7(->field_pc), ~LJ_TNIL, PC2PROTO(k), Dt1(->base), -DISPATCH_GL(tmptv), ~LJ_TTAB, ~LJ_TSTR, ~LJ_TISNUM); + dasm_put(Dst, 378, Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 388, Dt1(->base)); + } + dasm_put(Dst, 391, FRAME_CONT, Dt1(->top), -DISPATCH_GL(tmptv), ~LJ_TTAB, ~LJ_TSTR, ~LJ_TISNUM, Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 453, Dt1(->base)); + } + dasm_put(Dst, 456, FRAME_CONT, Dt1(->top), Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 492, Dt1(->base)); + } + dasm_put(Dst, 495, ~LJ_TTRUE, -LJ_TFALSE, Dt1(->base)); +#if LJ_HASFFI + dasm_put(Dst, 542, Dt1(->base)); +#endif + dasm_put(Dst, 553, Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 590, Dt1(->base)); + } + dasm_put(Dst, 593, FRAME_CONT, Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 614, Dt1(->base)); + } +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 617); +#else + dasm_put(Dst, 624); +#endif + dasm_put(Dst, 627, Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 635); + } + dasm_put(Dst, 637); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 640); + } + dasm_put(Dst, 642, Dt7(->field_pc), Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 663, Dt1(->base)); + } + dasm_put(Dst, 666, Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 680, Dt1(->base)); + } +#if LJ_HASJIT + dasm_put(Dst, 683); +#endif + dasm_put(Dst, 685); +#if LJ_HASJIT + dasm_put(Dst, 687, BC_JFORI); +#endif + dasm_put(Dst, 690); +#if LJ_HASJIT + dasm_put(Dst, 693, BC_JFORI); +#endif + dasm_put(Dst, 696, BC_FORI, -LJ_TTRUE, -LJ_TISNUM, ~LJ_TISNUM, (int)(offsetof(GCfuncC, upvalue)>>3)-1, -LJ_TTAB, -LJ_TUDATA, Dt6(->metatable)); + dasm_put(Dst, 753, ~LJ_TNIL, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable]), Dt6(->hmask), Dt5(->hash), Dt6(->node), DtB(->key), DtB(->val), DtB(->next), -LJ_TSTR, ~LJ_TTAB, -LJ_TNIL, -LJ_TISNUM); + dasm_put(Dst, 801, ~LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT]), -LJ_TTAB, Dt6(->metatable), -LJ_TTAB, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), DISPATCH_GL(gc.grayagain), LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist), -LJ_TTAB); + dasm_put(Dst, 853); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 858); + } + dasm_put(Dst, 860); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 863); + } + dasm_put(Dst, 865, -LJ_TISNUM, -LJ_TSTR, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), Dt1(->base), -LJ_TISNUM, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), ~LJ_TSTR); + dasm_put(Dst, 917, ~LJ_TNIL, -LJ_TTAB, Dt1(->base), Dt1(->top)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 940, 
Dt1(->base)); + } + dasm_put(Dst, 943, ~LJ_TNIL, (2+1)*8, -LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 966, Dt6(->metatable)); +#endif + dasm_put(Dst, 969, Dt8(->upvalue[0])); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 973); +#endif + dasm_put(Dst, 977, ~LJ_TNIL, (3+1)*8, -LJ_TTAB, -LJ_TISNUM, Dt6(->asize), Dt6(->array), (0+1)*8, -LJ_TNIL, (2+1)*8, Dt6(->hmask)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1027); + } + dasm_put(Dst, 1029); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1032); + } + dasm_put(Dst, 1034, -LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 1050, Dt6(->metatable)); +#endif + dasm_put(Dst, 1053, Dt8(->upvalue[0])); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 1057); +#endif + dasm_put(Dst, 1061, ~LJ_TISNUM, (3+1)*8, DISPATCH_GL(hookmask), HOOK_ACTIVE, 8+FRAME_PCALL, 8+FRAME_PCALLH, DISPATCH_GL(hookmask), -LJ_TFUNC, HOOK_ACTIVE, 16+FRAME_PCALL, 16+FRAME_PCALLH, -LJ_TTHREAD); + dasm_put(Dst, 1120, Dt1(->base), Dt1(->top), Dt1(->status), Dt1(->base), Dt1(->maxstack), Dt1(->cframe), LUA_YIELD, Dt1(->top), Dt1(->top), Dt1(->base), LJ_VMST_INTERP, Dt1(->top), DISPATCH_GL(vmstate), LUA_YIELD); + dasm_put(Dst, 1179, Dt1(->base), Dt1(->maxstack), Dt1(->top), ~LJ_TTRUE, FRAME_TYPE, ~LJ_TFALSE, (2+1)*8, Dt1(->top)); + dasm_put(Dst, 1239, Dt8(->upvalue[0].gcr), Dt1(->base), Dt1(->top), Dt1(->status), Dt1(->base), Dt1(->maxstack), Dt1(->cframe), LUA_YIELD, Dt1(->top), Dt1(->top), Dt1(->base), LJ_VMST_INTERP, Dt1(->top), DISPATCH_GL(vmstate), LUA_YIELD); + dasm_put(Dst, 1295, Dt1(->base), Dt1(->maxstack), Dt1(->top), FRAME_TYPE, Dt1(->cframe), Dt1(->base), CFRAME_RESUME, Dt1(->top)); + dasm_put(Dst, 1354, LUA_YIELD, Dt1(->cframe), Dt1(->status), -LJ_TISNUM, ~LJ_TISNUM, ~LJ_TISNUM); + dasm_put(Dst, 1420, -LJ_TISNUM, ~LJ_TISNUM, ~LJ_TISNUM); + dasm_put(Dst, 1484, -LJ_TISNUM, (1+1)*8, FRAME_TYPE, ~LJ_TNIL); + dasm_put(Dst, 1548, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1553); + } + dasm_put(Dst, 1555); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1558); + } + dasm_put(Dst, 1560, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1572); + } + dasm_put(Dst, 1574); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1577); + } + dasm_put(Dst, 1579, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1591); + } + dasm_put(Dst, 1593); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1596); + } + dasm_put(Dst, 1598, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1610); + } + dasm_put(Dst, 1612); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1615); + } + dasm_put(Dst, 1617, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1629); + } + dasm_put(Dst, 1631); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1634); + } + dasm_put(Dst, 1636, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1648); + } + dasm_put(Dst, 1650); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1653); + } + dasm_put(Dst, 1655, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1667); + } + dasm_put(Dst, 1669); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1672); + } + dasm_put(Dst, 1674, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1686); + } + dasm_put(Dst, 1688); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1691); + } + dasm_put(Dst, 1693, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1705); + } + dasm_put(Dst, 1707); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1710); + } + dasm_put(Dst, 1712, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1724); + } + dasm_put(Dst, 1726); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1729); + } + dasm_put(Dst, 1731, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1743); + } + dasm_put(Dst, 
1745); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1748); + } + dasm_put(Dst, 1750, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1762); + } + dasm_put(Dst, 1764); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1767); + } + dasm_put(Dst, 1769, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1781); + } + dasm_put(Dst, 1783); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1786); + } + dasm_put(Dst, 1788, -LJ_TISNUM, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1803); + } + dasm_put(Dst, 1805); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1808); + } + dasm_put(Dst, 1810, -LJ_TISNUM, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1825); + } + dasm_put(Dst, 1827); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1830); + } + dasm_put(Dst, 1832, -LJ_TISNUM, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1847); + } + dasm_put(Dst, 1849); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1852); + } + dasm_put(Dst, 1854, -LJ_TISNUM, Dt8(->upvalue[0]), -LJ_TISNUM, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1887); + } + dasm_put(Dst, 1889); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1892); + } + dasm_put(Dst, 1894, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1907); + } + dasm_put(Dst, 1909); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1912); + } + dasm_put(Dst, 1914, ~LJ_TISNUM, (2+1)*8, -LJ_TISNUM); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1936); + } + dasm_put(Dst, 1938); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 1941); + } + dasm_put(Dst, 1943, (2+1)*8, -LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM); + dasm_put(Dst, 1994, -LJ_TISNUM, -LJ_TISNUM); + dasm_put(Dst, 2048, -LJ_TISNUM, -LJ_TSTR, Dt5(->len), ~LJ_TISNUM, -LJ_TSTR, Dt5(->len), Dt5([1])); + dasm_put(Dst, 2102, ~LJ_TISNUM, (0+1)*8, (1+1)*8, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), -LJ_TISNUM, Dt1(->base), Dt1(->base), ~LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 2161, -LJ_TISNUM, -LJ_TSTR, Dt5(->len), -LJ_TISNUM, sizeof(GCstr)-1, -DISPATCH_GL(strempty), ~LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), -LJ_TSTR, -LJ_TISNUM, Dt5(->len)); + dasm_put(Dst, 2223, DISPATCH_GL(tmpbuf.sz), DISPATCH_GL(tmpbuf.buf), Dt5([1]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), -LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), DISPATCH_GL(tmpbuf.buf), sizeof(GCstr)); + dasm_put(Dst, 2276, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), -LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), DISPATCH_GL(tmpbuf.buf), sizeof(GCstr), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 2336, -LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), DISPATCH_GL(tmpbuf.buf), sizeof(GCstr), -LJ_TTAB); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 2374); + } + dasm_put(Dst, 2376); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 2379); + } + dasm_put(Dst, 2381, ~LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM); + dasm_put(Dst, 2452, -LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM); + dasm_put(Dst, 2512, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM); + dasm_put(Dst, 2568, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM); + dasm_put(Dst, 2625, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, Dt1(->maxstack), Dt1(->top), Dt8(->f), Dt1(->base), 8*LUA_MINSTACK, Dt1(->base), Dt1(->top), Dt7(->field_pc), FRAME_TYPE, FRAME_TYPEP); + dasm_put(Dst, 2688, LUA_MINSTACK, Dt1(->base), Dt1(->base), Dt1(->top), Dt1(->base)); +#if LJ_HASJIT + dasm_put(Dst, 2722, DISPATCH_GL(hookmask), HOOK_VMEVENT, DISPATCH_GL(hookcount), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, 
DISPATCH_GL(hookcount)); +#endif + dasm_put(Dst, 2742, DISPATCH_GL(hookmask), HOOK_ACTIVE, GG_DISP2STATIC, DISPATCH_GL(hookmask), DISPATCH_GL(hookcount), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE, Dt1(->base), Dt1(->base)); + dasm_put(Dst, 2788, GG_DISP2STATIC); +#if LJ_HASJIT + dasm_put(Dst, 2804, -GG_DISP2J, Dt7(->field_pc), DISPATCH_J(L), PC2PROTO(framesize), Dt1(->base), Dt1(->top)); +#endif + dasm_put(Dst, 2825); +#if LJ_HASJIT + dasm_put(Dst, 2828); +#endif + dasm_put(Dst, 2831); +#if LJ_HASJIT + dasm_put(Dst, 2833); +#endif + dasm_put(Dst, 2836, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 2859, LJ_VMST_EXIT, DISPATCH_GL(vmstate), DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_J(exitno), DISPATCH_J(L), Dt1(->base), DISPATCH_GL(jit_L), -GG_DISP2J, Dt1(->cframe), Dt1(->base), ~CFRAME_RAWMASK); +#endif + dasm_put(Dst, 2907); +#if LJ_HASJIT + dasm_put(Dst, 2909, Dt7(->field_pc), DISPATCH_GL(jit_L), LJ_VMST_INTERP, PC2PROTO(k), DISPATCH_GL(vmstate), BC_FUNCF); +#endif + dasm_put(Dst, 2946); +#if LJ_HASJIT + dasm_put(Dst, 3020); +#endif + dasm_put(Dst, 3034); + { + int i; + for (i = 31; i >= 0; i--) { + dasm_put(Dst, 3070, i, i); + } + } + dasm_put(Dst, 3075); +#if LJ_HASJIT + dasm_put(Dst, 3104); +#else + dasm_put(Dst, 3129); +#endif + dasm_put(Dst, 3131); +#if LJ_HASFFI +#define DtE(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V) + dasm_put(Dst, 3133, DtE(->spadj), DtE(->nsp), offsetof(CCallState, stack), DtE(->func), DtE(->gpr[0]), DtE(->gpr[1]), DtE(->gpr[2]), DtE(->gpr[3]), DtE(->gpr[0]), DtE(->gpr[1])); +#endif +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + dasm_put(Dst, 3171, defop); + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. 
*/ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + dasm_put(Dst, 3173, -LJ_TISNUM, -LJ_TISNUM); + if (op == BC_ISLT) { + dasm_put(Dst, 3189); + } else if (op == BC_ISGE) { + dasm_put(Dst, 3191); + } else if (op == BC_ISLE) { + dasm_put(Dst, 3193); + } else { + dasm_put(Dst, 3195); + } + dasm_put(Dst, 3197, -LJ_TISNUM); + if (op == BC_ISLT) { + dasm_put(Dst, 3233); + } else if (op == BC_ISGE) { + dasm_put(Dst, 3235); + } else if (op == BC_ISLE) { + dasm_put(Dst, 3237); + } else { + dasm_put(Dst, 3239); + } + dasm_put(Dst, 3241); + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + dasm_put(Dst, 3244, -LJ_TISNUM, -LJ_TISNUM); + if (vk) { + dasm_put(Dst, 3255); + } else { + dasm_put(Dst, 3258); + } + if (LJ_HASFFI) { + dasm_put(Dst, 3261, -LJ_TCDATA, -LJ_TCDATA); + } + dasm_put(Dst, 3268, -LJ_TISPRI); + if (vk) { + dasm_put(Dst, 3277, -LJ_TISTABUD); + } else { + dasm_put(Dst, 3294, -LJ_TISTABUD); + } + dasm_put(Dst, 3301, Dt6(->metatable)); + if (vk) { + dasm_put(Dst, 3305); + } else { + dasm_put(Dst, 3308); + } + dasm_put(Dst, 3311, Dt6(->nomm), 1-vk, 1<<MM_eq); + if (vk) { + dasm_put(Dst, 3321); + } else { + dasm_put(Dst, 3324); + } + break; + + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + dasm_put(Dst, 3334, -LJ_TSTR); + if (LJ_HASFFI) { + dasm_put(Dst, 3343); + } else { + dasm_put(Dst, 3347); + } + if (vk) { + dasm_put(Dst, 3349); + } else { + dasm_put(Dst, 3352); + } + dasm_put(Dst, 3355); + if (LJ_HASFFI) { + dasm_put(Dst, 3362, -LJ_TCDATA); + } + break; + + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + dasm_put(Dst, 3370); + if (vk) { + dasm_put(Dst, 3377); + } else { + dasm_put(Dst, 3379); + } + dasm_put(Dst, 3381, -LJ_TISNUM, -LJ_TISNUM); + if (vk) { + dasm_put(Dst, 3391); + } else { + dasm_put(Dst, 3394); + } + dasm_put(Dst, 3397); + if (LJ_HASFFI) { + dasm_put(Dst, 3406); + } else { + if (!vk) { + dasm_put(Dst, 3409); + } + dasm_put(Dst, 3411); + } + dasm_put(Dst, 3414, -LJ_TISNUM); + if (vk) { + dasm_put(Dst, 3430); + } else { + dasm_put(Dst, 3432); + } + dasm_put(Dst, 3434); + if (LJ_HASFFI) { + dasm_put(Dst, 3437, -LJ_TCDATA); + } + break; + + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + dasm_put(Dst, 3445); + if (LJ_HASFFI) { + dasm_put(Dst, 3451, -LJ_TCDATA); + } + dasm_put(Dst, 3456); + if (vk) { + dasm_put(Dst, 3458); + } else { + dasm_put(Dst, 3460); + } + dasm_put(Dst, 3462); + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + dasm_put(Dst, 3469, -LJ_TTRUE); + if (op == BC_ISTC || op == BC_IST) { + dasm_put(Dst, 3477); + if (op == BC_ISTC) { + dasm_put(Dst, 3479); + } + } else { + dasm_put(Dst, 3481); + if (op == BC_ISFC) { + dasm_put(Dst, 3483); + } + } + dasm_put(Dst, 3485); + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + dasm_put(Dst, 3492); + break; + case BC_NOT: + dasm_put(Dst, 3502, -LJ_TTRUE, ~LJ_TFALSE, ~LJ_TTRUE); + break; + case BC_UNM: + dasm_put(Dst, 3519, -LJ_TISNUM); + break; + case BC_LEN: + dasm_put(Dst, 3545, -LJ_TSTR, Dt5(->len), ~LJ_TISNUM, -LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 3569, Dt6(->metatable)); +#endif + dasm_put(Dst, 3576); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 3578); + } + dasm_put(Dst, 3580); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 3583); + } + dasm_put(Dst, 3585); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 3588, Dt6(->nomm), 1<<MM_len); +#endif + break; + + /* -- Binary ops 
-------------------------------------------------------- */ + + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + dasm_put(Dst, 3598); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3601); + break; + case 1: + dasm_put(Dst, 3604); + break; + default: + dasm_put(Dst, 3607); + break; + } + dasm_put(Dst, 3610); + if (vk == 1) { + dasm_put(Dst, 3612, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3617, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3622); + switch (vk) { + case 0: + dasm_put(Dst, 3626); + break; + case 1: + dasm_put(Dst, 3629); + break; + default: + dasm_put(Dst, 3632); + break; + } + dasm_put(Dst, 3635); + switch (vk) { + case 0: + if (vk == 1) { + dasm_put(Dst, 3644, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3649, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3654); + break; + case 1: + if (vk == 1) { + dasm_put(Dst, 3657, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3662, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3667); + break; + default: + if (vk == 1) { + dasm_put(Dst, 3670, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3675, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3680); + break; + } + dasm_put(Dst, 3683); + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + dasm_put(Dst, 3689); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3692); + break; + case 1: + dasm_put(Dst, 3695); + break; + default: + dasm_put(Dst, 3698); + break; + } + dasm_put(Dst, 3701); + if (vk == 1) { + dasm_put(Dst, 3703, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3708, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3713); + switch (vk) { + case 0: + dasm_put(Dst, 3717); + break; + case 1: + dasm_put(Dst, 3720); + break; + default: + dasm_put(Dst, 3723); + break; + } + dasm_put(Dst, 3726); + switch (vk) { + case 0: + if (vk == 1) { + dasm_put(Dst, 3735, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3740, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3745); + break; + case 1: + if (vk == 1) { + dasm_put(Dst, 3748, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3753, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3758); + break; + default: + if (vk == 1) { + dasm_put(Dst, 3761, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3766, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3771); + break; + } + dasm_put(Dst, 3774); + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + dasm_put(Dst, 3780); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3783); + break; + case 1: + dasm_put(Dst, 3786); + break; + default: + dasm_put(Dst, 3789); + break; + } + dasm_put(Dst, 3792); + if (vk == 1) { + dasm_put(Dst, 3794, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3799, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3804); + switch (vk) { + case 0: + dasm_put(Dst, 3809); + break; + case 1: + dasm_put(Dst, 3812); + break; + default: + dasm_put(Dst, 3815); + break; + } + dasm_put(Dst, 3818); + switch (vk) { + case 0: + if (vk == 1) { + dasm_put(Dst, 3827, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3832, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3837); + break; + case 1: + if (vk == 1) { + dasm_put(Dst, 3840, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3845, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3850); + break; + default: + if (vk == 1) { + dasm_put(Dst, 3853, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3858, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3863); + break; + } + dasm_put(Dst, 
3866); + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + dasm_put(Dst, 3872); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3875); + break; + case 1: + dasm_put(Dst, 3878); + break; + default: + dasm_put(Dst, 3881); + break; + } + switch (vk) { + case 0: + if (vk == 1) { + dasm_put(Dst, 3884, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3889, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3894); + break; + case 1: + if (vk == 1) { + dasm_put(Dst, 3897, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3902, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3907); + break; + default: + if (vk == 1) { + dasm_put(Dst, 3910, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3915, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3920); + break; + } + dasm_put(Dst, 3923); + break; + case BC_MODVN: case BC_MODNV: case BC_MODVV: + dasm_put(Dst, 3933); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3936); + break; + case 1: + dasm_put(Dst, 3939); + break; + default: + dasm_put(Dst, 3942); + break; + } + if (vk == 1) { + dasm_put(Dst, 3945, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3950, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3955); + switch (vk) { + case 0: + dasm_put(Dst, 3959); + break; + case 1: + dasm_put(Dst, 3962); + break; + default: + dasm_put(Dst, 3965); + break; + } + dasm_put(Dst, 3968, ~LJ_TISNUM); + switch (vk) { + case 0: + if (vk == 1) { + dasm_put(Dst, 3982, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 3987, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 3992); + break; + case 1: + if (vk == 1) { + dasm_put(Dst, 3995, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 4000, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 4005); + break; + default: + if (vk == 1) { + dasm_put(Dst, 4008, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 4013, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 4018); + break; + } + dasm_put(Dst, 4021); + break; + case BC_POW: + dasm_put(Dst, 4026); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 4029); + break; + case 1: + dasm_put(Dst, 4032); + break; + default: + dasm_put(Dst, 4035); + break; + } + switch (vk) { + case 0: + if (vk == 1) { + dasm_put(Dst, 4038, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 4043, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 4048); + break; + case 1: + if (vk == 1) { + dasm_put(Dst, 4051, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 4056, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 4061); + break; + default: + if (vk == 1) { + dasm_put(Dst, 4064, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 4069, -LJ_TISNUM, -LJ_TISNUM); + } + dasm_put(Dst, 4074); + break; + } + if (LJ_TARGET_OSX) { + dasm_put(Dst, 4077); + } + dasm_put(Dst, 4079); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 4082); + } + dasm_put(Dst, 4084); + break; + + case BC_CAT: + dasm_put(Dst, 4092, Dt1(->base), Dt1(->base)); + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + dasm_put(Dst, 4118, ~LJ_TSTR); + break; + case BC_KCDATA: +#if LJ_HASFFI + dasm_put(Dst, 4130, ~LJ_TCDATA); +#endif + break; + case BC_KSHORT: + dasm_put(Dst, 4142, ~LJ_TISNUM); + break; + case BC_KNUM: + dasm_put(Dst, 4153); + break; + case BC_KPRI: + dasm_put(Dst, 4163); + break; + case BC_KNIL: + dasm_put(Dst, 4173, ~LJ_TNIL); + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + dasm_put(Dst, 4192, 
offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_USETV: + dasm_put(Dst, 4208, offsetof(GCfuncL, uvptr), DtA(->marked), DtA(->closed), DtA(->v), LJ_GC_BLACK, -LJ_TISGCV, -(LJ_TISNUM - LJ_TISGCV), Dt4(->gch.marked), -GG_DISP2G, LJ_GC_WHITES); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 4248); + } else { + dasm_put(Dst, 4255); + } + dasm_put(Dst, 4258); + break; + case BC_USETS: + dasm_put(Dst, 4261, offsetof(GCfuncL, uvptr), ~LJ_TSTR, DtA(->marked), DtA(->v), DtA(->closed), LJ_GC_BLACK, Dt5(->marked), LJ_GC_WHITES, -GG_DISP2G); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 4297); + } else { + dasm_put(Dst, 4304); + } + dasm_put(Dst, 4307); + break; + case BC_USETN: + dasm_put(Dst, 4310, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_USETP: + dasm_put(Dst, 4327, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + + case BC_UCLO: + dasm_put(Dst, 4343, Dt1(->openupval), Dt1(->base), Dt1(->base)); + break; + + case BC_FNEW: + dasm_put(Dst, 4366, Dt1(->base), Dt1(->base), ~LJ_TFUNC); + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + case BC_TDUP: + if (op == BC_TDUP) { + dasm_put(Dst, 4387); + } + dasm_put(Dst, 4389, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base)); + if (op == BC_TNEW) { + dasm_put(Dst, 4402); + } else { + dasm_put(Dst, 4411); + } + dasm_put(Dst, 4415, Dt1(->base), ~LJ_TTAB); + break; + + case BC_GGET: + case BC_GSET: + dasm_put(Dst, 4433, Dt7(->env)); + if (op == BC_GGET) { + dasm_put(Dst, 4439); + } else { + dasm_put(Dst, 4442); + } + break; + + case BC_TGETV: + dasm_put(Dst, 4445, -LJ_TTAB, -LJ_TISNUM, Dt6(->array), Dt6(->asize), -LJ_TNIL, Dt6(->metatable), Dt6(->nomm), 1<<MM_index, -LJ_TSTR); + break; + case BC_TGETS: + dasm_put(Dst, 4502, -LJ_TTAB, Dt6(->hmask), Dt5(->hash), Dt6(->node), DtB(->key), DtB(->val), DtB(->next), -LJ_TSTR, -LJ_TNIL, Dt6(->metatable), ~LJ_TNIL, Dt6(->nomm)); + dasm_put(Dst, 4562, 1<<MM_index); + break; + case BC_TGETB: + dasm_put(Dst, 4569, -LJ_TTAB, Dt6(->asize), Dt6(->array), -LJ_TNIL, Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + break; + + case BC_TSETV: + dasm_put(Dst, 4612, -LJ_TTAB, -LJ_TISNUM, Dt6(->array), Dt6(->asize), -LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex); + dasm_put(Dst, 4672, DISPATCH_GL(gc.grayagain), LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist), -LJ_TSTR); + break; + case BC_TSETS: + dasm_put(Dst, 4693, -LJ_TTAB, Dt6(->hmask), Dt5(->hash), Dt6(->node), Dt6(->nomm), DtB(->key), DtB(->val.it), DtB(->next), -LJ_TSTR, Dt6(->marked), -LJ_TNIL, LJ_GC_BLACK, DtB(->val)); + dasm_put(Dst, 4751, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, Dt6(->metatable), Dt1(->base), Dt6(->nomm), 1<<MM_newindex, ~LJ_TSTR, Dt1(->base), DISPATCH_GL(gc.grayagain), LJ_GC_BLACK); + dasm_put(Dst, 4804, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist)); + break; + case BC_TSETB: + dasm_put(Dst, 4813, -LJ_TTAB, Dt6(->asize), Dt6(->array), -LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, DISPATCH_GL(gc.grayagain), LJ_GC_BLACK); + dasm_put(Dst, 4871, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist)); + break; + + case BC_TSETM: + dasm_put(Dst, 4880, Dt6(->asize), Dt6(->array), Dt6(->marked), LJ_GC_BLACK, Dt1(->base)); + if (LJ_TARGET_OSX) { + dasm_put(Dst, 4925, Dt1(->base)); + } + dasm_put(Dst, 4928, DISPATCH_GL(gc.grayagain), LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist)); + break; + + /* -- Calls and vararg handling 
----------------------------------------- */ + + case BC_CALLM: + dasm_put(Dst, 4944); + break; + case BC_CALL: + dasm_put(Dst, 4950, -LJ_TFUNC, Dt7(->field_pc)); + break; + + case BC_CALLMT: + dasm_put(Dst, 4970); + break; + case BC_CALLT: + dasm_put(Dst, 4975, -LJ_TFUNC, Dt7(->ffid), FRAME_TYPE, Dt7(->field_pc), Dt7(->field_pc), PC2PROTO(k), FRAME_VARG, FRAME_TYPEP); + dasm_put(Dst, 5036, FRAME_TYPE); + break; + + case BC_ITERC: + dasm_put(Dst, 5047, -LJ_TFUNC, Dt7(->field_pc)); + break; + + case BC_ITERN: +#if LJ_HASJIT +#endif + dasm_put(Dst, 5071, Dt6(->asize), Dt6(->array), -LJ_TNIL, ~LJ_TISNUM, Dt6(->hmask), Dt6(->node), DtB(->val), -LJ_TNIL, DtB(->key)); + break; + + case BC_ISNEXT: + dasm_put(Dst, 5136, -LJ_TFUNC, Dt8(->ffid), -LJ_TTAB, -LJ_TNIL, FF_next_N, BC_JMP, BC_ITERC); + break; + + case BC_VARG: + dasm_put(Dst, 5175, FRAME_VARG, ~LJ_TNIL, Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->base)); + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + dasm_put(Dst, 5247); + break; + + case BC_RET: + dasm_put(Dst, 5254, FRAME_TYPE, FRAME_VARG, Dt7(->field_pc), PC2PROTO(k), ~LJ_TNIL, FRAME_TYPEP); + break; + + case BC_RET0: case BC_RET1: + dasm_put(Dst, 5319, FRAME_TYPE, FRAME_VARG); + if (op == BC_RET1) { + dasm_put(Dst, 5330); + } + dasm_put(Dst, 5332); + if (op == BC_RET1) { + dasm_put(Dst, 5335); + } + dasm_put(Dst, 5337, Dt7(->field_pc), PC2PROTO(k), ~LJ_TNIL); + break; + + /* -- Loops and branches ------------------------------------------------ */ + + + case BC_FORL: +#if LJ_HASJIT + dasm_put(Dst, 5363, -GG_DISP2HOT); +#endif + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + vk = (op == BC_IFORL || op == BC_JFORL); + dasm_put(Dst, 5373); + if (op != BC_JFORL) { + dasm_put(Dst, 5375); + } + if (!vk) { + dasm_put(Dst, 5377, -LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 5395, -LJ_TISNUM); + if (op == BC_IFORL) { + dasm_put(Dst, 5403); + } else { + dasm_put(Dst, 5405); + } + dasm_put(Dst, 5408); + } + dasm_put(Dst, 5413); + if (op == BC_FORI) { + dasm_put(Dst, 5415); + } else if (op == BC_JFORI) { + dasm_put(Dst, 5417); + } else if (op == BC_IFORL) { + dasm_put(Dst, 5420); + } + if (vk) { + dasm_put(Dst, 5422); + } + dasm_put(Dst, 5424); + if (op == BC_JFORI || op == BC_JFORL) { + dasm_put(Dst, 5429, BC_JLOOP); + } + dasm_put(Dst, 5432); + if (!vk) { + dasm_put(Dst, 5439); + } else { + dasm_put(Dst, 5441); + } + dasm_put(Dst, 5443); + if (!vk) { + dasm_put(Dst, 5447, -LJ_TISNUM, -LJ_TISNUM); + } else { + dasm_put(Dst, 5459); + } + dasm_put(Dst, 5468); + if (op == BC_FORI) { + dasm_put(Dst, 5472); + } else if (op == BC_JFORI) { + dasm_put(Dst, 5474, BC_JLOOP); + } else if (op == BC_IFORL) { + dasm_put(Dst, 5479); + } else { + dasm_put(Dst, 5481, BC_JLOOP); + } + dasm_put(Dst, 5484); + if (vk) { + dasm_put(Dst, 5490); + } + dasm_put(Dst, 5495); + break; + + case BC_ITERL: +#if LJ_HASJIT + dasm_put(Dst, 5501, -GG_DISP2HOT); +#endif + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + dasm_put(Dst, 5511); + if (op == BC_JITERL) { + dasm_put(Dst, 5513, -LJ_TNIL, BC_JLOOP); + } else { + dasm_put(Dst, 5519, -LJ_TNIL); + } + dasm_put(Dst, 5525); + break; + + case BC_LOOP: +#if LJ_HASJIT + dasm_put(Dst, 5532, -GG_DISP2HOT); +#endif + break; + + case BC_ILOOP: + dasm_put(Dst, 5542); + break; + + case BC_JLOOP: +#if LJ_HASJIT + dasm_put(Dst, 5549, DISPATCH_J(trace), DISPATCH_GL(vmstate), DtD(->mcode), DISPATCH_GL(jit_base), 
DISPATCH_GL(jit_L)); +#endif + break; + + case BC_JMP: + dasm_put(Dst, 5563); + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: +#if LJ_HASJIT + dasm_put(Dst, 5572, -GG_DISP2HOT); +#endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + dasm_put(Dst, 5582, Dt1(->maxstack), -4+PC2PROTO(numparams), -4+PC2PROTO(k)); + if (op != BC_JFUNCF) { + dasm_put(Dst, 5592); + } + dasm_put(Dst, 5595, ~LJ_TNIL); + if (op == BC_JFUNCF) { + dasm_put(Dst, 5602, BC_JLOOP); + } else { + dasm_put(Dst, 5606); + } + dasm_put(Dst, 5611); + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + dasm_put(Dst, 5617); + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + dasm_put(Dst, 5619, Dt1(->maxstack), 8+FRAME_VARG, -4+PC2PROTO(k), -4+PC2PROTO(numparams), ~LJ_TNIL); + break; + + case BC_FUNCC: + case BC_FUNCCW: + if (op == BC_FUNCC) { + dasm_put(Dst, 5660, Dt8(->f)); + } else { + dasm_put(Dst, 5663, DISPATCH_GL(wrapf)); + } + dasm_put(Dst, 5666, Dt1(->maxstack), Dt1(->base), Dt1(->top)); + if (op == BC_FUNCCW) { + dasm_put(Dst, 5676, Dt8(->f)); + } + dasm_put(Dst, 5679, LJ_VMST_C, DISPATCH_GL(vmstate), Dt1(->base), LJ_VMST_INTERP, Dt1(->top), DISPATCH_GL(vmstate)); + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx); + + dasm_put(Dst, 5701); + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. */ +static void emit_asm_debug(BuildCtx *ctx) +{ + int i; + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 0xe\n" /* Return address is in lr. */ + "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */ + "\t.align 2\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ + "\t.byte 0x8e\n\t.uleb128 1\n", /* Restore lr. */ + (int)ctx->codesz, CFRAME_SIZE); + for (i = 11; i >= 4; i--) /* Restore r4-r11. */ + fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i)); + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE0:\n\n"); + break; + default: + break; + } +} + diff --git a/src/luajit2/src/buildvm_asm.c b/src/luajit2/src/buildvm_asm.c new file mode 100644 index 0000000000..49d6ffcaea --- /dev/null +++ b/src/luajit2/src/buildvm_asm.c @@ -0,0 +1,262 @@ +/* +** LuaJIT VM builder: Assembler source code emitter. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "buildvm.h" +#include "lj_bc.h" + +/* ------------------------------------------------------------------------ */ + +#if LJ_TARGET_X86ORX64 +/* Emit bytes piecewise as assembler text. 
*/ +static void emit_asm_bytes(BuildCtx *ctx, uint8_t *p, int n) +{ + int i; + for (i = 0; i < n; i++) { + if ((i & 15) == 0) + fprintf(ctx->fp, "\t.byte %d", p[i]); + else + fprintf(ctx->fp, ",%d", p[i]); + if ((i & 15) == 15) putc('\n', ctx->fp); + } + if ((n & 15) != 0) putc('\n', ctx->fp); +} + +/* Emit relocation */ +static void emit_asm_reloc(BuildCtx *ctx, int type, const char *sym) +{ + switch (ctx->mode) { + case BUILD_elfasm: + if (type) + fprintf(ctx->fp, "\t.long %s-.-4\n", sym); + else + fprintf(ctx->fp, "\t.long %s\n", sym); + break; + case BUILD_coffasm: + fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", sym); + if (type) + fprintf(ctx->fp, "\t.long %s-.-4\n", sym); + else + fprintf(ctx->fp, "\t.long %s\n", sym); + break; + default: /* BUILD_machasm for relative relocations handled below. */ + fprintf(ctx->fp, "\t.long %s\n", sym); + break; + } +} + +static const char *const jccnames[] = { + "jo", "jno", "jb", "jnb", "jz", "jnz", "jbe", "ja", + "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg" +}; + +/* Emit relocation for the incredibly stupid OSX assembler. */ +static void emit_asm_reloc_mach(BuildCtx *ctx, uint8_t *cp, int n, + const char *sym) +{ + const char *opname = NULL; + if (--n < 0) goto err; + if (cp[n] == 0xe8) { + opname = "call"; + } else if (cp[n] == 0xe9) { + opname = "jmp"; + } else if (cp[n] >= 0x80 && cp[n] <= 0x8f && n > 0 && cp[n-1] == 0x0f) { + opname = jccnames[cp[n]-0x80]; + n--; + } else { +err: + fprintf(stderr, "Error: unsupported opcode for %s symbol relocation.\n", + sym); + exit(1); + } + emit_asm_bytes(ctx, cp, n); + fprintf(ctx->fp, "\t%s %s\n", opname, sym); +} +#else +/* Emit words piecewise as assembler text. */ +static void emit_asm_words(BuildCtx *ctx, uint8_t *p, int n) +{ + int i; + for (i = 0; i < n; i += 4) { + if ((i & 15) == 0) + fprintf(ctx->fp, "\t.long 0x%08x", *(uint32_t *)(p+i)); + else + fprintf(ctx->fp, ",0x%08x", *(uint32_t *)(p+i)); + if ((i & 15) == 12) putc('\n', ctx->fp); + } + if ((n & 15) != 0) putc('\n', ctx->fp); +} + +/* Emit relocation as part of an instruction. */ +static void emit_asm_wordreloc(BuildCtx *ctx, uint8_t *p, int n, + const char *sym) +{ + uint32_t ins; + emit_asm_words(ctx, p, n-4); + ins = *(uint32_t *)(p+n-4); +#if LJ_TARGET_ARM + if ((ins & 0xff000000u) == 0xfa000000u) { + fprintf(ctx->fp, "\tblx %s\n", sym); + } else if ((ins & 0x0e000000u) == 0x0a000000u) { + fprintf(ctx->fp, "\t%s%.2s %s\n", (ins & 0x01000000u) ? "bl" : "b", + "eqnecsccmiplvsvchilsgeltgtle" + 2*(ins >> 28), sym); + } else { + fprintf(stderr, + "Error: unsupported opcode %08x for %s symbol relocation.\n", + ins, sym); + exit(1); + } +#elif LJ_TARGET_PPC + if ((ins >> 26) == 16) { + fprintf(ctx->fp, "\t%s %d, %d, %s\n", + (ins & 1) ? "bcl" : "bc", (ins >> 21) & 31, (ins >> 16) & 31, sym); + } else if ((ins >> 26) == 18) { + fprintf(ctx->fp, "\t%s %s\n", (ins & 1) ? "bl" : "b", sym); + } else { + fprintf(stderr, + "Error: unsupported opcode %08x for %s symbol relocation.\n", + ins, sym); + exit(1); + } +#else +#error "missing relocation support for this architecture" +#endif +} +#endif + +#if LJ_TARGET_ARM +#define ELFASM_PX "%%" +#else +#define ELFASM_PX "@" +#endif + +/* Emit an assembler label. */ +static void emit_asm_label(BuildCtx *ctx, const char *name, int size, int isfunc) +{ + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, + "\n\t.globl %s\n" + "\t.hidden %s\n" + "\t.type %s, " ELFASM_PX "%s\n" + "\t.size %s, %d\n" + "%s:\n", + name, name, name, isfunc ? 
"function" : "object", name, size, name); + break; + case BUILD_coffasm: + fprintf(ctx->fp, "\n\t.globl %s\n", name); + if (isfunc) + fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", name); + fprintf(ctx->fp, "%s:\n", name); + break; + case BUILD_machasm: + fprintf(ctx->fp, + "\n\t.private_extern %s\n" + "%s:\n", name, name); + break; + default: + break; + } +} + +/* Emit alignment. */ +static void emit_asm_align(BuildCtx *ctx, int bits) +{ + switch (ctx->mode) { + case BUILD_elfasm: + case BUILD_coffasm: + fprintf(ctx->fp, "\t.p2align %d\n", bits); + break; + case BUILD_machasm: + fprintf(ctx->fp, "\t.align %d\n", bits); + break; + default: + break; + } +} + +/* ------------------------------------------------------------------------ */ + +/* Emit assembler source code. */ +void emit_asm(BuildCtx *ctx) +{ + int i, rel; + + fprintf(ctx->fp, "\t.file \"buildvm_%s.dasc\"\n", ctx->dasm_arch); + fprintf(ctx->fp, "\t.text\n"); + emit_asm_align(ctx, 4); + + emit_asm_label(ctx, ctx->beginsym, 0, 0); + if (ctx->mode != BUILD_machasm) + fprintf(ctx->fp, ".Lbegin:\n"); + +#if LJ_TARGET_ARM && defined(__GNUC__) && !defined(__symbian__) + /* This should really be moved into buildvm_arm.dasc. */ + fprintf(ctx->fp, + ".fnstart\n" + ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n" + ".pad #28\n"); +#endif + + for (i = rel = 0; i < ctx->nsym; i++) { + int32_t ofs = ctx->sym[i].ofs; + int32_t next = ctx->sym[i+1].ofs; + emit_asm_label(ctx, ctx->sym[i].name, next - ofs, 1); + while (rel < ctx->nreloc && ctx->reloc[rel].ofs <= next) { + BuildReloc *r = &ctx->reloc[rel]; + int n = r->ofs - ofs; +#if LJ_TARGET_X86ORX64 + if (ctx->mode == BUILD_machasm && r->type != 0) { + emit_asm_reloc_mach(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]); + } else { + emit_asm_bytes(ctx, ctx->code+ofs, n); + emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]); + } + ofs += n+4; +#else + emit_asm_wordreloc(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]); + ofs += n; +#endif + rel++; + } +#if LJ_TARGET_X86ORX64 + emit_asm_bytes(ctx, ctx->code+ofs, next-ofs); +#else + emit_asm_words(ctx, ctx->code+ofs, next-ofs); +#endif + } + +#if LJ_TARGET_ARM && defined(__GNUC__) && !defined(__symbian__) + fprintf(ctx->fp, + ".globl lj_err_unwind_arm\n" + ".personality lj_err_unwind_arm\n" + ".fnend\n"); +#endif + + fprintf(ctx->fp, "\n"); + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n"); +#if LJ_TARGET_PPCSPE + /* Soft-float ABI + SPE. */ + fprintf(ctx->fp, "\t.gnu_attribute 4, 2\n\t.gnu_attribute 8, 3\n"); +#elif LJ_TARGET_PPC + /* Hard-float ABI. */ + fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n"); +#endif + /* fallthrough */ + case BUILD_coffasm: + fprintf(ctx->fp, "\t.ident \"%s\"\n", ctx->dasm_ident); + break; + case BUILD_machasm: + fprintf(ctx->fp, + "\t.cstring\n" + "\t.ascii \"%s\\0\"\n", ctx->dasm_ident); + break; + default: + break; + } + fprintf(ctx->fp, "\n"); +} + diff --git a/src/luajit2/src/buildvm_fold.c b/src/luajit2/src/buildvm_fold.c new file mode 100644 index 0000000000..b43d2c4870 --- /dev/null +++ b/src/luajit2/src/buildvm_fold.c @@ -0,0 +1,229 @@ +/* +** LuaJIT VM builder: IR folding hash table generator. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "buildvm.h" +#include "lj_obj.h" +#include "lj_ir.h" + +/* Context for the folding hash table generator. 
*/ +static int lineno; +static int funcidx; +static uint32_t foldkeys[BUILD_MAX_FOLD]; +static uint32_t nkeys; + +/* Try to fill the hash table with keys using the hash parameters. */ +static int tryhash(uint32_t *htab, uint32_t sz, uint32_t r, int dorol) +{ + uint32_t i; + if (dorol && ((r & 31) == 0 || (r>>5) == 0)) + return 0; /* Avoid zero rotates. */ + memset(htab, 0xff, (sz+1)*sizeof(uint32_t)); + for (i = 0; i < nkeys; i++) { + uint32_t key = foldkeys[i]; + uint32_t k = key & 0xffffff; + uint32_t h = (dorol ? lj_rol(lj_rol(k, r>>5) - k, r&31) : + (((k << (r>>5)) - k) << (r&31))) % sz; + if (htab[h] != 0xffffffff) { /* Collision on primary slot. */ + if (htab[h+1] != 0xffffffff) { /* Collision on secondary slot. */ + /* Try to move the colliding key, if possible. */ + if (h < sz-1 && htab[h+2] == 0xffffffff) { + uint32_t k2 = htab[h+1] & 0xffffff; + uint32_t h2 = (dorol ? lj_rol(lj_rol(k2, r>>5) - k2, r&31) : + (((k2 << (r>>5)) - k2) << (r&31))) % sz; + if (h2 != h+1) return 0; /* Cannot resolve collision. */ + htab[h+2] = htab[h+1]; /* Move colliding key to secondary slot. */ + } else { + return 0; /* Collision. */ + } + } + htab[h+1] = key; + } else { + htab[h] = key; + } + } + return 1; /* Success, all keys could be stored. */ +} + +/* Print the generated hash table. */ +static void printhash(BuildCtx *ctx, uint32_t *htab, uint32_t sz) +{ + uint32_t i; + fprintf(ctx->fp, "static const uint32_t fold_hash[%d] = {\n0x%08x", + sz+1, htab[0]); + for (i = 1; i < sz+1; i++) + fprintf(ctx->fp, ",\n0x%08x", htab[i]); + fprintf(ctx->fp, "\n};\n\n"); +} + +/* Exhaustive search for the shortest semi-perfect hash table. */ +static void makehash(BuildCtx *ctx) +{ + uint32_t htab[BUILD_MAX_FOLD*2+1]; + uint32_t sz, r; + /* Search for the smallest hash table with an odd size. */ + for (sz = (nkeys|1); sz < BUILD_MAX_FOLD*2; sz += 2) { + /* First try all shift hash combinations. */ + for (r = 0; r < 32*32; r++) { + if (tryhash(htab, sz, r, 0)) { + printhash(ctx, htab, sz); + fprintf(ctx->fp, + "#define fold_hashkey(k)\t(((((k)<<%u)-(k))<<%u)%%%u)\n\n", + r>>5, r&31, sz); + return; + } + } + /* Then try all rotate hash combinations. */ + for (r = 0; r < 32*32; r++) { + if (tryhash(htab, sz, r, 1)) { + printhash(ctx, htab, sz); + fprintf(ctx->fp, + "#define fold_hashkey(k)\t(lj_rol(lj_rol((k),%u)-(k),%u)%%%u)\n\n", + r>>5, r&31, sz); + return; + } + } + } + fprintf(stderr, "Error: search for perfect hash failed\n"); + exit(1); +} + +/* Parse one token of a fold rule. 
*/ +static uint32_t nexttoken(char **pp, int allowlit, int allowany) +{ + char *p = *pp; + if (p) { + uint32_t i; + char *q = strchr(p, ' '); + if (q) *q++ = '\0'; + *pp = q; + if (allowlit && !strncmp(p, "IRFPM_", 6)) { + for (i = 0; irfpm_names[i]; i++) + if (!strcmp(irfpm_names[i], p+6)) + return i; + } else if (allowlit && !strncmp(p, "IRFL_", 5)) { + for (i = 0; irfield_names[i]; i++) + if (!strcmp(irfield_names[i], p+5)) + return i; + } else if (allowlit && !strncmp(p, "IRCALL_", 7)) { + for (i = 0; ircall_names[i]; i++) + if (!strcmp(ircall_names[i], p+7)) + return i; + } else if (allowlit && !strncmp(p, "IRCONV_", 7)) { + for (i = 0; irt_names[i]; i++) { + const char *r = strchr(p+7, '_'); + if (r && !strncmp(irt_names[i], p+7, r-(p+7))) { + uint32_t j; + for (j = 0; irt_names[j]; j++) + if (!strcmp(irt_names[j], r+1)) + return (i << 5) + j; + } + } + } else if (allowlit && *p >= '0' && *p <= '9') { + for (i = 0; *p >= '0' && *p <= '9'; p++) + i = i*10 + (*p - '0'); + if (*p == '\0') + return i; + } else if (allowany && !strcmp("any", p)) { + return allowany; + } else { + for (i = 0; ir_names[i]; i++) + if (!strcmp(ir_names[i], p)) + return i; + } + fprintf(stderr, "Error: bad fold definition token \"%s\" at line %d\n", p, lineno); + exit(1); + } + return 0; +} + +/* Parse a fold rule. */ +static void foldrule(char *p) +{ + uint32_t op = nexttoken(&p, 0, 0); + uint32_t left = nexttoken(&p, 0, 0x7f); + uint32_t right = nexttoken(&p, 1, 0x3ff); + uint32_t key = (funcidx << 24) | (op << 17) | (left << 10) | right; + uint32_t i; + if (nkeys >= BUILD_MAX_FOLD) { + fprintf(stderr, "Error: too many fold rules, increase BUILD_MAX_FOLD.\n"); + exit(1); + } + /* Simple insertion sort to detect duplicates. */ + for (i = nkeys; i > 0; i--) { + if ((foldkeys[i-1]&0xffffff) < (key & 0xffffff)) + break; + if ((foldkeys[i-1]&0xffffff) == (key & 0xffffff)) { + fprintf(stderr, "Error: duplicate fold definition at line %d\n", lineno); + exit(1); + } + foldkeys[i] = foldkeys[i-1]; + } + foldkeys[i] = key; + nkeys++; +} + +/* Emit C source code for IR folding hash table. */ +void emit_fold(BuildCtx *ctx) +{ + char buf[256]; /* We don't care about analyzing lines longer than that. */ + const char *fname = ctx->args[0]; + FILE *fp; + + if (fname == NULL) { + fprintf(stderr, "Error: missing input filename\n"); + exit(1); + } + + if (fname[0] == '-' && fname[1] == '\0') { + fp = stdin; + } else { + fp = fopen(fname, "r"); + if (!fp) { + fprintf(stderr, "Error: cannot open input file '%s': %s\n", + fname, strerror(errno)); + exit(1); + } + } + + fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); + fprintf(ctx->fp, "static const FoldFunc fold_func[] = {\n"); + + lineno = 0; + funcidx = 0; + nkeys = 0; + while (fgets(buf, sizeof(buf), fp) != NULL) { + lineno++; + /* The prefix must be at the start of a line, otherwise it's ignored. 
*/ + if (!strncmp(buf, FOLDDEF_PREFIX, sizeof(FOLDDEF_PREFIX)-1)) { + char *p = buf+sizeof(FOLDDEF_PREFIX)-1; + char *q = strchr(p, ')'); + if (p[0] == '(' && q) { + p++; + *q = '\0'; + foldrule(p); + } else if ((p[0] == 'F' || p[0] == 'X') && p[1] == '(' && q) { + p += 2; + *q = '\0'; + if (funcidx) + fprintf(ctx->fp, ",\n"); + if (p[-2] == 'X') + fprintf(ctx->fp, " %s", p); + else + fprintf(ctx->fp, " fold_%s", p); + funcidx++; + } else { + buf[strlen(buf)-1] = '\0'; + fprintf(stderr, "Error: unknown fold definition tag %s%s at line %d\n", + FOLDDEF_PREFIX, p, lineno); + exit(1); + } + } + } + fclose(fp); + fprintf(ctx->fp, "\n};\n\n"); + + makehash(ctx); +} + diff --git a/src/luajit2/src/buildvm_lib.c b/src/luajit2/src/buildvm_lib.c new file mode 100644 index 0000000000..8d9bcea3e2 --- /dev/null +++ b/src/luajit2/src/buildvm_lib.c @@ -0,0 +1,377 @@ +/* +** LuaJIT VM builder: library definition compiler. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "buildvm.h" +#include "lj_obj.h" +#include "lj_lib.h" + +/* Context for library definitions. */ +static uint8_t obuf[8192]; +static uint8_t *optr; +static char modname[80]; +static size_t modnamelen; +static char funcname[80]; +static int modstate, regfunc; +static int ffid, recffid, ffasmfunc; + +enum { + REGFUNC_OK, + REGFUNC_NOREG, + REGFUNC_NOREGUV +}; + +static void libdef_name(const char *p, int kind) +{ + size_t n = strlen(p); + if (kind != LIBINIT_STRING) { + if (n > modnamelen && p[modnamelen] == '_' && + !strncmp(p, modname, modnamelen)) { + p += modnamelen+1; + n -= modnamelen+1; + } + } + if (n > LIBINIT_MAXSTR) { + fprintf(stderr, "Error: string too long: '%s'\n", p); + exit(1); + } + if (optr+1+n+2 > obuf+sizeof(obuf)) { /* +2 for caller. */ + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = (uint8_t)(n | kind); + memcpy(optr, p, n); + optr += n; +} + +static void libdef_endmodule(BuildCtx *ctx) +{ + if (modstate != 0) { + char line[80]; + const uint8_t *p; + int n; + if (modstate == 1) + fprintf(ctx->fp, " (lua_CFunction)0"); + fprintf(ctx->fp, "\n};\n"); + fprintf(ctx->fp, "static const uint8_t %s%s[] = {\n", + LABEL_PREFIX_LIBINIT, modname); + line[0] = '\0'; + for (n = 0, p = obuf; p < optr; p++) { + n += sprintf(line+n, "%d,", *p); + if (n >= 75) { + fprintf(ctx->fp, "%s\n", line); + n = 0; + line[0] = '\0'; + } + } + fprintf(ctx->fp, "%s%d\n};\n#endif\n\n", line, LIBINIT_END); + } +} + +static void libdef_module(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_libdef) { + libdef_endmodule(ctx); + optr = obuf; + *optr++ = (uint8_t)ffid; + *optr++ = (uint8_t)ffasmfunc; + *optr++ = 0; /* Hash table size. 
*/ + modstate = 1; + fprintf(ctx->fp, "#ifdef %sMODULE_%s\n", LIBDEF_PREFIX, p); + fprintf(ctx->fp, "#undef %sMODULE_%s\n", LIBDEF_PREFIX, p); + fprintf(ctx->fp, "static const lua_CFunction %s%s[] = {\n", + LABEL_PREFIX_LIBCF, p); + } + modnamelen = strlen(p); + if (modnamelen > sizeof(modname)-1) { + fprintf(stderr, "Error: module name too long: '%s'\n", p); + exit(1); + } + strcpy(modname, p); +} + +static int find_ffofs(BuildCtx *ctx, const char *name) +{ + int i; + for (i = 0; i < ctx->nglob; i++) { + const char *gl = ctx->globnames[i]; + if (gl[0] == 'f' && gl[1] == 'f' && gl[2] == '_' && !strcmp(gl+3, name)) { + return (int)((uint8_t *)ctx->glob[i] - ctx->code); + } + } + fprintf(stderr, "Error: undefined fast function %s%s\n", + LABEL_PREFIX_FF, name); + exit(1); +} + +static void libdef_func(BuildCtx *ctx, char *p, int arg) +{ + if (arg != LIBINIT_CF) + ffasmfunc++; + if (ctx->mode == BUILD_libdef) { + if (modstate == 0) { + fprintf(stderr, "Error: no module for function definition %s\n", p); + exit(1); + } + if (regfunc == REGFUNC_NOREG) { + if (optr+1 > obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = LIBINIT_FFID; + } else { + if (arg != LIBINIT_ASM_) { + if (modstate != 1) fprintf(ctx->fp, ",\n"); + modstate = 2; + fprintf(ctx->fp, " %s%s", arg ? LABEL_PREFIX_FFH : LABEL_PREFIX_CF, p); + } + if (regfunc != REGFUNC_NOREGUV) obuf[2]++; /* Bump hash table size. */ + libdef_name(regfunc == REGFUNC_NOREGUV ? "" : p, arg); + } + } else if (ctx->mode == BUILD_ffdef) { + fprintf(ctx->fp, "FFDEF(%s)\n", p); + } else if (ctx->mode == BUILD_recdef) { + if (strlen(p) > sizeof(funcname)-1) { + fprintf(stderr, "Error: function name too long: '%s'\n", p); + exit(1); + } + strcpy(funcname, p); + } else if (ctx->mode == BUILD_vmdef) { + int i; + for (i = 1; p[i] && modname[i-1]; i++) + if (p[i] == '_') p[i] = '.'; + fprintf(ctx->fp, "\"%s\",\n", p); + } else if (ctx->mode == BUILD_bcdef) { + if (arg != LIBINIT_CF) + fprintf(ctx->fp, ",\n%d", find_ffofs(ctx, p)); + } + ffid++; + regfunc = REGFUNC_OK; +} + +static uint32_t find_rec(char *name) +{ + char *p = (char *)obuf; + uint32_t n; + for (n = 2; *p; n++) { + if (strcmp(p, name) == 0) + return n; + p += strlen(p)+1; + } + if (p+strlen(name)+1 >= (char *)obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + strcpy(p, name); + return n; +} + +static void libdef_rec(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_recdef) { + char *q; + uint32_t n; + for (; recffid+1 < ffid; recffid++) + fprintf(ctx->fp, ",\n0"); + recffid = ffid; + if (*p == '.') p = funcname; + q = strchr(p, ' '); + if (q) *q++ = '\0'; + n = find_rec(p); + if (q) + fprintf(ctx->fp, ",\n0x%02x00+(%s)", n, q); + else + fprintf(ctx->fp, ",\n0x%02x00", n); + } +} + +static void memcpy_endian(void *dst, void *src, size_t n) +{ + union { uint8_t b; uint32_t u; } host_endian; + host_endian.u = 1; + if (host_endian.b == LJ_ENDIAN_SELECT(1, 0)) { + memcpy(dst, src, n); + } else { + size_t i; + for (i = 0; i < n; i++) + ((uint8_t *)dst)[i] = ((uint8_t *)src)[n-i-1]; + } +} + +static void libdef_push(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_libdef) { + int len = (int)strlen(p); + if (*p == '"') { + if (len > 1 && p[len-1] == '"') { + p[len-1] = '\0'; + libdef_name(p+1, LIBINIT_STRING); + return; + } + } else if (*p >= '0' && *p <= '9') { + char *ep; + double d = strtod(p, &ep); + if (*ep == '\0') { + if (optr+1+sizeof(double) > 
obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = LIBINIT_NUMBER; + memcpy_endian(optr, &d, sizeof(double)); + optr += sizeof(double); + return; + } + } else if (!strcmp(p, "lastcl")) { + if (optr+1 > obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = LIBINIT_LASTCL; + return; + } else if (len > 4 && !strncmp(p, "top-", 4)) { + if (optr+2 > obuf+sizeof(obuf)) { + fprintf(stderr, "Error: output buffer overflow\n"); + exit(1); + } + *optr++ = LIBINIT_COPY; + *optr++ = (uint8_t)atoi(p+4); + return; + } + fprintf(stderr, "Error: bad value for %sPUSH(%s)\n", LIBDEF_PREFIX, p); + exit(1); + } +} + +static void libdef_set(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(arg); + if (ctx->mode == BUILD_libdef) { + if (p[0] == '!' && p[1] == '\0') p[0] = '\0'; /* Set env. */ + libdef_name(p, LIBINIT_STRING); + *optr++ = LIBINIT_SET; + obuf[2]++; /* Bump hash table size. */ + } +} + +static void libdef_regfunc(BuildCtx *ctx, char *p, int arg) +{ + UNUSED(ctx); UNUSED(p); + regfunc = arg; +} + +typedef void (*LibDefFunc)(BuildCtx *ctx, char *p, int arg); + +typedef struct LibDefHandler { + const char *suffix; + const char *stop; + const LibDefFunc func; + const int arg; +} LibDefHandler; + +static const LibDefHandler libdef_handlers[] = { + { "MODULE_", " \t\r\n", libdef_module, 0 }, + { "CF(", ")", libdef_func, LIBINIT_CF }, + { "ASM(", ")", libdef_func, LIBINIT_ASM }, + { "ASM_(", ")", libdef_func, LIBINIT_ASM_ }, + { "REC(", ")", libdef_rec, 0 }, + { "PUSH(", ")", libdef_push, 0 }, + { "SET(", ")", libdef_set, 0 }, + { "NOREGUV", NULL, libdef_regfunc, REGFUNC_NOREGUV }, + { "NOREG", NULL, libdef_regfunc, REGFUNC_NOREG }, + { NULL, NULL, (LibDefFunc)0, 0 } +}; + +/* Emit C source code for library function definitions. */ +void emit_lib(BuildCtx *ctx) +{ + const char *fname; + + if (ctx->mode == BUILD_ffdef || ctx->mode == BUILD_libdef || + ctx->mode == BUILD_recdef) + fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); + else if (ctx->mode == BUILD_vmdef) + fprintf(ctx->fp, "ffnames = {\n[0]=\"Lua\",\n\"C\",\n"); + if (ctx->mode == BUILD_recdef) + fprintf(ctx->fp, "static const uint16_t recff_idmap[] = {\n0,\n0x0100"); + recffid = ffid = FF_C+1; + ffasmfunc = 0; + + while ((fname = *ctx->args++)) { + char buf[256]; /* We don't care about analyzing lines longer than that. */ + FILE *fp; + if (fname[0] == '-' && fname[1] == '\0') { + fp = stdin; + } else { + fp = fopen(fname, "r"); + if (!fp) { + fprintf(stderr, "Error: cannot open input file '%s': %s\n", + fname, strerror(errno)); + exit(1); + } + } + modstate = 0; + regfunc = REGFUNC_OK; + while (fgets(buf, sizeof(buf), fp) != NULL) { + char *p; + for (p = buf; (p = strstr(p, LIBDEF_PREFIX)) != NULL; ) { + const LibDefHandler *ldh; + p += sizeof(LIBDEF_PREFIX)-1; + for (ldh = libdef_handlers; ldh->suffix != NULL; ldh++) { + size_t n, len = strlen(ldh->suffix); + if (!strncmp(p, ldh->suffix, len)) { + p += len; + n = ldh->stop ? 
strcspn(p, ldh->stop) : 0; + if (!p[n]) break; + p[n] = '\0'; + ldh->func(ctx, p, ldh->arg); + p += n+1; + break; + } + } + if (ldh->suffix == NULL) { + buf[strlen(buf)-1] = '\0'; + fprintf(stderr, "Error: unknown library definition tag %s%s\n", + LIBDEF_PREFIX, p); + exit(1); + } + } + } + fclose(fp); + if (ctx->mode == BUILD_libdef) { + libdef_endmodule(ctx); + } + } + + if (ctx->mode == BUILD_ffdef) { + fprintf(ctx->fp, "\n#undef FFDEF\n\n"); + fprintf(ctx->fp, + "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n", + ffasmfunc); + } else if (ctx->mode == BUILD_vmdef) { + fprintf(ctx->fp, "}\n\n"); + } else if (ctx->mode == BUILD_bcdef) { + int i; + fprintf(ctx->fp, "\n};\n\n"); + fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_mode[] = {\n"); + fprintf(ctx->fp, "BCDEF(BCMODE)\n"); + for (i = ffasmfunc-1; i > 0; i--) + fprintf(ctx->fp, "BCMODE_FF,\n"); + fprintf(ctx->fp, "BCMODE_FF\n};\n\n"); + } else if (ctx->mode == BUILD_recdef) { + char *p = (char *)obuf; + fprintf(ctx->fp, "\n};\n\n"); + fprintf(ctx->fp, "static const RecordFunc recff_func[] = {\n" + "recff_nyi,\n" + "recff_c"); + while (*p) { + fprintf(ctx->fp, ",\nrecff_%s", p); + p += strlen(p)+1; + } + fprintf(ctx->fp, "\n};\n\n"); + } +} + diff --git a/src/luajit2/src/buildvm_peobj.c b/src/luajit2/src/buildvm_peobj.c new file mode 100644 index 0000000000..b97a5b0388 --- /dev/null +++ b/src/luajit2/src/buildvm_peobj.c @@ -0,0 +1,334 @@ +/* +** LuaJIT VM builder: PE object emitter. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Only used for building on Windows, since we cannot assume the presence +** of a suitable assembler. The host and target byte order must match. +*/ + +#include "buildvm.h" +#include "lj_bc.h" + +#if LJ_TARGET_X86ORX64 + +/* Context for PE object emitter. */ +static char *strtab; +static size_t strtabofs; + +/* -- PE object definitions ----------------------------------------------- */ + +/* PE header. */ +typedef struct PEheader { + uint16_t arch; + uint16_t nsects; + uint32_t time; + uint32_t symtabofs; + uint32_t nsyms; + uint16_t opthdrsz; + uint16_t flags; +} PEheader; + +/* PE section. */ +typedef struct PEsection { + char name[8]; + uint32_t vsize; + uint32_t vaddr; + uint32_t size; + uint32_t ofs; + uint32_t relocofs; + uint32_t lineofs; + uint16_t nreloc; + uint16_t nline; + uint32_t flags; +} PEsection; + +/* PE relocation. */ +typedef struct PEreloc { + uint32_t vaddr; + uint32_t symidx; + uint16_t type; +} PEreloc; + +/* Cannot use sizeof, because it pads up to the max. alignment. */ +#define PEOBJ_RELOC_SIZE (4+4+2) + +/* PE symbol table entry. */ +typedef struct PEsym { + union { + char name[8]; + uint32_t nameref[2]; + } n; + uint32_t value; + int16_t sect; + uint16_t type; + uint8_t scl; + uint8_t naux; +} PEsym; + +/* PE symbol table auxiliary entry for a section. */ +typedef struct PEsymaux { + uint32_t size; + uint16_t nreloc; + uint16_t nline; + uint32_t cksum; + uint16_t assoc; + uint8_t comdatsel; + uint8_t unused[3]; +} PEsymaux; + +/* Cannot use sizeof, because it pads up to the max. alignment. */ +#define PEOBJ_SYM_SIZE (8+4+2+2+1+1) + +/* PE object CPU specific defines. */ +#if LJ_TARGET_X86 +#define PEOBJ_ARCH_TARGET 0x014c +#define PEOBJ_RELOC_REL32 0x14 /* MS: REL32, GNU: DISP32. */ +#define PEOBJ_RELOC_DIR32 0x06 +#elif LJ_TARGET_X64 +#define PEOBJ_ARCH_TARGET 0x8664 +#define PEOBJ_RELOC_REL32 0x04 /* MS: REL32, GNU: DISP32. 
*/ +#define PEOBJ_RELOC_DIR32 0x02 +#define PEOBJ_RELOC_ADDR32NB 0x03 +#endif + +/* Section numbers (0-based). */ +enum { + PEOBJ_SECT_ABS = -2, + PEOBJ_SECT_UNDEF = -1, + PEOBJ_SECT_TEXT, +#if LJ_TARGET_X64 + PEOBJ_SECT_PDATA, + PEOBJ_SECT_XDATA, +#endif + PEOBJ_SECT_RDATA_Z, + PEOBJ_NSECTIONS +}; + +/* Symbol types. */ +#define PEOBJ_TYPE_NULL 0 +#define PEOBJ_TYPE_FUNC 0x20 + +/* Symbol storage class. */ +#define PEOBJ_SCL_EXTERN 2 +#define PEOBJ_SCL_STATIC 3 + +/* -- PE object emitter --------------------------------------------------- */ + +/* Emit PE object symbol. */ +static void emit_peobj_sym(BuildCtx *ctx, const char *name, uint32_t value, + int sect, int type, int scl) +{ + PEsym sym; + size_t len = strlen(name); + if (!strtab) { /* Pass 1: only calculate string table length. */ + if (len > 8) strtabofs += len+1; + return; + } + if (len <= 8) { + memcpy(sym.n.name, name, len); + memset(sym.n.name+len, 0, 8-len); + } else { + sym.n.nameref[0] = 0; + sym.n.nameref[1] = (uint32_t)strtabofs; + memcpy(strtab + strtabofs, name, len); + strtab[strtabofs+len] = 0; + strtabofs += len+1; + } + sym.value = value; + sym.sect = (int16_t)(sect+1); /* 1-based section number. */ + sym.type = (uint16_t)type; + sym.scl = (uint8_t)scl; + sym.naux = 0; + owrite(ctx, &sym, PEOBJ_SYM_SIZE); +} + +/* Emit PE object section symbol. */ +static void emit_peobj_sym_sect(BuildCtx *ctx, PEsection *pesect, int sect) +{ + PEsym sym; + PEsymaux aux; + if (!strtab) return; /* Pass 1: no output. */ + memcpy(sym.n.name, pesect[sect].name, 8); + sym.value = 0; + sym.sect = (int16_t)(sect+1); /* 1-based section number. */ + sym.type = PEOBJ_TYPE_NULL; + sym.scl = PEOBJ_SCL_STATIC; + sym.naux = 1; + owrite(ctx, &sym, PEOBJ_SYM_SIZE); + memset(&aux, 0, sizeof(PEsymaux)); + aux.size = pesect[sect].size; + aux.nreloc = pesect[sect].nreloc; + owrite(ctx, &aux, PEOBJ_SYM_SIZE); +} + +/* Emit Windows PE object file. */ +void emit_peobj(BuildCtx *ctx) +{ + PEheader pehdr; + PEsection pesect[PEOBJ_NSECTIONS]; + uint32_t sofs; + int i, nrsym; + union { uint8_t b; uint32_t u; } host_endian; + + host_endian.u = 1; + if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) { + fprintf(stderr, "Error: different byte order for host and target\n"); + exit(1); + } + + sofs = sizeof(PEheader) + PEOBJ_NSECTIONS*sizeof(PEsection); + + /* Fill in PE sections. */ + memset(&pesect, 0, PEOBJ_NSECTIONS*sizeof(PEsection)); + memcpy(pesect[PEOBJ_SECT_TEXT].name, ".text", sizeof(".text")-1); + pesect[PEOBJ_SECT_TEXT].ofs = sofs; + sofs += (pesect[PEOBJ_SECT_TEXT].size = (uint32_t)ctx->codesz); + pesect[PEOBJ_SECT_TEXT].relocofs = sofs; + sofs += (pesect[PEOBJ_SECT_TEXT].nreloc = (uint16_t)ctx->nreloc) * PEOBJ_RELOC_SIZE; + /* Flags: 60 = read+execute, 50 = align16, 20 = code. */ + pesect[PEOBJ_SECT_TEXT].flags = 0x60500020; + +#if LJ_TARGET_X64 + memcpy(pesect[PEOBJ_SECT_PDATA].name, ".pdata", sizeof(".pdata")-1); + pesect[PEOBJ_SECT_PDATA].ofs = sofs; + sofs += (pesect[PEOBJ_SECT_PDATA].size = 3*4); + pesect[PEOBJ_SECT_PDATA].relocofs = sofs; + sofs += (pesect[PEOBJ_SECT_PDATA].nreloc = 3) * PEOBJ_RELOC_SIZE; + /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ + pesect[PEOBJ_SECT_PDATA].flags = 0x40300040; + + memcpy(pesect[PEOBJ_SECT_XDATA].name, ".xdata", sizeof(".xdata")-1); + pesect[PEOBJ_SECT_XDATA].ofs = sofs; + sofs += (pesect[PEOBJ_SECT_XDATA].size = 8*2+4); /* See below. 
*/ + pesect[PEOBJ_SECT_XDATA].relocofs = sofs; + sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE; + /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ + pesect[PEOBJ_SECT_XDATA].flags = 0x40300040; +#endif + + memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1); + pesect[PEOBJ_SECT_RDATA_Z].ofs = sofs; + sofs += (pesect[PEOBJ_SECT_RDATA_Z].size = (uint32_t)strlen(ctx->dasm_ident)+1); + /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ + pesect[PEOBJ_SECT_RDATA_Z].flags = 0x40300040; + + /* Fill in PE header. */ + pehdr.arch = PEOBJ_ARCH_TARGET; + pehdr.nsects = PEOBJ_NSECTIONS; + pehdr.time = 0; /* Timestamp is optional. */ + pehdr.symtabofs = sofs; + pehdr.opthdrsz = 0; + pehdr.flags = 0; + + /* Compute the size of the symbol table: + ** @feat.00 + nsections*2 + ** + asm_start + nsym + ** + nrsym + */ + nrsym = ctx->nrelocsym; + pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym; +#if LJ_TARGET_X64 + pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win64. */ +#endif + + /* Write PE object header and all sections. */ + owrite(ctx, &pehdr, sizeof(PEheader)); + owrite(ctx, &pesect, sizeof(PEsection)*PEOBJ_NSECTIONS); + + /* Write .text section. */ + owrite(ctx, ctx->code, ctx->codesz); + for (i = 0; i < ctx->nreloc; i++) { + PEreloc reloc; + reloc.vaddr = (uint32_t)ctx->reloc[i].ofs; + reloc.symidx = 1+2+ctx->reloc[i].sym; /* Reloc syms are after .text sym. */ + reloc.type = ctx->reloc[i].type ? PEOBJ_RELOC_REL32 : PEOBJ_RELOC_DIR32; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + } + +#if LJ_TARGET_X64 + { /* Write .pdata section. */ + uint32_t pdata[3]; /* Start of .text, end of .text and .xdata. */ + PEreloc reloc; + pdata[0] = 0; pdata[1] = (uint32_t)ctx->codesz; pdata[2] = 0; + owrite(ctx, &pdata, sizeof(pdata)); + reloc.vaddr = 0; reloc.symidx = 1+2+nrsym+2+2+1; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + reloc.vaddr = 4; reloc.symidx = 1+2+nrsym+2+2+1; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + reloc.vaddr = 8; reloc.symidx = 1+2+nrsym+2; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + } + { /* Write .xdata section. */ + uint16_t xdata[8+2]; + PEreloc reloc; + xdata[0] = 0x01|0x08|0x10; /* Ver. 1, uhander/ehandler, prolog size 0. */ + xdata[1] = 5; /* Number of unwind codes, no frame pointer. */ + xdata[2] = 0x4200; /* Stack offset 4*8+8 = aword*5. */ + xdata[3] = 0x3000; /* Push rbx. */ + xdata[4] = 0x6000; /* Push rsi. */ + xdata[5] = 0x7000; /* Push rdi. */ + xdata[6] = 0x5000; /* Push rbp. */ + xdata[7] = 0; /* Alignment. */ + xdata[8] = xdata[9] = 0; /* Relocated address of exception handler. */ + owrite(ctx, &xdata, sizeof(xdata)); + reloc.vaddr = sizeof(xdata)-4; reloc.symidx = 1+2+nrsym+2+2; + reloc.type = PEOBJ_RELOC_ADDR32NB; + owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); + } +#endif + + /* Write .rdata$Z section. */ + owrite(ctx, ctx->dasm_ident, strlen(ctx->dasm_ident)+1); + + /* Write symbol table. */ + strtab = NULL; /* 1st pass: collect string sizes. */ + for (;;) { + strtabofs = 4; + /* Mark as SafeSEH compliant. 
*/ + emit_peobj_sym(ctx, "@feat.00", 1, + PEOBJ_SECT_ABS, PEOBJ_TYPE_NULL, PEOBJ_SCL_STATIC); + + emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_TEXT); + for (i = 0; i < nrsym; i++) + emit_peobj_sym(ctx, ctx->relocsym[i], 0, + PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); + +#if LJ_TARGET_X64 + emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA); + emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA); + emit_peobj_sym(ctx, "lj_err_unwind_win64", 0, + PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); +#endif + + emit_peobj_sym(ctx, ctx->beginsym, 0, + PEOBJ_SECT_TEXT, PEOBJ_TYPE_NULL, PEOBJ_SCL_EXTERN); + for (i = 0; i < ctx->nsym; i++) + emit_peobj_sym(ctx, ctx->sym[i].name, (uint32_t)ctx->sym[i].ofs, + PEOBJ_SECT_TEXT, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); + + emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_RDATA_Z); + + if (strtab) + break; + /* 2nd pass: alloc strtab, write syms and copy strings. */ + strtab = (char *)malloc(strtabofs); + *(uint32_t *)strtab = (uint32_t)strtabofs; + } + + /* Write string table. */ + owrite(ctx, strtab, strtabofs); +} + +#else + +void emit_peobj(BuildCtx *ctx) +{ + UNUSED(ctx); + fprintf(stderr, "Error: no PE object support for this target\n"); + exit(1); +} + +#endif diff --git a/src/luajit2/src/buildvm_ppc.dasc b/src/luajit2/src/buildvm_ppc.dasc new file mode 100644 index 0000000000..4e3168830e --- /dev/null +++ b/src/luajit2/src/buildvm_ppc.dasc @@ -0,0 +1,3732 @@ +|// Low-level VM code for PowerPC CPUs. +|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +| +|.arch ppc +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|.if not SPE +|.error "No support for plain PowerPC CPUs (yet)" +|.endif +| +|// Note: The ragged indentation of the instructions is intentional. +|// The starting columns indicate data dependencies. +| +|//----------------------------------------------------------------------- +| +|// Fixed register assignments for the interpreter. +|// Don't use: r1 = sp, r2 and r13 = reserved and/or small data area ptr +| +|// The following must be C callee-save (but BASE is often refetched). +|.define BASE, r14 // Base of current Lua stack frame. +|.define KBASE, r15 // Constants of current Lua function. +|.define PC, r16 // Next PC. +|.define DISPATCH, r17 // Opcode dispatch table. +|.define LREG, r18 // Register holding lua_State (also in SAVE_L). +|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8. +| +|// Constants for vectorized type-comparisons (hi+low GPR). C callee-save. +|.define TISNUM, r22 +|.if SPE +|.define TISSTR, r23 +|.define TISTAB, r24 +|.define TISFUNC, r25 +|.define TISNIL, r26 +|.define TOBIT, r27 +|.define ZERO, TOBIT // Zero in lo word. +|.endif +| +|// The following temporaries are not saved across C calls, except for RA. +|.define RA, r20 // Callee-save. +|.define RB, r10 +|.define RC, r11 +|.define RD, r12 +|.define INS, r7 // Overlaps CARG5. +| +|.define TMP0, r0 +|.define TMP1, r8 +|.define TMP2, r9 +|.define TMP3, r6 // Overlaps CARG4. +| +|// Saved temporaries. +|.define SAVE0, r21 +| +|// Calling conventions. +|.define CARG1, r3 +|.define CARG2, r4 +|.define CARG3, r5 +|.define CARG4, r6 // Overlaps TMP3. +|.define CARG5, r7 // Overlaps INS. +| +|.define CRET1, r3 +|.define CRET2, r4 +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +|.if SPE +|.define SAVE_LR, 180(sp) +|.define CFRAME_SPACE, 176 // Delta for sp. 
+|// Back chain for sp: 176(sp) <-- sp entering interpreter +|.define SAVE_r31, 168(sp) // 64 bit register saves. +|.define SAVE_r30, 160(sp) +|.define SAVE_r29, 152(sp) +|.define SAVE_r28, 144(sp) +|.define SAVE_r27, 136(sp) +|.define SAVE_r26, 128(sp) +|.define SAVE_r25, 120(sp) +|.define SAVE_r24, 112(sp) +|.define SAVE_r23, 104(sp) +|.define SAVE_r22, 96(sp) +|.define SAVE_r21, 88(sp) +|.define SAVE_r20, 80(sp) +|.define SAVE_r19, 72(sp) +|.define SAVE_r18, 64(sp) +|.define SAVE_r17, 56(sp) +|.define SAVE_r16, 48(sp) +|.define SAVE_r15, 40(sp) +|.define SAVE_r14, 32(sp) +|.define SAVE_ERRF, 28(sp) // 32 bit C frame info. +|.define SAVE_NRES, 24(sp) +|.define SAVE_CFRAME, 20(sp) +|.define SAVE_L, 16(sp) +|.define SAVE_PC, 12(sp) +|.define SAVE_MULTRES, 8(sp) +|// Next frame lr: 4(sp) +|// Back chain for sp: 0(sp) <-- sp while in interpreter +| +|.macro save_, reg; evstdd reg, SAVE_..reg; .endmacro +|.macro rest_, reg; evldd reg, SAVE_..reg; .endmacro +|.endif +| +|.macro saveregs +| stwu sp, -CFRAME_SPACE(sp) +| save_ r14; save_ r15; save_ r16; save_ r17; save_ r18; save_ r19 +| mflr r0 +| save_ r20; save_ r21; save_ r22; save_ r23; save_ r24; save_ r25 +| stw r0, SAVE_LR +| save_ r26; save_ r27; save_ r28; save_ r29; save_ r30; save_ r31 +|.endmacro +| +|.macro restoreregs +| lwz r0, SAVE_LR +| rest_ r14; rest_ r15; rest_ r16; rest_ r17; rest_ r18; rest_ r19 +| mtlr r0 +| rest_ r20; rest_ r21; rest_ r22; rest_ r23; rest_ r24; rest_ r25 +| rest_ r26; rest_ r27; rest_ r28; rest_ r29; rest_ r30; rest_ r31 +| addi sp, sp, CFRAME_SPACE +|.endmacro +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State, LREG +|.type GL, global_State +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS8, int +|.type TRACE, GCtrace +| +|//----------------------------------------------------------------------- +| +|// These basic macros should really be part of DynASM. +|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro +|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro +|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro +|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro +|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro +| +|// Trap for not-yet-implemented parts. +|.macro NYI; tw 4, sp, sp; .endmacro +| +|//----------------------------------------------------------------------- +| +|// Access to frame relative to BASE. +|.define FRAME_PC, -8 +|.define FRAME_FUNC, -4 +| +|// Instruction decode. +|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro +|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro +|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro +|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro +|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro +| +|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro +|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro +| +|// Instruction fetch. +|.macro ins_NEXT1 +| lwz INS, 0(PC) +| addi PC, PC, 4 +|.endmacro +|// Instruction decode+dispatch. +|.macro ins_NEXT2 +| decode_OP4 TMP1, INS +| decode_RB8 RB, INS +| decode_RD8 RD, INS +| lwzx TMP0, DISPATCH, TMP1 +| decode_RA8 RA, INS +| decode_RC8 RC, INS +| mtctr TMP0 +| bctr +|.endmacro +|.macro ins_NEXT +| ins_NEXT1 +| ins_NEXT2 +|.endmacro +| +|// Instruction footer. 
+|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. +| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +| .define ins_next1, ins_NEXT1 +| .define ins_next2, ins_NEXT2 +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| .macro ins_next +| b ->ins_next +| .endmacro +| .macro ins_next1 +| .endmacro +| .macro ins_next2 +| b ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Call decode and dispatch. +|.macro ins_callt +| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC +| lwz PC, LFUNC:RB->pc +| lwz INS, 0(PC) +| addi PC, PC, 4 +| decode_OP4 TMP1, INS +| decode_RA8 RA, INS +| lwzx TMP0, DISPATCH, TMP1 +| add RA, RA, BASE +| mtctr TMP0 +| bctr +|.endmacro +| +|.macro ins_call +| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC +| stw PC, FRAME_PC(BASE) +| ins_callt +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Macros to test operand types. +|.if SPE +|.macro checknum, reg; evcmpltu reg, TISNUM; .endmacro +|.macro checkstr, reg; evcmpeq reg, TISSTR; .endmacro +|.macro checktab, reg; evcmpeq reg, TISTAB; .endmacro +|.macro checkfunc, reg; evcmpeq reg, TISFUNC; .endmacro +|.macro checknil, reg; evcmpeq reg, TISNIL; .endmacro +|.macro checkok, label; blt label; .endmacro +|.macro checkfail, label; bge label; .endmacro +|.macro checkanyfail, label; bns label; .endmacro +|.macro checkallok, label; bso label; .endmacro +|.endif +| +|.macro branch_RD +| srwi TMP0, RD, 1 +| add PC, PC, TMP0 +| addis PC, PC, -(BCBIAS_J*4 >> 16) +|.endmacro +| +|// Assumes DISPATCH is relative to GL. +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|.macro hotloop +| NYI +|.endmacro +| +|.macro hotcall +| NYI +|.endmacro +| +|// Set current VM state. Uses TMP0. +|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro +|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro +| +|// Move table write barrier back. Overwrites mark and tmp. +|.macro barrierback, tab, mark, tmp +| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH) +| // Assumes LJ_GC_BLACK is 0x04. +| rlwinm mark, mark, 0, 30, 28 // black2gray(tab) +| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH) +| stb mark, tab->marked +| stw tmp, tab->gclist +|.endmacro +| +|//----------------------------------------------------------------------- + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | // See vm_return. Also: TMP2 = previous base. + | andi. TMP0, PC, FRAME_P + | evsplati TMP1, LJ_TTRUE + | beq ->cont_dispatch + | + | // Return from pcall or xpcall fast func. + | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame. + | mr BASE, TMP2 // Restore caller base. + | // Prepending may overwrite the pcall frame, so do it at the end. 
+ | stwu TMP1, FRAME_PC(RA) // Prepend true to results. + | + |->vm_returnc: + | andi. TMP0, PC, FRAME_TYPE + | addi RD, RD, 8 // RD = (nresults+1)*8. + | mr MULTRES, RD + | beq ->BC_RET_Z // Handle regular return to Lua. + | + |->vm_return: + | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return + | // TMP0 = PC & FRAME_TYPE + | cmpwi TMP0, FRAME_C + | rlwinm TMP2, PC, 0, 0, 28 + | li_vmstate C + | sub TMP2, BASE, TMP2 // TMP2 = previous base. + | bne ->vm_returnp + | + | addic. TMP1, RD, -8 + | stw TMP2, L->base + | lwz TMP2, SAVE_NRES + | subi BASE, BASE, 8 + | st_vmstate + | slwi TMP2, TMP2, 3 + | beq >2 + |1: + | addic. TMP1, TMP1, -8 + | evldd TMP0, 0(RA) + | addi RA, RA, 8 + | evstdd TMP0, 0(BASE) + | addi BASE, BASE, 8 + | bne <1 + | + |2: + | cmpw TMP2, RD // More/less results wanted? + | bne >6 + |3: + | stw BASE, L->top // Store new top. + | + |->vm_leave_cp: + | lwz TMP0, SAVE_CFRAME // Restore previous C frame. + | li CRET1, 0 // Ok return status for vm_pcall. + | stw TMP0, L->cframe + | + |->vm_leave_unw: + | restoreregs + | blr + | + |6: + | ble >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + | lwz TMP1, L->maxstack + | cmplw BASE, TMP1 + | bge >8 + | evstdd TISNIL, 0(BASE) + | addi RD, RD, 8 + | addi BASE, BASE, 8 + | b <2 + | + |7: // Less results wanted. + | sub TMP0, RD, TMP2 + | cmpwi TMP2, 0 // LUA_MULTRET+1 case? + | sub TMP0, BASE, TMP0 // Subtract the difference. + | iseleq BASE, BASE, TMP0 // Either keep top or shrink it. + | b <3 + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | stw BASE, L->top // Save current top held in BASE (yes). + | mr SAVE0, RD + | mr CARG2, TMP2 + | mr CARG1, L + | bl extern lj_state_growstack // (lua_State *L, int n) + | lwz TMP2, SAVE_NRES + | mr RD, SAVE0 + | slwi TMP2, TMP2, 3 + | lwz BASE, L->top // Need the (realloced) L->top in BASE. + | b <2 + | + |->vm_unwind_c: // Unwind C stack, return from vm_pcall. + | // (void *cframe, int errcode) + | mr sp, CARG1 + | mr CRET1, CARG2 + |->vm_unwind_c_eh: // Landing pad for external unwinder. + | lwz L, SAVE_L + | li TMP0, ~LJ_VMST_C + | lwz GL:TMP1, L->glref + | stw TMP0, GL:TMP1->vmstate + | b ->vm_leave_unw + | + |->vm_unwind_ff: // Unwind C stack, return from ff pcall. + | // (void *cframe) + | rlwinm sp, CARG1, 0, 0, 29 + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | lwz L, SAVE_L + | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants. + | evsplati TISFUNC, LJ_TFUNC + | lus TOBIT, 0x4338 + | evsplati TISTAB, LJ_TTAB + | li TMP0, 0 + | lwz BASE, L->base + | evmergelo TOBIT, TOBIT, TMP0 + | lwz DISPATCH, L->glref // Setup pointer to dispatch table. + | evsplati TISSTR, LJ_TSTR + | li TMP1, LJ_TFALSE + | evsplati TISNIL, LJ_TNIL + | li_vmstate INTERP + | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame. + | la RA, -8(BASE) // Results start at BASE-8. + | addi DISPATCH, DISPATCH, GG_G2DISP + | stw TMP1, 0(RA) // Prepend false to error message. + | li RD, 16 // 2 results: false + error message. 
+ | st_vmstate + | b ->vm_returnc + | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | li CARG2, LUA_MINSTACK + | b >2 + | + |->vm_growstack_l: // Grow stack for Lua function. + | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC + | add RC, BASE, RC + | sub RA, RA, BASE + | stw BASE, L->base + | addi PC, PC, 4 // Must point after first instruction. + | stw RC, L->top + | srwi CARG2, RA, 3 + |2: + | // L->base = new base, L->top = top + | stw PC, SAVE_PC + | mr CARG1, L + | bl extern lj_state_growstack // (lua_State *L, int n) + | lwz BASE, L->base + | lwz RC, L->top + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | sub RC, RC, BASE + | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + | mr L, CARG1 + | lwz DISPATCH, L->glref // Setup pointer to dispatch table. + | mr BASE, CARG2 + | lbz TMP1, L->status + | stw L, SAVE_L + | li PC, FRAME_CP + | addi TMP0, sp, CFRAME_RESUME + | addi DISPATCH, DISPATCH, GG_G2DISP + | stw CARG3, SAVE_NRES + | cmplwi TMP1, 0 + | stw CARG3, SAVE_ERRF + | stw TMP0, L->cframe + | stw CARG3, SAVE_CFRAME + | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | beq >3 + | + | // Resume after yield (like a return). + | mr RA, BASE + | lwz BASE, L->base + | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants. + | lwz TMP1, L->top + | evsplati TISFUNC, LJ_TFUNC + | lus TOBIT, 0x4338 + | evsplati TISTAB, LJ_TTAB + | lwz PC, FRAME_PC(BASE) + | li TMP2, 0 + | evsplati TISSTR, LJ_TSTR + | sub RD, TMP1, BASE + | evmergelo TOBIT, TOBIT, TMP2 + | stb CARG3, L->status + | andi. TMP0, PC, FRAME_TYPE + | li_vmstate INTERP + | addi RD, RD, 8 + | evsplati TISNIL, LJ_TNIL + | mr MULTRES, RD + | st_vmstate + | beq ->BC_RET_Z + | b ->vm_return + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | li PC, FRAME_CP + | stw CARG4, SAVE_ERRF + | b >1 + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | li PC, FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). + | lwz TMP1, L:CARG1->cframe + | stw CARG3, SAVE_NRES + | mr L, CARG1 + | stw CARG1, SAVE_L + | mr BASE, CARG2 + | stw sp, L->cframe // Add our C frame to cframe chain. + | lwz DISPATCH, L->glref // Setup pointer to dispatch table. + | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | stw TMP1, SAVE_CFRAME + | addi DISPATCH, DISPATCH, GG_G2DISP + | + |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). + | lwz TMP2, L->base // TMP2 = old base (used in vmeta_call). + | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants. 
+ | lwz TMP1, L->top + | evsplati TISFUNC, LJ_TFUNC + | add PC, PC, BASE + | evsplati TISTAB, LJ_TTAB + | lus TOBIT, 0x4338 + | li TMP0, 0 + | sub PC, PC, TMP2 // PC = frame delta + frame type + | evsplati TISSTR, LJ_TSTR + | sub NARGS8:RC, TMP1, BASE + | evmergelo TOBIT, TOBIT, TMP0 + | li_vmstate INTERP + | evsplati TISNIL, LJ_TNIL + | st_vmstate + | + |->vm_call_dispatch: + | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC + | li TMP0, -8 + | evlddx LFUNC:RB, BASE, TMP0 + | checkfunc LFUNC:RB + | checkfail ->vmeta_call + | + |->vm_call_dispatch_f: + | ins_call + | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + | mr L, CARG1 + | lwz TMP0, L:CARG1->stack + | stw CARG1, SAVE_L + | lwz TMP1, L->top + | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. + | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top). + | lwz TMP1, L->cframe + | stw sp, L->cframe // Add our C frame to cframe chain. + | li TMP2, 0 + | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame. + | stw TMP2, SAVE_ERRF // No error function. + | stw TMP1, SAVE_CFRAME + | mtctr CARG4 + | bctrl // (lua_State *L, lua_CFunction func, void *ud) + | mr. BASE, CRET1 + | lwz DISPATCH, L->glref // Setup pointer to dispatch table. + | li PC, FRAME_CP + | addi DISPATCH, DISPATCH, GG_G2DISP + | bne <3 // Else continue with the call. + | b ->vm_leave_cp // No base? Just remove C frame. + | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the + |// stack, so BASE doesn't need to be reloaded across these calls. + | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8 + | lwz TMP0, -12(BASE) // Continuation. + | mr RB, BASE + | mr BASE, TMP2 // Restore caller BASE. + | lwz LFUNC:TMP1, FRAME_FUNC(TMP2) + | cmplwi TMP0, 0 + | lwz PC, -16(RB) // Restore PC from [cont|PC]. + | beq >1 + | subi TMP2, RD, 8 + | lwz TMP1, LFUNC:TMP1->pc + | evstddx TISNIL, RA, TMP2 // Ensure one valid arg. + | lwz KBASE, PC2PROTO(k)(TMP1) + | // BASE = base, RA = resultptr, RB = meta base + | mtctr TMP0 + | bctr // Jump to continuation. + | + |1: // Tail call from C function. 
+ | subi TMP1, RB, 16 + | sub RC, TMP1, BASE + | b ->vm_call_tail + | + |->cont_cat: // RA = resultptr, RB = meta base + | lwz INS, -4(PC) + | subi CARG2, RB, 16 + | decode_RB8 SAVE0, INS + | evldd TMP0, 0(RA) + | add TMP1, BASE, SAVE0 + | stw BASE, L->base + | cmplw TMP1, CARG2 + | sub CARG3, CARG2, TMP1 + | decode_RA8 RA, INS + | evstdd TMP0, 0(CARG2) + | bne ->BC_CAT_Z + | evstddx TMP0, BASE, RA + | b ->cont_nop + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets1: + | evmergelo STR:RC, TISSTR, STR:RC + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | decode_RB8 RB, INS + | evstdd STR:RC, 0(CARG3) + | add CARG2, BASE, RB + | b >1 + | + |->vmeta_tgets: + | evmergelo TAB:RB, TISTAB, TAB:RB + | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) + | evmergelo STR:RC, TISSTR, STR:RC + | evstdd TAB:RB, 0(CARG2) + | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH) + | evstdd STR:RC, 0(CARG3) + | b >1 + | + |->vmeta_tgetb: // TMP0 = index + | efdcfsi TMP0, TMP0 + | decode_RB8 RB, INS + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | add CARG2, BASE, RB + | evstdd TMP0, 0(CARG3) + | b >1 + | + |->vmeta_tgetv: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG2, BASE, RB + | add CARG3, BASE, RC + |1: + | stw BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | cmplwi CRET1, 0 + | beq >3 + | evldd TMP0, 0(CRET1) + | evstddx TMP0, BASE, RA + | ins_next + | + |3: // Call __index metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k + | subfic TMP1, BASE, FRAME_CONT + | lwz BASE, L->top + | stw PC, -16(BASE) // [cont|PC] + | add PC, TMP1, BASE + | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | li NARGS8:RC, 16 // 2 args for func(t, k). + | b ->vm_call_dispatch_f + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets1: + | evmergelo STR:RC, TISSTR, STR:RC + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | decode_RB8 RB, INS + | evstdd STR:RC, 0(CARG3) + | add CARG2, BASE, RB + | b >1 + | + |->vmeta_tsets: + | evmergelo TAB:RB, TISTAB, TAB:RB + | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) + | evmergelo STR:RC, TISSTR, STR:RC + | evstdd TAB:RB, 0(CARG2) + | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH) + | evstdd STR:RC, 0(CARG3) + | b >1 + | + |->vmeta_tsetb: // TMP0 = index + | efdcfsi TMP0, TMP0 + | decode_RB8 RB, INS + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | add CARG2, BASE, RB + | evstdd TMP0, 0(CARG3) + | b >1 + | + |->vmeta_tsetv: + | decode_RB8 RB, INS + | decode_RC8 RC, INS + | add CARG2, BASE, RB + | add CARG3, BASE, RC + |1: + | stw BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + | // Returns TValue * (finished) or NULL (metamethod). + | cmplwi CRET1, 0 + | evlddx TMP0, BASE, RA + | beq >3 + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | evstdd TMP0, 0(CRET1) + | ins_next + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | subfic TMP1, BASE, FRAME_CONT + | lwz BASE, L->top + | stw PC, -16(BASE) // [cont|PC] + | add PC, TMP1, BASE + | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | li NARGS8:RC, 24 // 3 args for func(t, k, v) + | evstdd TMP0, 16(BASE) // Copy value to third argument. 
+ | b ->vm_call_dispatch_f + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + | mr CARG1, L + | subi PC, PC, 4 + | add CARG2, BASE, RA + | stw PC, SAVE_PC + | add CARG3, BASE, RD + | stw BASE, L->base + | decode_OP1 CARG4, INS + | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + | // Returns 0/1 or TValue * (metamethod). + |3: + | cmplwi CRET1, 1 + | bgt ->vmeta_binop + |4: + | lwz INS, 0(PC) + | addi PC, PC, 4 + | decode_RD4 TMP2, INS + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | add TMP2, TMP2, TMP3 + | isellt PC, PC, TMP2 + |->cont_nop: + | ins_next + | + |->cont_ra: // RA = resultptr + | lwz INS, -4(PC) + | evldd TMP0, 0(RA) + | decode_RA8 TMP1, INS + | evstddx TMP0, BASE, TMP1 + | b ->cont_nop + | + |->cont_condt: // RA = resultptr + | lwz TMP0, 0(RA) + | li TMP1, LJ_TTRUE + | cmplw TMP1, TMP0 // Branch if result is true. + | b <4 + | + |->cont_condf: // RA = resultptr + | lwz TMP0, 0(RA) + | li TMP1, LJ_TFALSE + | cmplw TMP0, TMP1 // Branch if result is false. + | b <4 + | + |->vmeta_equal: + | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. + | subi PC, PC, 4 + | stw BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + | // Returns 0/1 or TValue * (metamethod). + | b <3 + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_arith_vn: + | add CARG3, BASE, RB + | add CARG4, KBASE, RC + | b >1 + | + |->vmeta_arith_nv: + | add CARG3, KBASE, RC + | add CARG4, BASE, RB + | b >1 + | + |->vmeta_unm: + | add CARG3, BASE, RD + | mr CARG4, CARG3 + | b >1 + | + |->vmeta_arith_vv: + | add CARG3, BASE, RB + | add CARG4, BASE, RC + |1: + | add CARG2, BASE, RA + | stw BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS. + | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + | // Returns NULL (finished) or TValue * (metamethod). + | cmplwi CRET1, 0 + | beq ->cont_nop + | + | // Call metamethod for binary op. + |->vmeta_binop: + | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2 + | sub TMP1, CRET1, BASE + | stw PC, -16(CRET1) // [cont|PC] + | addi PC, TMP1, FRAME_CONT + | mr BASE, CRET1 + | li NARGS8:RC, 16 // 2 args for func(o1, o2). + | b ->vm_call_dispatch + | + |->vmeta_len: +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | mr SAVE0, CARG1 +#endif + | add CARG2, BASE, RD + | stw BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | bl extern lj_meta_len // (lua_State *L, TValue *o) + | // Returns NULL (retry) or TValue * (metamethod base). +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | cmplwi CRET1, 0 + | bne ->vmeta_binop // Binop call for compatibility. + | mr CARG1, SAVE0 + | b ->BC_LEN_Z +#else + | b ->vmeta_binop // Binop call for compatibility. +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call: // Resolve and call __call metamethod. + | // TMP2 = old base, BASE = new base, RC = nargs*8 + | mr CARG1, L + | stw TMP2, L->base // This is the callers base! + | subi CARG2, BASE, 8 + | stw PC, SAVE_PC + | add CARG3, BASE, RC + | mr SAVE0, NARGS8:RC + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. + | addi NARGS8:RC, SAVE0, 8 // Got one more argument now. + | ins_call + | + |->vmeta_callt: // Resolve __call for BC_CALLT. 
+ | // BASE = old base, RA = new base, RC = nargs*8 + | mr CARG1, L + | stw BASE, L->base + | subi CARG2, RA, 8 + | stw PC, SAVE_PC + | add CARG3, RA, RC + | mr SAVE0, NARGS8:RC + | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | lwz TMP1, FRAME_PC(BASE) + | addi NARGS8:RC, SAVE0, 8 // Got one more argument now. + | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here. + | b ->BC_CALLT_Z + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | mr CARG1, L + | stw BASE, L->base + | mr CARG2, RA + | stw PC, SAVE_PC + | mr SAVE0, INS + | bl extern lj_meta_for // (lua_State *L, TValue *base) +#if LJ_HASJIT + | decode_OP1 TMP0, SAVE0 +#endif + | decode_RA8 RA, SAVE0 +#if LJ_HASJIT + | cmpwi TMP0, BC_JFORI +#endif + | decode_RD8 RD, SAVE0 +#if LJ_HASJIT + | beq =>BC_JFORI +#endif + | b =>BC_FORI + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | cmplwi NARGS8:RC, 8 + | evldd CARG1, 0(BASE) + | blt ->fff_fallback + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. name: + | cmplwi NARGS8:RC, 16 + | evldd CARG1, 0(BASE) + | evldd CARG2, 8(BASE) + | blt ->fff_fallback + |.endmacro + | + |.macro .ffunc_n, name + | .ffunc_1 name + | checknum CARG1 + | checkfail ->fff_fallback + |.endmacro + | + |.macro .ffunc_nn, name + | .ffunc_2 name + | evmergehi TMP0, CARG1, CARG2 + | checknum TMP0 + | checkanyfail ->fff_fallback + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1. + |.macro ffgccheck + | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH) + | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) + | cmplw TMP0, TMP1 + | bgel ->fff_gcstep + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc assert + | cmplwi NARGS8:RC, 8 + | evldd TMP0, 0(BASE) + | blt ->fff_fallback + | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE. + | la RA, -8(BASE) + | evcmpltu cr1, TMP0, TMP1 + | lwz PC, FRAME_PC(BASE) + | bge cr1, ->fff_fallback + | evstdd TMP0, 0(RA) + | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8. + | beq ->fff_res // Done if exactly 1 argument. + | li TMP1, 8 + | subi RC, RC, 8 + |1: + | cmplw TMP1, RC + | evlddx TMP0, BASE, TMP1 + | evstddx TMP0, RA, TMP1 + | addi TMP1, TMP1, 8 + | bne <1 + | b ->fff_res + | + |.ffunc type + | cmplwi NARGS8:RC, 8 + | lwz CARG1, 0(BASE) + | blt ->fff_fallback + | li TMP2, ~LJ_TNUMX + | cmplw CARG1, TISNUM + | not TMP1, CARG1 + | isellt TMP1, TMP2, TMP1 + | slwi TMP1, TMP1, 3 + | la TMP2, CFUNC:RB->upvalue + | evlddx STR:CRET1, TMP2, TMP1 + | b ->fff_restv + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | checktab CARG1 + | evmergehi TMP1, CARG1, CARG1 + | checkfail >6 + |1: // Field metatable must be at same offset for GCtab and GCudata! + | lwz TAB:RB, TAB:CARG1->metatable + |2: + | evmr CRET1, TISNIL + | cmplwi TAB:RB, 0 + | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH) + | beq ->fff_restv + | lwz TMP0, TAB:RB->hmask + | evmergelo CRET1, TISTAB, TAB:RB // Use metatable as default result. 
+ | lwz TMP1, STR:RC->hash + | lwz NODE:TMP2, TAB:RB->node + | evmergelo STR:RC, TISSTR, STR:RC + | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask + | slwi TMP0, TMP1, 5 + | slwi TMP1, TMP1, 3 + | sub TMP1, TMP0, TMP1 + | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) + |3: // Rearranged logic, because we expect _not_ to find the key. + | evldd TMP0, NODE:TMP2->key + | evldd TMP1, NODE:TMP2->val + | evcmpeq TMP0, STR:RC + | lwz NODE:TMP2, NODE:TMP2->next + | checkallok >5 + | cmplwi NODE:TMP2, 0 + | beq ->fff_restv // Not found, keep default result. + | b <3 + |5: + | checknil TMP1 + | checkok ->fff_restv // Ditto for nil value. + | evmr CRET1, TMP1 // Return value of mt.__metatable. + | b ->fff_restv + | + |6: + | cmpwi TMP1, LJ_TUDATA + | not TMP1, TMP1 + | beq <1 + | checknum CARG1 + | slwi TMP1, TMP1, 2 + | li TMP2, 4*~LJ_TNUMX + | isellt TMP1, TMP2, TMP1 + | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH) + | lwzx TAB:RB, TMP2, TMP1 + | b <2 + | + |.ffunc_2 setmetatable + | // Fast path: no mt for table yet and not clearing the mt. + | evmergehi TMP0, TAB:CARG1, TAB:CARG2 + | checktab TMP0 + | checkanyfail ->fff_fallback + | lwz TAB:TMP1, TAB:CARG1->metatable + | cmplwi TAB:TMP1, 0 + | lbz TMP3, TAB:CARG1->marked + | bne ->fff_fallback + | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table) + | stw TAB:CARG2, TAB:CARG1->metatable + | beq ->fff_restv + | barrierback TAB:CARG1, TMP3, TMP0 + | b ->fff_restv + | + |.ffunc rawget + | cmplwi NARGS8:RC, 16 + | evldd CARG2, 0(BASE) + | blt ->fff_fallback + | checktab CARG2 + | la CARG3, 8(BASE) + | checkfail ->fff_fallback + | mr CARG1, L + | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + | // Returns cTValue *. + | evldd CRET1, 0(CRET1) + | b ->fff_restv + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc tonumber + | // Only handles the number case inline (without a base argument). + | cmplwi NARGS8:RC, 8 + | evldd CARG1, 0(BASE) + | bne ->fff_fallback // Exactly one argument. + | checknum CARG1 + | checkok ->fff_restv + | b ->fff_fallback + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. + | checkstr CARG1 + | // A __tostring method in the string base metatable is ignored. + | checkok ->fff_restv // String key? + | // Handle numbers inline, unless a number base metatable is present. + | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH) + | checknum CARG1 + | cmplwi cr1, TMP0, 0 + | stw BASE, L->base // Add frame since C call can throw. + |.if SPE + | crand 4*cr0+eq, 4*cr0+lt, 4*cr1+eq + |.else + |.error "NYI" + |.endif + | stw PC, SAVE_PC // Redundant (but a defined value). + | bne ->fff_fallback + | ffgccheck + | mr CARG1, L + | mr CARG2, BASE + | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np) + | // Returns GCstr *. + | evmergelo STR:CRET1, TISSTR, STR:CRET1 + | b ->fff_restv + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc next + | cmplwi NARGS8:RC, 8 + | evldd CARG2, 0(BASE) + | blt ->fff_fallback + | evstddx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil. + | checktab TAB:CARG2 + | lwz PC, FRAME_PC(BASE) + | checkfail ->fff_fallback + | stw BASE, L->base // Add frame since C call can throw. + | mr CARG1, L + | stw BASE, L->top // Dummy frame length is ok. + | la CARG3, 8(BASE) + | stw PC, SAVE_PC + | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + | // Returns 0 at end of traversal. 
+ | cmplwi CRET1, 0 + | evmr CRET1, TISNIL + | beq ->fff_restv // End of traversal: return nil. + | evldd TMP0, 8(BASE) // Copy key and value to results. + | la RA, -8(BASE) + | evldd TMP1, 16(BASE) + | evstdd TMP0, 0(RA) + | li RD, (2+1)*8 + | evstdd TMP1, 8(RA) + | b ->fff_res + | + |.ffunc_1 pairs + | checktab TAB:CARG1 + | lwz PC, FRAME_PC(BASE) + | checkfail ->fff_fallback +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | lwz TAB:TMP2, TAB:CARG1->metatable + | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0] + | cmplwi TAB:TMP2, 0 + | la RA, -8(BASE) + | bne ->fff_fallback +#else + | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0] + | la RA, -8(BASE) +#endif + | evstdd TISNIL, 8(BASE) + | li RD, (3+1)*8 + | evstdd CFUNC:TMP0, 0(RA) + | b ->fff_res + | + |.ffunc_2 ipairs_aux + | checktab TAB:CARG1 + | lwz PC, FRAME_PC(BASE) + | checkfail ->fff_fallback + | checknum CARG2 + | lus TMP3, 0x3ff0 + | checkfail ->fff_fallback + | efdctsi TMP2, CARG2 + | lwz TMP0, TAB:CARG1->asize + | evmergelo TMP3, TMP3, ZERO + | lwz TMP1, TAB:CARG1->array + | efdadd CARG2, CARG2, TMP3 + | addi TMP2, TMP2, 1 + | la RA, -8(BASE) + | cmplw TMP0, TMP2 + | slwi TMP3, TMP2, 3 + | evstdd CARG2, 0(RA) + | ble >2 // Not in array part? + | evlddx TMP1, TMP1, TMP3 + |1: + | checknil TMP1 + | li RD, (0+1)*8 + | checkok ->fff_res // End of iteration, return 0 results. + | li RD, (2+1)*8 + | evstdd TMP1, 8(RA) + | b ->fff_res + |2: // Check for empty hash part first. Otherwise call C function. + | lwz TMP0, TAB:CARG1->hmask + | cmplwi TMP0, 0 + | li RD, (0+1)*8 + | beq ->fff_res + | mr CARG2, TMP2 + | bl extern lj_tab_getinth // (GCtab *t, int32_t key) + | // Returns cTValue * or NULL. + | cmplwi CRET1, 0 + | li RD, (0+1)*8 + | beq ->fff_res + | evldd TMP1, 0(CRET1) + | b <1 + | + |.ffunc_1 ipairs + | checktab TAB:CARG1 + | lwz PC, FRAME_PC(BASE) + | checkfail ->fff_fallback +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | lwz TAB:TMP2, TAB:CARG1->metatable + | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0] + | cmplwi TAB:TMP2, 0 + | la RA, -8(BASE) + | bne ->fff_fallback +#else + | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0] + | la RA, -8(BASE) +#endif + | evsplati TMP1, 0 + | li RD, (3+1)*8 + | evstdd TMP1, 8(BASE) + | evstdd CFUNC:TMP0, 0(RA) + | b ->fff_res + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc pcall + | cmplwi NARGS8:RC, 8 + | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | blt ->fff_fallback + | mr TMP2, BASE + | la BASE, 8(BASE) + | // Remember active hook before pcall. + | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31 + | subi NARGS8:RC, NARGS8:RC, 8 + | addi PC, TMP3, 8+FRAME_PCALL + | b ->vm_call_dispatch + | + |.ffunc_2 xpcall + | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | mr TMP2, BASE + | checkfunc CARG2 // Traceback must be a function. + | checkfail ->fff_fallback + | la BASE, 16(BASE) + | // Remember active hook before pcall. + | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31 + | evstdd CARG2, 0(TMP2) // Swap function and traceback. 
+ | subi NARGS8:RC, NARGS8:RC, 16 + | evstdd CARG1, 8(TMP2) + | addi PC, TMP3, 16+FRAME_PCALL + | b ->vm_call_dispatch + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | evmergehi TMP0, L:CARG1, L:CARG1 + |.else + |.ffunc coroutine_wrap_aux + | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr + |.endif + |.if resume + | cmpwi TMP0, LJ_TTHREAD + | bne ->fff_fallback + |.endif + | lbz TMP0, L:CARG1->status + | lwz TMP1, L:CARG1->cframe + | lwz CARG2, L:CARG1->top + | cmplwi cr0, TMP0, LUA_YIELD + | lwz TMP2, L:CARG1->base + | cmplwi cr1, TMP1, 0 + | lwz TMP0, L:CARG1->maxstack + | cmplw cr7, CARG2, TMP2 + | lwz PC, FRAME_PC(BASE) + | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0 + | add TMP2, CARG2, NARGS8:RC + | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD + | cmplw cr1, TMP2, TMP0 + | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt + | stw PC, SAVE_PC + | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov + | stw BASE, L->base + | blt cr6, ->fff_fallback + |1: + |.if resume + | addi BASE, BASE, 8 // Keep resumed thread in stack for GC. + | subi NARGS8:RC, NARGS8:RC, 8 + | subi TMP2, TMP2, 8 + |.endif + | stw TMP2, L:CARG1->top + | li TMP1, 0 + | stw BASE, L->top + |2: // Move args to coroutine. + | cmpw TMP1, NARGS8:RC + | evlddx TMP0, BASE, TMP1 + | beq >3 + | evstddx TMP0, CARG2, TMP1 + | addi TMP1, TMP1, 8 + | b <2 + |3: + | li CARG3, 0 + | mr L:SAVE0, L:CARG1 + | li CARG4, 0 + | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0) + | // Returns thread status. + |4: + | lwz TMP2, L:SAVE0->base + | cmplwi CRET1, LUA_YIELD + | lwz TMP3, L:SAVE0->top + | li_vmstate INTERP + | lwz BASE, L->base + | st_vmstate + | bgt >8 + | sub RD, TMP3, TMP2 + | lwz TMP0, L->maxstack + | cmplwi RD, 0 + | add TMP1, BASE, RD + | beq >6 // No results? + | cmplw TMP1, TMP0 + | li TMP1, 0 + | bgt >9 // Need to grow stack? + | + | subi TMP3, RD, 8 + | stw TMP2, L:SAVE0->top // Clear coroutine stack. + |5: // Move results from coroutine. + | cmplw TMP1, TMP3 + | evlddx TMP0, TMP2, TMP1 + | evstddx TMP0, BASE, TMP1 + | addi TMP1, TMP1, 8 + | bne <5 + |6: + | andi. TMP0, PC, FRAME_TYPE + |.if resume + | li TMP1, LJ_TTRUE + | la RA, -8(BASE) + | stw TMP1, -8(BASE) // Prepend true to results. + | addi RD, RD, 16 + |.else + | mr RA, BASE + | addi RD, RD, 8 + |.endif + |7: + | stw PC, SAVE_PC + | mr MULTRES, RD + | beq ->BC_RET_Z + | b ->vm_return + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | andi. TMP0, PC, FRAME_TYPE + | la TMP3, -8(TMP3) + | li TMP1, LJ_TFALSE + | evldd TMP0, 0(TMP3) + | stw TMP3, L:SAVE0->top // Remove error from coroutine stack. + | li RD, (2+1)*8 + | stw TMP1, -8(BASE) // Prepend false to results. + | la RA, -8(BASE) + | evstdd TMP0, 0(BASE) // Copy error message. + | b <7 + |.else + | mr CARG1, L + | mr CARG2, L:SAVE0 + | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co) + |.endif + | + |9: // Handle stack expansion on return from yield. + | mr CARG1, L + | srwi CARG2, RD, 3 + | bl extern lj_state_growstack // (lua_State *L, int n) + | li CRET1, 0 + | b <4 + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | lwz TMP0, L->cframe + | add TMP1, BASE, NARGS8:RC + | stw BASE, L->base + | andi. 
TMP0, TMP0, CFRAME_RESUME + | stw TMP1, L->top + | li CRET1, LUA_YIELD + | beq ->fff_fallback + | stw ZERO, L->cframe + | stb CRET1, L->status + | b ->vm_leave_unw + | + |//-- Math library ------------------------------------------------------- + | + |.ffunc_n math_abs + | efdabs CRET1, CARG1 + | // Fallthrough. + | + |->fff_restv: + | // CRET1 = TValue result. + | lwz PC, FRAME_PC(BASE) + | la RA, -8(BASE) + | evstdd CRET1, 0(RA) + |->fff_res1: + | // RA = results, PC = return. + | li RD, (1+1)*8 + |->fff_res: + | // RA = results, RD = (nresults+1)*8, PC = return. + | andi. TMP0, PC, FRAME_TYPE + | mr MULTRES, RD + | bne ->vm_return + | lwz INS, -4(PC) + | decode_RB8 RB, INS + |5: + | cmplw RB, RD // More results expected? + | decode_RA8 TMP0, INS + | bgt >6 + | ins_next1 + | // Adjust BASE. KBASE is assumed to be set for the calling frame. + | sub BASE, RA, TMP0 + | ins_next2 + | + |6: // Fill up results with nil. + | subi TMP1, RD, 8 + | addi RD, RD, 8 + | evstddx TISNIL, RA, TMP1 + | b <5 + | + |.macro math_extern, func + | .ffunc math_ .. func + | cmplwi NARGS8:RC, 8 + | evldd CARG2, 0(BASE) + | blt ->fff_fallback + | checknum CARG2 + | evmergehi CARG1, CARG2, CARG2 + | checkfail ->fff_fallback + | bl extern func + | evmergelo CRET1, CRET1, CRET2 + | b ->fff_restv + |.endmacro + | + |.macro math_extern2, func + | .ffunc math_ .. func + | cmplwi NARGS8:RC, 16 + | evldd CARG2, 0(BASE) + | evldd CARG4, 8(BASE) + | blt ->fff_fallback + | evmergehi CARG1, CARG4, CARG2 + | checknum CARG1 + | evmergehi CARG3, CARG4, CARG4 + | checkanyfail ->fff_fallback + | bl extern func + | evmergelo CRET1, CRET1, CRET2 + | b ->fff_restv + |.endmacro + | + |.macro math_round, func + | .ffunc math_ .. func + | cmplwi NARGS8:RC, 8 + | evldd CARG2, 0(BASE) + | blt ->fff_fallback + | checknum CARG2 + | evmergehi CARG1, CARG2, CARG2 + | checkfail ->fff_fallback + | lwz PC, FRAME_PC(BASE) + | bl ->vm_..func.._hilo; + | la RA, -8(BASE) + | evstdd CRET2, 0(RA) + | b ->fff_res1 + |.endmacro + | + | math_round floor + | math_round ceil + | + | math_extern sqrt + | math_extern log + | math_extern log10 + | math_extern exp + | math_extern sin + | math_extern cos + | math_extern tan + | math_extern asin + | math_extern acos + | math_extern atan + | math_extern sinh + | math_extern cosh + | math_extern tanh + | math_extern2 pow + | math_extern2 atan2 + | math_extern2 fmod + | + |->ff_math_deg: + |.ffunc_n math_rad + | evldd CARG2, CFUNC:RB->upvalue[0] + | efdmul CRET1, CARG1, CARG2 + | b ->fff_restv + | + |.ffunc math_ldexp + | cmplwi NARGS8:RC, 16 + | evldd CARG2, 0(BASE) + | evldd CARG4, 8(BASE) + | blt ->fff_fallback + | evmergehi CARG1, CARG4, CARG2 + | checknum CARG1 + | checkanyfail ->fff_fallback + | efdctsi CARG3, CARG4 + | bl extern ldexp + | evmergelo CRET1, CRET1, CRET2 + | b ->fff_restv + | + |.ffunc math_frexp + | cmplwi NARGS8:RC, 8 + | evldd CARG2, 0(BASE) + | blt ->fff_fallback + | checknum CARG2 + | evmergehi CARG1, CARG2, CARG2 + | checkfail ->fff_fallback + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | lwz PC, FRAME_PC(BASE) + | bl extern frexp + | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH) + | evmergelo CRET1, CRET1, CRET2 + | efdcfsi CRET2, TMP1 + | la RA, -8(BASE) + | evstdd CRET1, 0(RA) + | li RD, (2+1)*8 + | evstdd CRET2, 8(RA) + | b ->fff_res + | + |.ffunc math_modf + | cmplwi NARGS8:RC, 8 + | evldd CARG2, 0(BASE) + | blt ->fff_fallback + | checknum CARG2 + | evmergehi CARG1, CARG2, CARG2 + | checkfail ->fff_fallback + | la CARG3, -8(BASE) + | lwz PC, FRAME_PC(BASE) + | bl extern modf + | evmergelo 
CRET1, CRET1, CRET2 + | la RA, -8(BASE) + | evstdd CRET1, 0(BASE) + | li RD, (2+1)*8 + | b ->fff_res + | + |.macro math_minmax, name, cmpop + | .ffunc_1 name + | checknum CARG1 + | li TMP1, 8 + | checkfail ->fff_fallback + |1: + | evlddx CARG2, BASE, TMP1 + | cmplw cr1, TMP1, NARGS8:RC + | checknum CARG2 + | bge cr1, ->fff_restv // Ok, since CRET1 = CARG1. + | checkfail ->fff_fallback + | cmpop CARG2, CARG1 + | addi TMP1, TMP1, 8 + | crmove 4*cr0+lt, 4*cr0+gt + | evsel CARG1, CARG2, CARG1 + | b <1 + |.endmacro + | + | math_minmax math_min, efdtstlt + | math_minmax math_max, efdtstgt + | + |//-- String library ----------------------------------------------------- + | + |.ffunc_1 string_len + | checkstr STR:CARG1 + | checkfail ->fff_fallback + | lwz TMP0, STR:CARG1->len + | efdcfsi CRET1, TMP0 + | b ->fff_restv + | + |.ffunc string_byte // Only handle the 1-arg case here. + | cmplwi NARGS8:RC, 8 + | evldd STR:CARG1, 0(BASE) + | bne ->fff_fallback // Need exactly 1 argument. + | checkstr STR:CARG1 + | la RA, -8(BASE) + | checkfail ->fff_fallback + | lwz TMP0, STR:CARG1->len + | li RD, (0+1)*8 + | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end). + | li TMP2, (1+1)*8 + | cmplwi TMP0, 0 + | lwz PC, FRAME_PC(BASE) + | efdcfsi CRET1, TMP1 + | iseleq RD, RD, TMP2 + | evstdd CRET1, 0(RA) + | b ->fff_res + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | cmplwi NARGS8:RC, 8 + | evldd CARG1, 0(BASE) + | bne ->fff_fallback // Exactly 1 argument. + | checknum CARG1 + | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) + | checkfail ->fff_fallback + | efdctsiz TMP0, CARG1 + | li CARG3, 1 + | cmplwi TMP0, 255 + | stb TMP0, 0(CARG2) + | bgt ->fff_fallback + |->fff_newstr: + | mr CARG1, L + | stw BASE, L->base + | stw PC, SAVE_PC + | bl extern lj_str_new // (lua_State *L, char *str, size_t l) + | // Returns GCstr *. + | lwz BASE, L->base + | evmergelo STR:CRET1, TISSTR, STR:CRET1 + | b ->fff_restv + | + |.ffunc string_sub + | ffgccheck + | cmplwi NARGS8:RC, 16 + | evldd CARG3, 16(BASE) + | evldd STR:CARG1, 0(BASE) + | blt ->fff_fallback + | evldd CARG2, 8(BASE) + | li TMP2, -1 + | beq >1 + | checknum CARG3 + | checkfail ->fff_fallback + | efdctsiz TMP2, CARG3 + |1: + | checknum CARG2 + | checkfail ->fff_fallback + | checkstr STR:CARG1 + | efdctsiz TMP1, CARG2 + | checkfail ->fff_fallback + | lwz TMP0, STR:CARG1->len + | cmplw TMP0, TMP2 // len < end? (unsigned compare) + | add TMP3, TMP2, TMP0 + | blt >5 + |2: + | cmpwi TMP1, 0 // start <= 0? + | add TMP3, TMP1, TMP0 + | ble >7 + |3: + | sub. CARG3, TMP2, TMP1 + | addi CARG2, STR:CARG1, #STR-1 + | addi CARG3, CARG3, 1 + | add CARG2, CARG2, TMP1 + | isellt CARG3, r0, CARG3 + | b ->fff_newstr + | + |5: // Negative end or overflow. + | cmpw TMP0, TMP2 + | addi TMP3, TMP3, 1 + | iselgt TMP2, TMP3, TMP0 // end = end > len ? len : end+len+1 + | b <2 + | + |7: // Negative start or underflow. + | cmpwi cr1, TMP3, 0 + | iseleq TMP1, r0, TMP3 + | isel TMP1, r0, TMP1, 4*cr1+lt + | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0) + | b <3 + | + |.ffunc string_rep // Only handle the 1-char case inline. + | ffgccheck + | cmplwi NARGS8:RC, 16 + | evldd CARG1, 0(BASE) + | evldd CARG2, 8(BASE) + | blt ->fff_fallback + | checknum CARG2 + | checkfail ->fff_fallback + | checkstr STR:CARG1 + | efdctsiz CARG3, CARG2 + | checkfail ->fff_fallback + | lwz TMP0, STR:CARG1->len + | cmpwi CARG3, 0 + | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH) + | ble >2 // Count <= 0? 
(or non-int) + | cmplwi TMP0, 1 + | subi TMP2, CARG3, 1 + | blt >2 // Zero length string? + | cmplw cr1, TMP1, CARG3 + | bne ->fff_fallback // Fallback for > 1-char strings. + | lbz TMP0, STR:CARG1[1] + | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH) + | blt cr1, ->fff_fallback + |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?). + | cmplwi TMP2, 0 + | stbx TMP0, CARG2, TMP2 + | subi TMP2, TMP2, 1 + | bne <1 + | b ->fff_newstr + |2: // Return empty string. + | la STR:CRET1, DISPATCH_GL(strempty)(DISPATCH) + | evmergelo CRET1, TISSTR, STR:CRET1 + | b ->fff_restv + | + |.ffunc string_reverse + | ffgccheck + | cmplwi NARGS8:RC, 8 + | evldd CARG1, 0(BASE) + | blt ->fff_fallback + | checkstr STR:CARG1 + | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH) + | checkfail ->fff_fallback + | lwz CARG3, STR:CARG1->len + | la CARG1, #STR(STR:CARG1) + | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH) + | li TMP2, 0 + | cmplw TMP1, CARG3 + | subi TMP3, CARG3, 1 + | blt ->fff_fallback + |1: // Reverse string copy. + | cmpwi TMP3, 0 + | lbzx TMP1, CARG1, TMP2 + | blt ->fff_newstr + | stbx TMP1, CARG2, TMP3 + | subi TMP3, TMP3, 1 + | addi TMP2, TMP2, 1 + | b <1 + | + |.macro ffstring_case, name, lo + | .ffunc name + | ffgccheck + | cmplwi NARGS8:RC, 8 + | evldd CARG1, 0(BASE) + | blt ->fff_fallback + | checkstr STR:CARG1 + | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH) + | checkfail ->fff_fallback + | lwz CARG3, STR:CARG1->len + | la CARG1, #STR(STR:CARG1) + | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH) + | cmplw TMP1, CARG3 + | li TMP2, 0 + | blt ->fff_fallback + |1: // ASCII case conversion. + | cmplw TMP2, CARG3 + | lbzx TMP1, CARG1, TMP2 + | bge ->fff_newstr + | subi TMP0, TMP1, lo + | xori TMP3, TMP1, 0x20 + | cmplwi TMP0, 26 + | isellt TMP1, TMP3, TMP1 + | stbx TMP1, CARG2, TMP2 + | addi TMP2, TMP2, 1 + | b <1 + |.endmacro + | + |ffstring_case string_lower, 65 + |ffstring_case string_upper, 97 + | + |//-- Table library ------------------------------------------------------ + | + |.ffunc_1 table_getn + | checktab CARG1 + | checkfail ->fff_fallback + | bl extern lj_tab_len // (GCtab *t) + | // Returns uint32_t (but less than 2^31). 
+ | efdcfsi CRET1, CRET1 + | b ->fff_restv + | + |//-- Bit library -------------------------------------------------------- + | + |.macro .ffunc_bit, name + | .ffunc_n bit_..name + | efdadd CARG1, CARG1, TOBIT + |.endmacro + | + |.ffunc_bit tobit + |->fff_resbit: + | efdcfsi CRET1, CARG1 + | b ->fff_restv + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name + | li TMP1, 8 + |1: + | evlddx CARG2, BASE, TMP1 + | cmplw cr1, TMP1, NARGS8:RC + | checknum CARG2 + | bge cr1, ->fff_resbit + | checkfail ->fff_fallback + | efdadd CARG2, CARG2, TOBIT + | ins CARG1, CARG1, CARG2 + | addi TMP1, TMP1, 8 + | b <1 + |.endmacro + | + |.ffunc_bit_op band, and + |.ffunc_bit_op bor, or + |.ffunc_bit_op bxor, xor + | + |.ffunc_bit bswap + | rotlwi TMP0, CARG1, 8 + | rlwimi TMP0, CARG1, 24, 0, 7 + | rlwimi TMP0, CARG1, 24, 16, 23 + | efdcfsi CRET1, TMP0 + | b ->fff_restv + | + |.ffunc_bit bnot + | not TMP0, CARG1 + | efdcfsi CRET1, TMP0 + | b ->fff_restv + | + |.macro .ffunc_bit_sh, name, ins, shmod + | .ffunc_nn bit_..name + | efdadd CARG2, CARG2, TOBIT + | efdadd CARG1, CARG1, TOBIT + |.if shmod == 1 + | rlwinm CARG2, CARG2, 0, 27, 31 + |.elif shmod == 2 + | neg CARG2, CARG2 + |.endif + | ins TMP0, CARG1, CARG2 + | efdcfsi CRET1, TMP0 + | b ->fff_restv + |.endmacro + | + |.ffunc_bit_sh lshift, slw, 1 + |.ffunc_bit_sh rshift, srw, 1 + |.ffunc_bit_sh arshift, sraw, 1 + |.ffunc_bit_sh rol, rotlw, 0 + |.ffunc_bit_sh ror, rotlw, 2 + | + |//----------------------------------------------------------------------- + | + |->fff_fallback: // Call fast function fallback handler. + | // BASE = new base, RB = CFUNC, RC = nargs*8 + | lwz TMP3, CFUNC:RB->f + | add TMP1, BASE, NARGS8:RC + | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC. + | addi TMP0, TMP1, 8*LUA_MINSTACK + | lwz TMP2, L->maxstack + | stw PC, SAVE_PC // Redundant (but a defined value). + | cmplw TMP0, TMP2 + | stw BASE, L->base + | stw TMP1, L->top + | mr CARG1, L + | bgt >5 // Need to grow stack. + | mtctr TMP3 + | bctrl // (lua_State *L) + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | lwz BASE, L->base + | cmpwi CRET1, 0 + | slwi RD, CRET1, 3 + | la RA, -8(BASE) + | bgt ->fff_res // Returned nresults+1? + |1: // Returned 0 or -1: retry fast path. + | lwz TMP0, L->top + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | sub NARGS8:RC, TMP0, BASE + | bne ->vm_call_tail // Returned -1? + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | andi. TMP0, PC, FRAME_TYPE + | rlwinm TMP1, PC, 0, 0, 28 + | bne >3 + | lwz INS, -4(PC) + | decode_RA8 TMP1, INS + |3: + | sub TMP2, BASE, TMP1 + | b ->vm_call_dispatch // Resolve again for tailcall. + | + |5: // Grow stack for fallback handler. + | li CARG2, LUA_MINSTACK + | bl extern lj_state_growstack // (lua_State *L, int n) + | lwz BASE, L->base + | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry. + | b <1 + | + |->fff_gcstep: // Call GC step function. + | // BASE = new base, RC = nargs*8 + | mflr SAVE0 + | stw BASE, L->base + | add TMP0, BASE, NARGS8:RC + | stw PC, SAVE_PC // Redundant (but a defined value). 
+ | stw TMP0, L->top + | mr CARG1, L + | bl extern lj_gc_step // (lua_State *L) + | lwz BASE, L->base + | mtlr SAVE0 + | lwz TMP0, L->top + | sub NARGS8:RC, TMP0, BASE + | lwz CFUNC:RB, FRAME_FUNC(BASE) + | blr + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. +#if LJ_HASJIT + | NYI +#endif + | + |->vm_rethook: // Dispatch target for return hooks. + | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active? + | beq >1 + |5: // Re-dispatch to static ins. + | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OP4 TMP1, INS. + | lwzx TMP0, DISPATCH, TMP1 + | mtctr TMP0 + | bctr + | + |->vm_inshook: // Dispatch target for instr/line hooks. + | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH) + | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active? + | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0 + | bne <5 + | + | cmpwi cr1, TMP0, 0 + | addic. TMP2, TMP2, -1 + | beq cr1, <5 + | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH) + | beq >1 + | bge cr1, <5 + |1: + | mr CARG1, L + | stw MULTRES, SAVE_MULTRES + | mr CARG2, PC + | stw BASE, L->base + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. + | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc) + |3: + | lwz BASE, L->base + |4: // Re-dispatch to static ins. + | lwz INS, -4(PC) + | decode_OP4 TMP1, INS + | decode_RB8 RB, INS + | addi TMP1, TMP1, GG_DISP2STATIC + | decode_RD8 RD, INS + | lwzx TMP0, DISPATCH, TMP1 + | decode_RA8 RA, INS + | decode_RC8 RC, INS + | mtctr TMP0 + | bctr + | + |->cont_hook: // Continue from hook yield. + | addi PC, PC, 4 + | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins. + | b <4 + | + |->vm_hotloop: // Hot loop counter underflow. +#if LJ_HASJIT + | NYI +#endif + | + |->vm_callhook: // Dispatch target for call hooks. + | mr CARG2, PC +#if LJ_HASJIT + | b >1 +#endif + | + |->vm_hotcall: // Hot call counter underflow. +#if LJ_HASJIT + | ori CARG2, PC, 1 + |1: +#endif + | add TMP0, BASE, RC + | stw PC, SAVE_PC + | mr CARG1, L + | stw BASE, L->base + | sub RA, RA, BASE + | stw TMP0, L->top + | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc) + | // Returns ASMFunction. + | lwz BASE, L->base + | lwz TMP0, L->top + | stw ZERO, SAVE_PC // Invalidate for subsequent line hook. + | sub NARGS8:RC, TMP0, BASE + | add RA, BASE, RA + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | mtctr CRET1 + | bctr + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_exit_handler: +#if LJ_HASJIT + | NYI +#endif + |->vm_exit_interp: +#if LJ_HASJIT + | NYI +#endif + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + |// FP value rounding. Called by math.floor/math.ceil fast functions + |// and from JIT code. + |// + |// This can be inlined if the CPU has the frin/friz/frip/frim instructions. + |// The alternative hard-float approaches have a deep dependency chain. 
+ |// The resulting latency is at least 3x-7x the double-precision FP latency + |// (e500v2: 6cy, e600: 5cy, Cell: 10cy) or around 20-70 cycles. + |// + |// The soft-float approach is tedious, but much faster (e500v2: ~11cy/~6cy). + |// However it relies on a fast way to transfer the FP value to GPRs + |// (e500v2: 0cy for lo-word, 1cy for hi-word). + |// + |.macro vm_round, name, mode + | // Used temporaries: TMP0, TMP1, TMP2, TMP3. + |->name.._efd: // Input: CARG2, output: CRET2 + | evmergehi CARG1, CARG2, CARG2 + |->name.._hilo: + | // Input: CARG1 (hi), CARG2 (hi, lo), output: CRET2 + | rlwinm TMP2, CARG1, 12, 21, 31 + | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023 + | li TMP1, -1 + | cmplwi cr1, TMP2, 51 // 0 <= exp <= 51? + | subfic TMP0, TMP2, 52 + | bgt cr1, >1 + | lus TMP3, 0xfff0 + | slw TMP0, TMP1, TMP0 // lomask = -1 << (52-exp) + | sraw TMP1, TMP3, TMP2 // himask = (int32_t)0xfff00000 >> exp + |.if mode == 2 // trunc(x): + | evmergelo TMP0, TMP1, TMP0 + | evand CRET2, CARG2, TMP0 // hi &= himask, lo &= lomask + |.else + | andc TMP2, CARG2, TMP0 + | andc TMP3, CARG1, TMP1 + | or TMP2, TMP2, TMP3 // ztest = (hi&~himask) | (lo&~lomask) + | srawi TMP3, CARG1, 31 // signmask = (int32_t)hi >> 31 + |.if mode == 0 // floor(x): + | and. TMP2, TMP2, TMP3 // iszero = ((ztest & signmask) == 0) + |.else // ceil(x): + | andc. TMP2, TMP2, TMP3 // iszero = ((ztest & ~signmask) == 0) + |.endif + | and CARG2, CARG2, TMP0 // lo &= lomask + | and CARG1, CARG1, TMP1 // hi &= himask + | subc TMP0, CARG2, TMP0 + | iseleq TMP0, CARG2, TMP0 // lo = iszero ? lo : lo-lomask + | sube TMP1, CARG1, TMP1 + | iseleq TMP1, CARG1, TMP1 // hi = iszero ? hi : hi-himask+carry + | evmergelo CRET2, TMP1, TMP0 + |.endif + | blr + |1: + | bgtlr // Already done if >=2^52, +-inf or nan. + |.if mode == 2 // trunc(x): + | rlwinm TMP1, CARG1, 0, 0, 0 // hi = sign(x) + | li TMP0, 0 + | evmergelo CRET2, TMP1, TMP0 + |.else + | rlwinm TMP2, CARG1, 0, 1, 31 + | srawi TMP0, CARG1, 31 // signmask = (int32_t)hi >> 31 + | or TMP2, TMP2, CARG2 // ztest = abs(hi) | lo + | lus TMP1, 0x3ff0 + |.if mode == 0 // floor(x): + | and. TMP2, TMP2, TMP0 // iszero = ((ztest & signmask) == 0) + |.else // ceil(x): + | andc. TMP2, TMP2, TMP0 // iszero = ((ztest & ~signmask) == 0) + |.endif + | li TMP0, 0 + | iseleq TMP1, r0, TMP1 + | rlwimi CARG1, TMP1, 0, 1, 31 // hi = sign(x) | (iszero ? 0.0 : 1.0) + | evmergelo CRET2, CARG1, TMP0 + |.endif + | blr + |.endmacro + | + |->vm_floor: + | mflr CARG3 + | bl ->vm_floor_hilo + | mtlr CARG3 + | evmergehi CRET1, CRET2, CRET2 + | blr + | + | vm_round vm_floor, 0 + | vm_round vm_ceil, 1 +#if LJ_HASJIT + | vm_round vm_trunc, 2 +#else + |->vm_trunc_efd: + |->vm_trunc_hilo: +#endif + | + |->vm_powi: +#if LJ_HASJIT + | NYI +#endif + | + |->vm_foldfpm: +#if LJ_HASJIT + | NYI +#endif + | + |// Callable from C: double lj_vm_foldarith(double x, double y, int op) + |// Compute x op y for basic arithmetic operators (+ - * / % ^ and unary -) + |// and basic math functions. 
ORDER ARITH + |->vm_foldarith: + | evmergelo CARG2, CARG1, CARG2 + | cmplwi CARG5, 1 + | evmergelo CARG4, CARG3, CARG4 + | beq >1; bgt >2 + | efdadd CRET2, CARG2, CARG4; evmergehi CRET1, CRET2, CRET2; blr + |1: + | efdsub CRET2, CARG2, CARG4; evmergehi CRET1, CRET2, CRET2; blr + |2: + | cmplwi CARG5, 3; beq >1; bgt >2 + | efdmul CRET2, CARG2, CARG4; evmergehi CRET1, CRET2, CRET2; blr + |1: + | efddiv CRET2, CARG2, CARG4; evmergehi CRET1, CRET2, CRET2; blr + |2: + | cmplwi CARG5, 5; beq >1; bgt >2 + | evmr CARG3, CARG2; efddiv CRET2, CARG2, CARG4; evmr RB, CARG4 + | mflr RC; bl ->vm_floor_efd; mtlr RC + | efdmul CRET2, CRET2, RB; efdsub CRET2, CARG3, CRET2 + | evmergehi CRET1, CRET2, CRET2; blr + |1: + | b extern pow + |2: + | cmplwi CARG5, 7; beq >1; bgt >2 + | xoris CARG1, CARG1, 0x8000; blr + |1: + | rlwinm CARG1, CARG1, 0, 1, 31; blr + |2: + | NYI // Other operations only needed by JIT compiler. + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_ffi_call: +#if LJ_HASFFI + | NYI +#endif + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1*8, RD = src2*8, JMP with RD = target + | evlddx TMP0, BASE, RA + | addi PC, PC, 4 + | evlddx TMP1, BASE, RD + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | lwz TMP2, -4(PC) + | evmergehi RB, TMP0, TMP1 + | decode_RD4 TMP2, TMP2 + | checknum RB + | add TMP2, TMP2, TMP3 + | checkanyfail ->vmeta_comp + | efdcmplt TMP0, TMP1 + if (op == BC_ISLE || op == BC_ISGT) { + | efdcmpeq cr1, TMP0, TMP1 + | cror 4*cr0+gt, 4*cr0+gt, 4*cr1+gt + } + if (op == BC_ISLT || op == BC_ISLE) { + | iselgt PC, TMP2, PC + } else { + | iselgt PC, PC, TMP2 + } + | ins_next + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | // RA = src1*8, RD = src2*8, JMP with RD = target + | evlddx CARG2, BASE, RA + | addi PC, PC, 4 + | evlddx CARG3, BASE, RD + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | lwz TMP2, -4(PC) + | evmergehi RB, CARG2, CARG3 + | decode_RD4 TMP2, TMP2 + | checknum RB + | add TMP2, TMP2, TMP3 + | checkanyfail >5 + | efdcmpeq CARG2, CARG3 + if (vk) { + | iselgt PC, TMP2, PC + } else { + | iselgt PC, PC, TMP2 + } + |1: + | ins_next + | + |5: // Either or both types are not numbers. + | evcmpeq CARG2, CARG3 + | not TMP3, RB + | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive? + | crorc 4*cr7+lt, 4*cr0+so, 4*cr0+lt // 1: Same tv or different type. + | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata? + | crandc 4*cr7+gt, 4*cr0+lt, 4*cr1+gt // 2: Same type and primitive. + | mr SAVE0, PC + if (vk) { + | isel PC, TMP2, PC, 4*cr7+gt + } else { + | isel TMP2, PC, TMP2, 4*cr7+gt + } + | cror 4*cr7+lt, 4*cr7+lt, 4*cr7+gt // 1 or 2. 
+ if (vk) { + | isel PC, TMP2, PC, 4*cr0+so + } else { + | isel PC, PC, TMP2, 4*cr0+so + } + | blt cr7, <1 // Done if 1 or 2. + | blt cr6, <1 // Done if not tab/ud. + | + | // Different tables or userdatas. Need to check __eq metamethod. + | // Field metatable must be at same offset for GCtab and GCudata! + | lwz TAB:TMP2, TAB:CARG2->metatable + | li CARG4, 1-vk // ne = 0 or 1. + | cmplwi TAB:TMP2, 0 + | beq <1 // No metatable? + | lbz TMP2, TAB:TMP2->nomm + | andi. TMP2, TMP2, 1<<MM_eq + | bne <1 // Or 'no __eq' flag set? + | mr PC, SAVE0 // Restore old PC. + | b ->vmeta_equal // Handle __eq metamethod. + break; + + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + | // RA = src*8, RD = str_const*8 (~), JMP with RD = target + | evlddx TMP0, BASE, RA + | srwi RD, RD, 1 + | lwz INS, 0(PC) + | subfic RD, RD, -4 + | addi PC, PC, 4 + | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4 + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | decode_RD4 TMP2, INS + | evmergelo STR:TMP1, TISSTR, STR:TMP1 + | add TMP2, TMP2, TMP3 + | evcmpeq TMP0, STR:TMP1 + if (vk) { + | isel PC, TMP2, PC, 4*cr0+so + } else { + | isel PC, PC, TMP2, 4*cr0+so + } + | ins_next + break; + + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + | // RA = src*8, RD = num_const*8, JMP with RD = target + | evlddx TMP0, BASE, RA + | addi PC, PC, 4 + | evlddx TMP1, KBASE, RD + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | lwz INS, -4(PC) + | checknum TMP0 + | checkfail >5 + | efdcmpeq TMP0, TMP1 + |1: + | decode_RD4 TMP2, INS + | add TMP2, TMP2, TMP3 + if (vk) { + | iselgt PC, TMP2, PC + |5: + } else { + | iselgt PC, PC, TMP2 + } + |3: + | ins_next + if (!vk) { + |5: + | decode_RD4 TMP2, INS + | add PC, TMP2, TMP3 + | b <3 + } + break; + + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target + | lwzx TMP0, BASE, RA + | srwi TMP1, RD, 3 + | lwz INS, 0(PC) + | addi PC, PC, 4 + | not TMP1, TMP1 + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | cmplw TMP0, TMP1 + | decode_RD4 TMP2, INS + | add TMP2, TMP2, TMP3 + if (vk) { + | iseleq PC, TMP2, PC + } else { + | iseleq PC, PC, TMP2 + } + | ins_next + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + | // RA = dst*8 or unused, RD = src*8, JMP with RD = target + | evlddx TMP0, BASE, RD + | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE. 
+ | lwz INS, 0(PC) + | evcmpltu TMP0, TMP1 + | addi PC, PC, 4 + if (op == BC_IST || op == BC_ISF) { + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | decode_RD4 TMP2, INS + | add TMP2, TMP2, TMP3 + if (op == BC_IST) { + | isellt PC, TMP2, PC + } else { + | isellt PC, PC, TMP2 + } + } else { + if (op == BC_ISTC) { + | checkfail >1 + } else { + | checkok >1 + } + | addis PC, PC, -(BCBIAS_J*4 >> 16) + | decode_RD4 TMP2, INS + | evstddx TMP0, BASE, RA + | add PC, PC, TMP2 + |1: + } + | ins_next + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + | // RA = dst*8, RD = src*8 + | ins_next1 + | evlddx TMP0, BASE, RD + | evstddx TMP0, BASE, RA + | ins_next2 + break; + case BC_NOT: + | // RA = dst*8, RD = src*8 + | ins_next1 + | lwzx TMP0, BASE, RD + | subfic TMP1, TMP0, LJ_TTRUE + | adde TMP0, TMP0, TMP1 + | stwx TMP0, BASE, RA + | ins_next2 + break; + case BC_UNM: + | // RA = dst*8, RD = src*8 + | evlddx TMP0, BASE, RD + | checknum TMP0 + | checkfail ->vmeta_unm + | efdneg TMP0, TMP0 + | ins_next1 + | evstddx TMP0, BASE, RA + | ins_next2 + break; + case BC_LEN: + | // RA = dst*8, RD = src*8 + | evlddx CARG1, BASE, RD + | checkstr CARG1 + | checkfail >2 + | lwz CRET1, STR:CARG1->len + |1: + | ins_next1 + | efdcfsi TMP0, CRET1 + | evstddx TMP0, BASE, RA + | ins_next2 + |2: + | checktab CARG1 + | checkfail ->vmeta_len +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | lwz TAB:TMP2, TAB:CARG1->metatable + | cmplwi TAB:TMP2, 0 + | bne >9 + |3: +#endif + |->BC_LEN_Z: + | bl extern lj_tab_len // (GCtab *t) + | // Returns uint32_t (but less than 2^31). + | b <1 +#ifdef LUAJIT_ENABLE_LUA52COMPAT + |9: + | lbz TMP0, TAB:TMP2->nomm + | andi. TMP0, TMP0, 1<<MM_len + | bne <3 // 'no __len' flag set: done. + | b ->vmeta_len +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithpre, t0, t1 + | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | evlddx t0, BASE, RB + | checknum t0 + | evlddx t1, KBASE, RC + | checkfail ->vmeta_arith_vn + || break; + ||case 1: + | evlddx t1, BASE, RB + | checknum t1 + | evlddx t0, KBASE, RC + | checkfail ->vmeta_arith_nv + || break; + ||default: + | evlddx t0, BASE, RB + | evlddx t1, BASE, RC + | evmergehi TMP2, t0, t1 + | checknum TMP2 + | checkanyfail ->vmeta_arith_vv + || break; + ||} + |.endmacro + | + |.macro ins_arith, ins + | ins_arithpre TMP0, TMP1 + | ins_next1 + | ins TMP0, TMP0, TMP1 + | evstddx TMP0, BASE, RA + | ins_next2 + |.endmacro + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + | ins_arith efdadd + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + | ins_arith efdsub + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arith efdmul + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | ins_arith efddiv + break; + case BC_MODVN: + | ins_arithpre RD, SAVE0 + |->BC_MODVN_Z: + | efddiv CARG2, RD, SAVE0 + | bl ->vm_floor_efd // floor(b/c) + | efdmul TMP0, CRET2, SAVE0 + | ins_next1 + | efdsub TMP0, RD, TMP0 // b - floor(b/c)*c + | evstddx TMP0, BASE, RA + | ins_next2 + break; + case BC_MODNV: case BC_MODVV: + | ins_arithpre RD, SAVE0 + | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. 
+ break; + case BC_POW: + | evlddx CARG2, BASE, RB + | evlddx CARG4, BASE, RC + | evmergehi CARG1, CARG4, CARG2 + | checknum CARG1 + | evmergehi CARG3, CARG4, CARG4 + | checkanyfail ->vmeta_arith_vv + | bl extern pow + | evmergelo CRET2, CRET1, CRET2 + | evstddx CRET2, BASE, RA + | ins_next + break; + + case BC_CAT: + | // RA = dst*8, RB = src_start*8, RC = src_end*8 + | sub CARG3, RC, RB + | stw BASE, L->base + | add CARG2, BASE, RC + | mr SAVE0, RB + |->BC_CAT_Z: + | stw PC, SAVE_PC + | mr CARG1, L + | srwi CARG3, CARG3, 3 + | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left) + | // Returns NULL (finished) or TValue * (metamethod). + | cmplwi CRET1, 0 + | lwz BASE, L->base + | bne ->vmeta_binop + | evlddx TMP0, BASE, SAVE0 // Copy result from RB to RA. + | evstddx TMP0, BASE, RA + | ins_next + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | // RA = dst*8, RD = str_const*8 (~) + | ins_next1 + | srwi TMP1, RD, 1 + | subfic TMP1, TMP1, -4 + | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4 + | evmergelo TMP0, TISSTR, TMP0 + | evstddx TMP0, BASE, RA + | ins_next2 + break; + case BC_KCDATA: +#if LJ_HASFFI + | // RA = dst*8, RD = cdata_const*8 (~) + | ins_next1 + | srwi TMP1, RD, 1 + | subfic TMP1, TMP1, -4 + | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4 + | li TMP2, LJ_TCDATA + | evmergelo TMP0, TMP2, TMP0 + | evstddx TMP0, BASE, RA + | ins_next2 +#endif + break; + case BC_KSHORT: + | // RA = dst*8, RD = int16_literal*8 + | srwi TMP1, RD, 3 + | extsh TMP1, TMP1 + | ins_next1 + | efdcfsi TMP0, TMP1 + | evstddx TMP0, BASE, RA + | ins_next2 + break; + case BC_KNUM: + | // RA = dst*8, RD = num_const*8 + | evlddx TMP0, KBASE, RD + | ins_next1 + | evstddx TMP0, BASE, RA + | ins_next2 + break; + case BC_KPRI: + | // RA = dst*8, RD = primitive_type*8 (~) + | srwi TMP1, RD, 3 + | not TMP0, TMP1 + | ins_next1 + | stwx TMP0, BASE, RA + | ins_next2 + break; + case BC_KNIL: + | // RA = base*8, RD = end*8 + | evstddx TISNIL, BASE, RA + | addi RA, RA, 8 + |1: + | evstddx TISNIL, BASE, RA + | cmpw RA, RD + | addi RA, RA, 8 + | blt <1 + | ins_next_ + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | // RA = dst*8, RD = uvnum*8 + | ins_next1 + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi RD, RD, 1 + | addi RD, RD, offsetof(GCfuncL, uvptr) + | lwzx UPVAL:RB, LFUNC:RB, RD + | lwz TMP1, UPVAL:RB->v + | evldd TMP0, 0(TMP1) + | evstddx TMP0, BASE, RA + | ins_next2 + break; + case BC_USETV: + | // RA = uvnum*8, RD = src*8 + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi RA, RA, 1 + | addi RA, RA, offsetof(GCfuncL, uvptr) + | evlddx TMP1, BASE, RD + | lwzx UPVAL:RB, LFUNC:RB, RA + | lbz TMP3, UPVAL:RB->marked + | lwz CARG2, UPVAL:RB->v + | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv) + | lbz TMP0, UPVAL:RB->closed + | evmergehi TMP2, TMP1, TMP1 + | evstdd TMP1, 0(CARG2) + | cmplwi cr1, TMP0, 0 + | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq + | subi TMP2, TMP2, (LJ_TISNUM+1) + | bne >2 // Upvalue is closed and black? + |1: + | ins_next + | + |2: // Check if new value is collectable. + | cmplwi TMP2, LJ_TISGCV - (LJ_TISNUM+1) + | bge <1 // tvisgcv(v) + | lbz TMP3, GCOBJ:TMP1->gch.marked + | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(v) + | la CARG1, GG_DISP2G(DISPATCH) + | // Crossed a write barrier. Move the barrier forward. 
+ | beq <1 + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | b <1 + break; + case BC_USETS: + | // RA = uvnum*8, RD = str_const*8 (~) + | ins_next1 + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi TMP1, RD, 1 + | srwi RA, RA, 1 + | subfic TMP1, TMP1, -4 + | addi RA, RA, offsetof(GCfuncL, uvptr) + | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4 + | lwzx UPVAL:RB, LFUNC:RB, RA + | evmergelo STR:TMP1, TISSTR, STR:TMP1 + | lbz TMP3, UPVAL:RB->marked + | lwz CARG2, UPVAL:RB->v + | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv) + | lbz TMP3, STR:TMP1->marked + | lbz TMP2, UPVAL:RB->closed + | evstdd STR:TMP1, 0(CARG2) + | bne >2 + |1: + | ins_next2 + | + |2: // Check if string is white and ensure upvalue is closed. + | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(str) + | cmplwi cr1, TMP2, 0 + | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq + | la CARG1, GG_DISP2G(DISPATCH) + | // Crossed a write barrier. Move the barrier forward. + | beq <1 + | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv) + | b <1 + break; + case BC_USETN: + | // RA = uvnum*8, RD = num_const*8 + | ins_next1 + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi RA, RA, 1 + | addi RA, RA, offsetof(GCfuncL, uvptr) + | evlddx TMP0, KBASE, RD + | lwzx UPVAL:RB, LFUNC:RB, RA + | lwz TMP1, UPVAL:RB->v + | evstdd TMP0, 0(TMP1) + | ins_next2 + break; + case BC_USETP: + | // RA = uvnum*8, RD = primitive_type*8 (~) + | ins_next1 + | lwz LFUNC:RB, FRAME_FUNC(BASE) + | srwi RA, RA, 1 + | addi RA, RA, offsetof(GCfuncL, uvptr) + | srwi TMP0, RD, 3 + | lwzx UPVAL:RB, LFUNC:RB, RA + | not TMP0, TMP0 + | lwz TMP1, UPVAL:RB->v + | stw TMP0, 0(TMP1) + | ins_next2 + break; + + case BC_UCLO: + | // RA = level*8, RD = target + | lwz TMP1, L->openupval + | branch_RD // Do this first since RD is not saved. + | stw BASE, L->base + | cmplwi TMP1, 0 + | mr CARG1, L + | beq >1 + | add CARG2, BASE, RA + | bl extern lj_func_closeuv // (lua_State *L, TValue *level) + | lwz BASE, L->base + |1: + | ins_next + break; + + case BC_FNEW: + | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype) + | srwi TMP1, RD, 1 + | stw BASE, L->base + | subfic TMP1, TMP1, -4 + | stw PC, SAVE_PC + | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4 + | mr CARG1, L + | lwz CARG3, FRAME_FUNC(BASE) + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | bl extern lj_func_newL_gc + | // Returns GCfuncL *. + | lwz BASE, L->base + | evmergelo LFUNC:CRET1, TISFUNC, LFUNC:CRET1 + | evstddx LFUNC:CRET1, BASE, RA + | ins_next + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + case BC_TDUP: + | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~) + | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH) + | mr CARG1, L + | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH) + | stw BASE, L->base + | cmplw TMP0, TMP1 + | stw PC, SAVE_PC + | bge >5 + |1: + if (op == BC_TNEW) { + | rlwinm CARG2, RD, 29, 21, 31 + | rlwinm CARG3, RD, 18, 27, 31 + | cmpwi CARG2, 0x7ff + | li TMP1, 0x801 + | iseleq CARG2, TMP1, CARG2 + | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) + | // Returns Table *. + } else { + | srwi TMP1, RD, 1 + | subfic TMP1, TMP1, -4 + | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4 + | bl extern lj_tab_dup // (lua_State *L, Table *kt) + | // Returns Table *. 
+ } + | lwz BASE, L->base + | evmergelo TAB:CRET1, TISTAB, TAB:CRET1 + | evstddx TAB:CRET1, BASE, RA + | ins_next + |5: + | mr SAVE0, RD + | bl extern lj_gc_step_fixtop // (lua_State *L) + | mr RD, SAVE0 + | mr CARG1, L + | b <1 + break; + + case BC_GGET: + | // RA = dst*8, RD = str_const*8 (~) + case BC_GSET: + | // RA = src*8, RD = str_const*8 (~) + | lwz LFUNC:TMP2, FRAME_FUNC(BASE) + | srwi TMP1, RD, 1 + | lwz TAB:RB, LFUNC:TMP2->env + | subfic TMP1, TMP1, -4 + | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4 + if (op == BC_GGET) { + | b ->BC_TGETS_Z + } else { + | b ->BC_TSETS_Z + } + break; + + case BC_TGETV: + | // RA = dst*8, RB = table*8, RC = key*8 + | evlddx TAB:RB, BASE, RB + | evlddx RC, BASE, RC + | checktab TAB:RB + | checkfail ->vmeta_tgetv + | checknum RC + | checkfail >5 + | // Convert number key to integer + | efdctsi TMP2, RC + | lwz TMP0, TAB:RB->asize + | efdcfsi TMP1, TMP2 + | cmplw cr0, TMP0, TMP2 + | efdcmpeq cr1, RC, TMP1 + | lwz TMP1, TAB:RB->array + | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt + | slwi TMP2, TMP2, 3 + | ble ->vmeta_tgetv // Integer key and in array part? + | evlddx TMP1, TMP1, TMP2 + | checknil TMP1 + | checkok >2 + |1: + | evstddx TMP1, BASE, RA + | ins_next + | + |2: // Check for __index if table value is nil. + | lwz TAB:TMP2, TAB:RB->metatable + | cmplwi TAB:TMP2, 0 + | beq <1 // No metatable: done. + | lbz TMP0, TAB:TMP2->nomm + | andi. TMP0, TMP0, 1<<MM_index + | bne <1 // 'no __index' flag set: done. + | b ->vmeta_tgetv + | + |5: + | checkstr STR:RC // String key? + | checkok ->BC_TGETS_Z + | b ->vmeta_tgetv + break; + case BC_TGETS: + | // RA = dst*8, RB = table*8, RC = str_const*8 (~) + | evlddx TAB:RB, BASE, RB + | srwi TMP1, RC, 1 + | checktab TAB:RB + | subfic TMP1, TMP1, -4 + | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4 + | checkfail ->vmeta_tgets1 + |->BC_TGETS_Z: + | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8 + | lwz TMP0, TAB:RB->hmask + | lwz TMP1, STR:RC->hash + | lwz NODE:TMP2, TAB:RB->node + | evmergelo STR:RC, TISSTR, STR:RC + | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask + | slwi TMP0, TMP1, 5 + | slwi TMP1, TMP1, 3 + | sub TMP1, TMP0, TMP1 + | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) + |1: + | evldd TMP0, NODE:TMP2->key + | evldd TMP1, NODE:TMP2->val + | evcmpeq TMP0, STR:RC + | checkanyfail >4 + | checknil TMP1 + | checkok >5 // Key found, but nil value? + |3: + | evstddx TMP1, BASE, RA + | ins_next + | + |4: // Follow hash chain. + | lwz NODE:TMP2, NODE:TMP2->next + | cmplwi NODE:TMP2, 0 + | bne <1 + | // End of hash chain: key not found, nil result. + | evmr TMP1, TISNIL + | + |5: // Check for __index if table value is nil. + | lwz TAB:TMP2, TAB:RB->metatable + | cmplwi TAB:TMP2, 0 + | beq <3 // No metatable: done. + | lbz TMP0, TAB:TMP2->nomm + | andi. TMP0, TMP0, 1<<MM_index + | bne <3 // 'no __index' flag set: done. + | b ->vmeta_tgets + break; + case BC_TGETB: + | // RA = dst*8, RB = table*8, RC = index*8 + | evlddx TAB:RB, BASE, RB + | srwi TMP0, RC, 3 + | checktab TAB:RB + | checkfail ->vmeta_tgetb + | lwz TMP1, TAB:RB->asize + | lwz TMP2, TAB:RB->array + | cmplw TMP0, TMP1 + | bge ->vmeta_tgetb + | evlddx TMP1, TMP2, RC + | checknil TMP1 + | checkok >5 + |1: + | ins_next1 + | evstddx TMP1, BASE, RA + | ins_next2 + | + |5: // Check for __index if table value is nil. + | lwz TAB:TMP2, TAB:RB->metatable + | cmplwi TAB:TMP2, 0 + | beq <1 // No metatable: done. + | lbz TMP2, TAB:TMP2->nomm + | andi. TMP2, TMP2, 1<<MM_index + | bne <1 // 'no __index' flag set: done. 
+ | b ->vmeta_tgetb // Caveat: preserve TMP0! + break; + + case BC_TSETV: + | // RA = src*8, RB = table*8, RC = key*8 + | evlddx TAB:RB, BASE, RB + | evlddx RC, BASE, RC + | checktab TAB:RB + | checkfail ->vmeta_tsetv + | checknum RC + | checkfail >5 + | // Convert number key to integer + | efdctsi TMP2, RC + | evlddx SAVE0, BASE, RA + | lwz TMP0, TAB:RB->asize + | efdcfsi TMP1, TMP2 + | cmplw cr0, TMP0, TMP2 + | efdcmpeq cr1, RC, TMP1 + | lwz TMP1, TAB:RB->array + | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt + | slwi TMP0, TMP2, 3 + | ble ->vmeta_tsetv // Integer key and in array part? + | lbz TMP3, TAB:RB->marked + | evlddx TMP2, TMP1, TMP0 + | checknil TMP2 + | checkok >3 + |1: + | andi. TMP2, TMP3, LJ_GC_BLACK // isblack(table) + | evstddx SAVE0, TMP1, TMP0 + | bne >7 + |2: + | ins_next + | + |3: // Check for __newindex if previous value is nil. + | lwz TAB:TMP2, TAB:RB->metatable + | cmplwi TAB:TMP2, 0 + | beq <1 // No metatable: done. + | lbz TMP2, TAB:TMP2->nomm + | andi. TMP2, TMP2, 1<<MM_newindex + | bne <1 // 'no __newindex' flag set: done. + | b ->vmeta_tsetv + | + |5: + | checkstr STR:RC // String key? + | checkok ->BC_TSETS_Z + | b ->vmeta_tsetv + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, TMP3, TMP0 + | b <2 + break; + case BC_TSETS: + | // RA = src*8, RB = table*8, RC = str_const*8 (~) + | evlddx TAB:RB, BASE, RB + | srwi TMP1, RC, 1 + | checktab TAB:RB + | subfic TMP1, TMP1, -4 + | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4 + | checkfail ->vmeta_tsets1 + |->BC_TSETS_Z: + | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8 + | lwz TMP0, TAB:RB->hmask + | lwz TMP1, STR:RC->hash + | lwz NODE:TMP2, TAB:RB->node + | evmergelo STR:RC, TISSTR, STR:RC + | stb ZERO, TAB:RB->nomm // Clear metamethod cache. + | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask + | evlddx SAVE0, BASE, RA + | slwi TMP0, TMP1, 5 + | slwi TMP1, TMP1, 3 + | sub TMP1, TMP0, TMP1 + | lbz TMP3, TAB:RB->marked + | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) + |1: + | evldd TMP0, NODE:TMP2->key + | evldd TMP1, NODE:TMP2->val + | evcmpeq TMP0, STR:RC + | checkanyfail >5 + | checknil TMP1 + | checkok >4 // Key found, but nil value? + |2: + | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table) + | evstdd SAVE0, NODE:TMP2->val + | bne >7 + |3: + | ins_next + | + |4: // Check for __newindex if previous value is nil. + | lwz TAB:TMP1, TAB:RB->metatable + | cmplwi TAB:TMP1, 0 + | beq <2 // No metatable: done. + | lbz TMP0, TAB:TMP1->nomm + | andi. TMP0, TMP0, 1<<MM_newindex + | bne <2 // 'no __newindex' flag set: done. + | b ->vmeta_tsets + | + |5: // Follow hash chain. + | lwz NODE:TMP2, NODE:TMP2->next + | cmplwi NODE:TMP2, 0 + | bne <1 + | // End of hash chain: key not found, add a new one. + | + | // But check for __newindex first. + | lwz TAB:TMP1, TAB:RB->metatable + | la CARG3, DISPATCH_GL(tmptv)(DISPATCH) + | stw PC, SAVE_PC + | mr CARG1, L + | cmplwi TAB:TMP1, 0 + | stw BASE, L->base + | beq >6 // No metatable: continue. + | lbz TMP0, TAB:TMP1->nomm + | andi. TMP0, TMP0, 1<<MM_newindex + | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check. + |6: + | mr CARG2, TAB:RB + | evstdd STR:RC, 0(CARG3) + | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) + | // Returns TValue *. + | lwz BASE, L->base + | evstdd SAVE0, 0(CRET1) + | b <3 // No 2nd write barrier needed. + | + |7: // Possible table write barrier for the value. Skip valiswhite check. 
+ | barrierback TAB:RB, TMP3, TMP0 + | b <3 + break; + case BC_TSETB: + | // RA = src*8, RB = table*8, RC = index*8 + | evlddx TAB:RB, BASE, RB + | srwi TMP0, RC, 3 + | checktab TAB:RB + | checkfail ->vmeta_tsetb + | lwz TMP1, TAB:RB->asize + | lwz TMP2, TAB:RB->array + | lbz TMP3, TAB:RB->marked + | cmplw TMP0, TMP1 + | evlddx SAVE0, BASE, RA + | bge ->vmeta_tsetb + | evlddx TMP1, TMP2, RC + | checknil TMP1 + | checkok >5 + |1: + | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table) + | evstddx SAVE0, TMP2, RC + | bne >7 + |2: + | ins_next + | + |5: // Check for __newindex if previous value is nil. + | lwz TAB:TMP1, TAB:RB->metatable + | cmplwi TAB:TMP1, 0 + | beq <1 // No metatable: done. + | lbz TMP1, TAB:TMP1->nomm + | andi. TMP1, TMP1, 1<<MM_newindex + | bne <1 // 'no __newindex' flag set: done. + | b ->vmeta_tsetb // Caveat: preserve TMP0! + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, TMP3, TMP0 + | b <2 + break; + + case BC_TSETM: + | // RA = base*8 (table at base-1), RD = num_const*8 (start index) + | add RA, BASE, RA + |1: + | add TMP3, KBASE, RD + | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table. + | addic. TMP0, MULTRES, -8 + | lwz TMP3, 4(TMP3) // Integer constant is in lo-word. + | srwi CARG3, TMP0, 3 + | beq >4 // Nothing to copy? + | add CARG3, CARG3, TMP3 + | lwz TMP2, TAB:CARG2->asize + | slwi TMP1, TMP3, 3 + | lbz TMP3, TAB:CARG2->marked + | cmplw CARG3, TMP2 + | add TMP2, RA, TMP0 + | lwz TMP0, TAB:CARG2->array + | bgt >5 + | add TMP1, TMP1, TMP0 + | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table) + |3: // Copy result slots to table. + | evldd TMP0, 0(RA) + | addi RA, RA, 8 + | cmpw cr1, RA, TMP2 + | evstdd TMP0, 0(TMP1) + | addi TMP1, TMP1, 8 + | blt cr1, <3 + | bne >7 + |4: + | ins_next + | + |5: // Need to resize array part. + | stw BASE, L->base + | mr CARG1, L + | stw PC, SAVE_PC + | mr SAVE0, RD + | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) + | // Must not reallocate the stack. + | mr RD, SAVE0 + | b <1 + | + |7: // Possible table write barrier for any value. Skip valiswhite check. + | barrierback TAB:RB, TMP3, TMP0 + | b <4 + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALLM: + | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8 + | add NARGS8:RC, NARGS8:RC, MULTRES + | // Fall through. Assumes BC_CALL follows. + break; + case BC_CALL: + | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8 + | evlddx LFUNC:RB, BASE, RA + | mr TMP2, BASE + | add BASE, BASE, RA + | subi NARGS8:RC, NARGS8:RC, 8 + | checkfunc LFUNC:RB + | addi BASE, BASE, 8 + | checkfail ->vmeta_call + | ins_call + break; + + case BC_CALLMT: + | // RA = base*8, (RB = 0,) RC = extra_nargs*8 + | add NARGS8:RC, NARGS8:RC, MULTRES + | // Fall through. Assumes BC_CALLT follows. + break; + case BC_CALLT: + | // RA = base*8, (RB = 0,) RC = (nargs+1)*8 + | evlddx LFUNC:RB, BASE, RA + | add RA, BASE, RA + | lwz TMP1, FRAME_PC(BASE) + | subi NARGS8:RC, NARGS8:RC, 8 + | checkfunc LFUNC:RB + | addi RA, RA, 8 + | checkfail ->vmeta_callt + |->BC_CALLT_Z: + | andi. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand. + | lbz TMP3, LFUNC:RB->ffid + | xori TMP2, TMP1, FRAME_VARG + | cmplwi cr1, NARGS8:RC, 0 + | bne >7 + |1: + | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC. + | li TMP2, 0 + | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function? 
+ | beq cr1, >3 + |2: + | addi TMP3, TMP2, 8 + | evlddx TMP0, RA, TMP2 + | cmplw cr1, TMP3, NARGS8:RC + | evstddx TMP0, BASE, TMP2 + | mr TMP2, TMP3 + | bne cr1, <2 + |3: + | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt + | beq >5 + |4: + | ins_callt + | + |5: // Tailcall to a fast function with a Lua frame below. + | lwz INS, -4(TMP1) + | decode_RA8 RA, INS + | sub TMP1, BASE, RA + | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1) + | lwz TMP1, LFUNC:TMP1->pc + | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE. + | b <4 + | + |7: // Tailcall from a vararg function. + | andi. TMP0, TMP2, FRAME_TYPEP + | bne <1 // Vararg frame below? + | sub BASE, BASE, TMP2 // Relocate BASE down. + | lwz TMP1, FRAME_PC(BASE) + | andi. TMP0, TMP1, FRAME_TYPE + | b <1 + break; + + case BC_ITERC: + | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8)) + | subi RA, RA, 24 // evldd doesn't support neg. offsets. + | mr TMP2, BASE + | evlddx LFUNC:RB, BASE, RA + | add BASE, BASE, RA + | evldd TMP0, 8(BASE) + | evldd TMP1, 16(BASE) + | evstdd LFUNC:RB, 24(BASE) // Copy callable. + | checkfunc LFUNC:RB + | evstdd TMP0, 32(BASE) // Copy state. + | li NARGS8:RC, 16 // Iterators get 2 arguments. + | evstdd TMP1, 40(BASE) // Copy control var. + | addi BASE, BASE, 32 + | checkfail ->vmeta_call + | ins_call + break; + + case BC_ITERN: + | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8) +#if LJ_HASJIT + | // NYI: add hotloop, record BC_ITERN. +#endif + | add RA, BASE, RA + | lwz TAB:RB, -12(RA) + | lwz RC, -4(RA) // Get index from control var. + | lwz TMP0, TAB:RB->asize + | lwz TMP1, TAB:RB->array + | addi PC, PC, 4 + |1: // Traverse array part. + | cmplw RC, TMP0 + | slwi TMP3, RC, 3 + | bge >5 // Index points after array part? + | evlddx TMP2, TMP1, TMP3 + | checknil TMP2 + | lwz INS, -4(PC) + | checkok >4 + | efdcfsi TMP0, RC + | addi RC, RC, 1 + | addis TMP3, PC, -(BCBIAS_J*4 >> 16) + | evstdd TMP2, 8(RA) + | decode_RD4 TMP1, INS + | stw RC, -4(RA) // Update control var. + | add PC, TMP1, TMP3 + | evstdd TMP0, 0(RA) + |3: + | ins_next + | + |4: // Skip holes in array part. + | addi RC, RC, 1 + | b <1 + | + |5: // Traverse hash part. + | lwz TMP1, TAB:RB->hmask + | sub RC, RC, TMP0 + | lwz TMP2, TAB:RB->node + |6: + | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1. + | slwi TMP3, RC, 5 + | bgt <3 + | slwi RB, RC, 3 + | sub TMP3, TMP3, RB + | evlddx RB, TMP2, TMP3 + | add NODE:TMP3, TMP2, TMP3 + | checknil RB + | lwz INS, -4(PC) + | checkok >7 + | evldd TMP3, NODE:TMP3->key + | addis TMP2, PC, -(BCBIAS_J*4 >> 16) + | evstdd RB, 8(RA) + | add RC, RC, TMP0 + | decode_RD4 TMP1, INS + | evstdd TMP3, 0(RA) + | addi RC, RC, 1 + | add PC, TMP1, TMP2 + | stw RC, -4(RA) // Update control var. + | b <3 + | + |7: // Skip holes in hash part. + | addi RC, RC, 1 + | b <6 + break; + + case BC_ISNEXT: + | // RA = base*8, RD = target (points to ITERN) + | add RA, BASE, RA + | li TMP2, -24 + | evlddx CFUNC:TMP1, RA, TMP2 + | lwz TMP2, -16(RA) + | lwz TMP3, -8(RA) + | evmergehi TMP0, CFUNC:TMP1, CFUNC:TMP1 + | cmpwi cr0, TMP2, LJ_TTAB + | cmpwi cr1, TMP0, LJ_TFUNC + | cmpwi cr6, TMP3, LJ_TNIL + | bne cr1, >5 + | lbz TMP1, CFUNC:TMP1->ffid + | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq + | cmpwi cr7, TMP1, FF_next_N + | srwi TMP0, RD, 1 + | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq + | add TMP3, PC, TMP0 + | bne cr0, >5 + | stw ZERO, -4(RA) // Initialize control var. + | addis PC, TMP3, -(BCBIAS_J*4 >> 16) + |1: + | ins_next + |5: // Despecialize bytecode if any of the checks fail. 
+ | li TMP0, BC_JMP + | li TMP1, BC_ITERC + | stb TMP0, -1(PC) + | addis PC, TMP3, -(BCBIAS_J*4 >> 16) + | stb TMP1, 3(PC) + | b <1 + break; + + case BC_VARG: + | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8 + | lwz TMP0, FRAME_PC(BASE) + | add RC, BASE, RC + | add RA, BASE, RA + | addi RC, RC, FRAME_VARG + | add TMP2, RA, RB + | subi TMP3, BASE, 8 // TMP3 = vtop + | sub RC, RC, TMP0 // RC = vbase + | // Note: RC may now be even _above_ BASE if nargs was < numparams. + | cmplwi cr1, RB, 0 + | sub. TMP1, TMP3, RC + | beq cr1, >5 // Copy all varargs? + | subi TMP2, TMP2, 16 + | ble >2 // No vararg slots? + |1: // Copy vararg slots to destination slots. + | evldd TMP0, 0(RC) + | addi RC, RC, 8 + | evstdd TMP0, 0(RA) + | cmplw RA, TMP2 + | cmplw cr1, RC, TMP3 + | bge >3 // All destination slots filled? + | addi RA, RA, 8 + | blt cr1, <1 // More vararg slots? + |2: // Fill up remainder with nil. + | evstdd TISNIL, 0(RA) + | cmplw RA, TMP2 + | addi RA, RA, 8 + | blt <2 + |3: + | ins_next + | + |5: // Copy all varargs. + | lwz TMP0, L->maxstack + | li MULTRES, 8 // MULTRES = (0+1)*8 + | ble <3 // No vararg slots? + | add TMP2, RA, TMP1 + | cmplw TMP2, TMP0 + | addi MULTRES, TMP1, 8 + | bgt >7 + |6: + | evldd TMP0, 0(RC) + | addi RC, RC, 8 + | evstdd TMP0, 0(RA) + | cmplw RC, TMP3 + | addi RA, RA, 8 + | blt <6 // More vararg slots? + | b <3 + | + |7: // Grow stack for varargs. + | mr CARG1, L + | stw RA, L->top + | sub SAVE0, RC, BASE // Need delta, because BASE may change. + | stw BASE, L->base + | sub RA, RA, BASE + | stw PC, SAVE_PC + | srwi CARG2, TMP1, 3 + | bl extern lj_state_growstack // (lua_State *L, int n) + | lwz BASE, L->base + | add RA, BASE, RA + | add RC, BASE, SAVE0 + | subi TMP3, BASE, 8 + | b <6 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | // RA = results*8, RD = extra_nresults*8 + | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8. + | // Fall through. Assumes BC_RET follows. + break; + + case BC_RET: + | // RA = results*8, RD = (nresults+1)*8 + | lwz PC, FRAME_PC(BASE) + | add RA, BASE, RA + | mr MULTRES, RD + |1: + | andi. TMP0, PC, FRAME_TYPE + | xori TMP1, PC, FRAME_VARG + | bne ->BC_RETV_Z + | + |->BC_RET_Z: + | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return + | lwz INS, -4(PC) + | cmpwi RD, 8 + | subi TMP2, BASE, 8 + | subi RC, RD, 8 + | decode_RB8 RB, INS + | beq >3 + | li TMP1, 0 + |2: + | addi TMP3, TMP1, 8 + | evlddx TMP0, RA, TMP1 + | cmpw TMP3, RC + | evstddx TMP0, TMP2, TMP1 + | beq >3 + | addi TMP1, TMP3, 8 + | evlddx TMP0, RA, TMP3 + | cmpw TMP1, RC + | evstddx TMP0, TMP2, TMP3 + | bne <2 + |3: + |5: + | cmplw RB, RD + | decode_RA8 RA, INS + | bgt >6 + | sub BASE, TMP2, RA + | lwz LFUNC:TMP1, FRAME_FUNC(BASE) + | ins_next1 + | lwz TMP1, LFUNC:TMP1->pc + | lwz KBASE, PC2PROTO(k)(TMP1) + | ins_next2 + | + |6: // Fill up results with nil. + | subi TMP1, RD, 8 + | addi RD, RD, 8 + | evstddx TISNIL, TMP2, TMP1 + | b <5 + | + |->BC_RETV_Z: // Non-standard return case. + | andi. TMP2, TMP1, FRAME_TYPEP + | bne ->vm_return + | // Return from vararg function: relocate BASE down. + | sub BASE, BASE, TMP1 + | lwz PC, FRAME_PC(BASE) + | b <1 + break; + + case BC_RET0: case BC_RET1: + | // RA = results*8, RD = (nresults+1)*8 + | lwz PC, FRAME_PC(BASE) + | add RA, BASE, RA + | mr MULTRES, RD + | andi. 
TMP0, PC, FRAME_TYPE + | xori TMP1, PC, FRAME_VARG + | bne ->BC_RETV_Z + | + | lwz INS, -4(PC) + | subi TMP2, BASE, 8 + | decode_RB8 RB, INS + if (op == BC_RET1) { + | evldd TMP0, 0(RA) + | evstdd TMP0, 0(TMP2) + } + |5: + | cmplw RB, RD + | decode_RA8 RA, INS + | bgt >6 + | sub BASE, TMP2, RA + | lwz LFUNC:TMP1, FRAME_FUNC(BASE) + | ins_next1 + | lwz TMP1, LFUNC:TMP1->pc + | lwz KBASE, PC2PROTO(k)(TMP1) + | ins_next2 + | + |6: // Fill up results with nil. + | subi TMP1, RD, 8 + | addi RD, RD, 8 + | evstddx TISNIL, TMP2, TMP1 + | b <5 + break; + + /* -- Loops and branches ------------------------------------------------ */ + + case BC_FORL: +#if LJ_HASJIT + | hotloop +#endif + | // Fall through. Assumes BC_IFORL follows. + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + | // RA = base*8, RD = target (after end of loop or start of loop) + vk = (op == BC_IFORL || op == BC_JFORL); + | add RA, BASE, RA + | evldd TMP1, FORL_IDX*8(RA) + | evldd TMP3, FORL_STEP*8(RA) + | evldd TMP2, FORL_STOP*8(RA) + if (!vk) { + | evcmpgtu cr0, TMP1, TISNUM + | evcmpgtu cr7, TMP3, TISNUM + | evcmpgtu cr1, TMP2, TISNUM + | cror 4*cr0+lt, 4*cr0+lt, 4*cr7+lt + | cror 4*cr0+lt, 4*cr0+lt, 4*cr1+lt + | blt ->vmeta_for + } + if (vk) { + | efdadd TMP1, TMP1, TMP3 + | evstdd TMP1, FORL_IDX*8(RA) + } + | evcmpgts TMP3, TISNIL + | evstdd TMP1, FORL_EXT*8(RA) + | bge >2 + | efdcmpgt TMP1, TMP2 + |1: + if (op != BC_JFORL) { + | srwi RD, RD, 1 + | add RD, PC, RD + if (op == BC_JFORI) { + | addis PC, RD, -(BCBIAS_J*4 >> 16) + } else { + | addis RD, RD, -(BCBIAS_J*4 >> 16) + } + } + if (op == BC_FORI) { + | iselgt PC, RD, PC + } else if (op == BC_IFORL) { + | iselgt PC, PC, RD + } else { + | ble =>BC_JLOOP + } + | ins_next + |2: + | efdcmpgt TMP2, TMP1 + | b <1 + break; + + case BC_ITERL: +#if LJ_HASJIT + | hotloop +#endif + | // Fall through. Assumes BC_IITERL follows. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | // RA = base*8, RD = target + | evlddx TMP1, BASE, RA + | subi RA, RA, 8 + | checknil TMP1 + | checkok >1 // Stop if iterator returned nil. + if (op == BC_JITERL) { + | NYI + } else { + | branch_RD // Otherwise save control var + branch. + | evstddx TMP1, BASE, RA + } + |1: + | ins_next + break; + + case BC_LOOP: + | // RA = base*8, RD = target (loop extent) + | // Note: RA/RD is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. +#if LJ_HASJIT + | hotloop +#endif + | // Fall through. Assumes BC_ILOOP follows. + break; + + case BC_ILOOP: + | // RA = base*8, RD = target (loop extent) + | ins_next + break; + + case BC_JLOOP: +#if LJ_HASJIT + | NYI +#endif + break; + + case BC_JMP: + | // RA = base*8 (only used by trace recorder), RD = target + | branch_RD + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: +#if LJ_HASJIT + | hotcall +#endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow. + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 + | lwz TMP2, L->maxstack + | lbz TMP1, -4+PC2PROTO(numparams)(PC) + | lwz KBASE, -4+PC2PROTO(k)(PC) + | cmplw RA, TMP2 + | slwi TMP1, TMP1, 3 + | bgt ->vm_growstack_l + | ins_next1 + |2: + | cmplw NARGS8:RC, TMP1 // Check for missing parameters. 
+ | ble >3 + if (op == BC_JFUNCF) { + | NYI + } else { + | ins_next2 + } + | + |3: // Clear missing parameters. + | evstddx TISNIL, BASE, NARGS8:RC + | addi NARGS8:RC, NARGS8:RC, 8 + | b <2 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | NYI // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8 + | lwz TMP2, L->maxstack + | add TMP1, BASE, RC + | add TMP0, RA, RC + | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC. + | addi TMP3, RC, 8+FRAME_VARG + | lwz KBASE, -4+PC2PROTO(k)(PC) + | cmplw TMP0, TMP2 + | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG. + | bge ->vm_growstack_l + | lbz TMP2, -4+PC2PROTO(numparams)(PC) + | mr RA, BASE + | mr RC, TMP1 + | ins_next1 + | cmpwi TMP2, 0 + | addi BASE, TMP1, 8 + | beq >3 + |1: + | cmplw RA, RC // Less args than parameters? + | evldd TMP0, 0(RA) + | bge >4 + | evstdd TISNIL, 0(RA) // Clear old fixarg slot (help the GC). + | addi RA, RA, 8 + |2: + | addic. TMP2, TMP2, -1 + | evstdd TMP0, 8(TMP1) + | addi TMP1, TMP1, 8 + | bne <1 + |3: + | ins_next2 + | + |4: // Clear missing parameters. + | evmr TMP0, TISNIL + | b <2 + break; + + case BC_FUNCC: + case BC_FUNCCW: + | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8 + if (op == BC_FUNCC) { + | lwz TMP3, CFUNC:RB->f + } else { + | lwz TMP3, DISPATCH_GL(wrapf)(DISPATCH) + } + | add TMP1, RA, NARGS8:RC + | lwz TMP2, L->maxstack + | add RC, BASE, NARGS8:RC + | stw BASE, L->base + | cmplw TMP1, TMP2 + | stw RC, L->top + | li_vmstate C + | mtctr TMP3 + if (op == BC_FUNCCW) { + | lwz CARG2, CFUNC:RB->f + } + | mr CARG1, L + | bgt ->vm_growstack_c // Need to grow stack. + | st_vmstate + | bctrl // (lua_State *L [, lua_CFunction f]) + | // Returns nresults. + | lwz TMP1, L->top + | slwi RD, CRET1, 3 + | lwz BASE, L->base + | li_vmstate INTERP + | lwz PC, FRAME_PC(BASE) // Fetch PC of caller. + | sub RA, TMP1, RD // RA = L->top - nresults*8 + | st_vmstate + | b ->vm_returnc + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx); + + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ + int i; + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 65\n" + "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" + "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n", + (int)ctx->codesz, CFRAME_SIZE); + for (i = 14; i <= 31; i++) +#if LJ_TARGET_PPCSPE + fprintf(ctx->fp, + "\t.byte %d\n\t.uleb128 %d\n" + "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n", + 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i)); +#else +#error "missing frame info for saved registers" +#endif + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE0:\n\n"); + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 65\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .LASFDE1-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" + "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n", + (int)ctx->codesz, CFRAME_SIZE); + for (i = 14; i <= 31; i++) +#if LJ_TARGET_PPCSPE + fprintf(ctx->fp, + "\t.byte %d\n\t.uleb128 %d\n" + "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n", + 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i)); +#else +#error "missing frame info for saved registers" +#endif + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE1:\n\n"); + break; + default: + break; + } +} + diff --git a/src/luajit2/src/buildvm_ppcspe.h b/src/luajit2/src/buildvm_ppcspe.h new file mode 100644 index 0000000000..257fe353c6 --- /dev/null +++ b/src/luajit2/src/buildvm_ppcspe.h @@ -0,0 +1,6108 @@ +/* +** This file has been pre-processed with DynASM. +** http://luajit.org/dynasm.html +** DynASM version 1.3.0, DynASM ppc version 1.3.0 +** DO NOT EDIT! The original file is in "buildvm_ppc.dasc". 
+*/ + +#if DASM_VERSION != 10300 +#error "Version mismatch between DynASM and included encoding engine" +#endif + +#define DASM_SECTION_CODE_OP 0 +#define DASM_SECTION_CODE_SUB 1 +#define DASM_MAXSECTION 2 +static const unsigned int build_actionlist[4992] = { +0x00010001, +0x00060014, +0x72000000, +0x00090200, +0x11000229, +0x000980b0, +0x41820000, +0x00050815, +0x8209fff8, +0x7d2e4b78, +0x9514fff8, +0x00060016, +0x72000000, +0x00090200, +0x398c0008, +0x7d936378, +0x41820000, +0x00050817, +0x00060018, +0x2c000000, +0x00098200, +0x56090038, +0x38000000, +0x00098200, +0x7d297050, +0x40820000, +0x00050814, +0x350cfff8, +0x91320000, +0x00098200, +0x81210018, +0x39cefff8, +0x90110000, +0x00098200, +0x55291800, +0x000900a1, +0x41820000, +0x00050802, +0x0006000b, +0x3508fff8, +0x10140301, +0x3a940008, +0x100e0321, +0x39ce0008, +0x40820000, +0x0005080b, +0x0006000c, +0x7c096000, +0x40820000, +0x00050806, +0x0006000d, +0x91d20000, +0x00098200, +0x00060019, +0x00000000, +0x80010014, +0x38600000, +0x90120000, +0x00098200, +0x0006001a, +0x800100b4, +0x11c12301, +0x11e12b01, +0x12013301, +0x12213b01, +0x12414301, +0x12614b01, +0x7c0803a6, +0x12815301, +0x12a15b01, +0x12c16301, +0x12e16b01, +0x13017301, +0x13217b01, +0x13418301, +0x13618b01, +0x13819301, +0x13a19b01, +0x13c1a301, +0x13e1ab01, +0x382100b0, +0x4e800020, +0x00060010, +0x40810000, +0x00050807, +0x81120000, +0x00098200, +0x7c0e4040, +0x40800000, +0x00050808, +0x134e0321, +0x398c0008, +0x39ce0008, +0x48000000, +0x0005000c, +0x00060011, +0x7c096050, +0x2c090000, +0x7c007050, +0x7dce009e, +0x48000000, +0x0005000d, +0x00060012, +0x91d20000, +0x00098200, +0x7d956378, +0x7d244b78, +0x7e439378, +0x48000001, +0x00030000, +0x81210018, +0x7eacab78, +0x55291800, +0x000900a1, +0x81d20000, +0x00098200, +0x48000000, +0x0005000c, +0x0006001b, +0x7c611b78, +0x7c832378, +0x0006001c, +0x82410010, +0x38000000, +0x00098200, +0x81120000, +0x00098200, +0x90080000, +0x00098200, +0x48000000, +0x0005001a, +0x0006001d, +0x00000000, +0x5461003a, +0x0006001e, +0x82410010, +0x12c00229, +0x000980b0, +0x13200229, +0x000980b0, +0x3f604338, +0x13000229, +0x000980b0, +0x38000000, +0x81d20000, +0x00098200, +0x137b022d, +0x82320000, +0x00098200, +0x12e00229, +0x000980b0, +0x39000000, +0x00098200, +0x13400229, +0x000980b0, +0x38000000, +0x00098200, +0x820efff8, +0x3a8efff8, +0x3a310000, +0x00098200, +0x91140000, +0x39800010, +0x90110000, +0x00098200, +0x48000000, +0x00050016, +0x0006001f, +0x38800000, +0x00098200, +0x48000000, +0x00050002, +0x00060020, +0x7d6e5a14, +0x7e8ea050, +0x91d20000, +0x00098200, +0x3a100004, +0x91720000, +0x00098200, +0x568400fe, +0x000900ab, +0x0006000c, +0x9201000c, +0x7e439378, +0x48000001, +0x00030000, +0x00000000, +0x81d20000, +0x00098200, +0x81720000, +0x00098200, +0x814efffc, +0x7d6e5850, +0x820a0000, +0x00098200, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54f4dd78, +0x7c11402e, +0x7e947214, +0x7c0903a6, +0x4e800420, +0x00060021, +0x9421ff50, +0x11c12321, +0x11e12b21, +0x12013321, +0x12213b21, +0x12414321, +0x12614b21, +0x7c0802a6, +0x12815321, +0x12a15b21, +0x12c16321, +0x12e16b21, +0x13017321, +0x13217b21, +0x900100b4, +0x13418321, +0x13618b21, +0x13819321, +0x13a19b21, +0x13c1a321, +0x13e1ab21, +0x7c721b78, +0x82320000, +0x00098200, +0x7c8e2378, +0x89120000, +0x00098200, +0x92410010, +0x3a000000, +0x00098200, +0x38010000, +0x00098200, +0x3a310000, +0x00098200, +0x90a10018, +0x28080000, +0x90a1001c, +0x90120000, +0x00098200, +0x90a10014, +0x9061000c, +0x41820000, +0x00050803, +0x7dd47378, +0x81d20000, +0x00098200, +0x12c00229, +0x000980b0, 
+0x81120000, +0x00098200, +0x13200229, +0x000980b0, +0x3f604338, +0x13000229, +0x000980b0, +0x820efff8, +0x39200000, +0x12e00229, +0x000980b0, +0x7d8e4050, +0x137b4a2d, +0x98b20000, +0x00098200, +0x72000000, +0x00090200, +0x38000000, +0x00098200, +0x398c0008, +0x13400229, +0x000980b0, +0x7d936378, +0x90110000, +0x00098200, +0x00000000, +0x41820000, +0x00050817, +0x48000000, +0x00050018, +0x00060022, +0x9421ff50, +0x11c12321, +0x11e12b21, +0x12013321, +0x12213b21, +0x12414321, +0x12614b21, +0x7c0802a6, +0x12815321, +0x12a15b21, +0x12c16321, +0x12e16b21, +0x13017321, +0x13217b21, +0x900100b4, +0x13418321, +0x13618b21, +0x13819321, +0x13a19b21, +0x13c1a321, +0x13e1ab21, +0x3a000000, +0x00098200, +0x90c1001c, +0x48000000, +0x00050001, +0x00060023, +0x9421ff50, +0x11c12321, +0x11e12b21, +0x12013321, +0x12213b21, +0x12414321, +0x12614b21, +0x7c0802a6, +0x12815321, +0x12a15b21, +0x12c16321, +0x12e16b21, +0x13017321, +0x13217b21, +0x900100b4, +0x13418321, +0x13618b21, +0x13819321, +0x13a19b21, +0x13c1a321, +0x13e1ab21, +0x3a000000, +0x00098200, +0x0006000b, +0x81030000, +0x00098200, +0x90a10018, +0x7c721b78, +0x90610010, +0x7c8e2378, +0x90320000, +0x00098200, +0x82320000, +0x00098200, +0x9061000c, +0x91010014, +0x3a310000, +0x00098200, +0x0006000d, +0x81320000, +0x00098200, +0x12c00229, +0x000980b0, +0x81120000, +0x00098200, +0x13200229, +0x000980b0, +0x7e107214, +0x13000229, +0x000980b0, +0x3f604338, +0x38000000, +0x7e098050, +0x12e00229, +0x000980b0, +0x7d6e4050, +0x137b022d, +0x38000000, +0x00098200, +0x13400229, +0x000980b0, +0x90110000, +0x00098200, +0x00060024, +0x00000000, +0x3800fff8, +0x114e0300, +0x100aca34, +0x40800000, +0x00050825, +0x00060026, +0x920efff8, +0x820a0000, +0x00098200, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54f4dd78, +0x7c11402e, +0x7e947214, +0x7c0903a6, +0x4e800420, +0x00060027, +0x9421ff50, +0x11c12321, +0x11e12b21, +0x12013321, +0x12213b21, +0x12414321, +0x12614b21, +0x7c0802a6, +0x12815321, +0x12a15b21, +0x12c16321, +0x12e16b21, +0x13017321, +0x13217b21, +0x900100b4, +0x13418321, +0x13618b21, +0x13819321, +0x13a19b21, +0x13c1a321, +0x13e1ab21, +0x7c721b78, +0x80030000, +0x00098200, +0x90610010, +0x81120000, +0x00098200, +0x9061000c, +0x7c080050, +0x81120000, +0x00098200, +0x90320000, +0x00098200, +0x39200000, +0x90010018, +0x9121001c, +0x91010014, +0x7cc903a6, +0x4e800421, +0x7c6e1b79, +0x82320000, +0x00098200, +0x3a000000, +0x00098200, +0x3a310000, +0x00098200, +0x40820000, +0x0005080d, +0x48000000, +0x00050019, +0x00060015, +0x800efff4, +0x7dca7378, +0x7d2e4b78, +0x8109fffc, +0x28000000, +0x820afff0, +0x41820000, +0x00050801, +0x392cfff8, +0x81080000, +0x00098200, +0x13544b20, +0x81e80000, +0x00098200, +0x7c0903a6, +0x4e800420, +0x0006000b, +0x390afff0, +0x7d6e4050, +0x48000000, +0x00050028, +0x00060029, +0x80f0fffc, +0x388afff0, +0x54f55d78, +0x10140301, +0x7d0eaa14, +0x91d20000, +0x00098200, +0x7c082040, +0x7ca82050, +0x54f4dd78, +0x10040321, +0x40820000, +0x0005082a, +0x00000000, +0x100ea320, +0x48000000, +0x0005002b, +0x0006002c, +0x11775a2d, +0x38b10000, +0x00098200, +0x54ea5d78, +0x11650321, +0x7c8e5214, +0x48000000, +0x00050001, +0x0006002d, +0x1158522d, +0x38910000, +0x00098200, +0x11775a2d, +0x11440321, +0x38b10000, +0x00098200, +0x11650321, +0x48000000, +0x00050001, +0x0006002e, +0x100002f1, +0x54ea5d78, +0x38b10000, +0x00098200, +0x7c8e5214, +0x10050321, +0x48000000, +0x00050001, +0x0006002f, +0x54ea5d78, +0x54eb9d78, +0x7c8e5214, +0x7cae5a14, +0x0006000b, +0x91d20000, +0x00098200, +0x7e439378, +0x9201000c, +0x48000001, +0x00030001, +0x28030000, 
+0x41820000, +0x00050803, +0x10030301, +0x100ea320, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000d, +0x210e0000, +0x00098200, +0x81d20000, +0x00098200, +0x920efff0, +0x7e087214, +0x814efffc, +0x39600010, +0x48000000, +0x00050026, +0x00060030, +0x11775a2d, +0x38b10000, +0x00098200, +0x00000000, +0x54ea5d78, +0x11650321, +0x7c8e5214, +0x48000000, +0x00050001, +0x00060031, +0x1158522d, +0x38910000, +0x00098200, +0x11775a2d, +0x11440321, +0x38b10000, +0x00098200, +0x11650321, +0x48000000, +0x00050001, +0x00060032, +0x100002f1, +0x54ea5d78, +0x38b10000, +0x00098200, +0x7c8e5214, +0x10050321, +0x48000000, +0x00050001, +0x00060033, +0x54ea5d78, +0x54eb9d78, +0x7c8e5214, +0x7cae5a14, +0x0006000b, +0x91d20000, +0x00098200, +0x7e439378, +0x9201000c, +0x48000001, +0x00030002, +0x28030000, +0x100ea300, +0x41820000, +0x00050803, +0x10030321, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000d, +0x210e0000, +0x00098200, +0x81d20000, +0x00098200, +0x920efff0, +0x7e087214, +0x814efffc, +0x39600018, +0x100e1321, +0x48000000, +0x00050026, +0x00060034, +0x7e439378, +0x3a10fffc, +0x7c8ea214, +0x9201000c, +0x7cae6214, +0x91d20000, +0x00098200, +0x54e6063e, +0x48000001, +0x00030003, +0x0006000d, +0x28030001, +0x41810000, +0x00050835, +0x0006000e, +0x00000000, +0x80f00000, +0x3a100004, +0x54e993ba, +0x3cd00000, +0x00098200, +0x7d293214, +0x7e10481e, +0x0006002b, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00060036, +0x80f0fffc, +0x10140301, +0x54e8dd78, +0x100e4320, +0x48000000, +0x0005002b, +0x00060037, +0x80140000, +0x39000000, +0x00098200, +0x7c080040, +0x48000000, +0x0005000e, +0x00060038, +0x80140000, +0x39000000, +0x00098200, +0x7c004040, +0x48000000, +0x0005000e, +0x00060039, +0x3a10fffc, +0x91d20000, +0x00098200, +0x7e439378, +0x9201000c, +0x48000001, +0x00030004, +0x48000000, +0x0005000d, +0x0006003a, +0x7cae5214, +0x7ccf5a14, +0x48000000, +0x00050001, +0x0006003b, +0x7caf5a14, +0x7cce5214, +0x48000000, +0x00050001, +0x0006003c, +0x7cae6214, +0x7ca62b78, +0x48000000, +0x00050001, +0x0006003d, +0x7cae5214, +0x7cce5a14, +0x0006000b, +0x00000000, +0x7c8ea214, +0x91d20000, +0x00098200, +0x7e439378, +0x9201000c, +0x54e7063e, +0x48000001, +0x00030005, +0x28030000, +0x41820000, +0x0005082b, +0x00060035, +0x7d0e1850, +0x9203fff0, +0x3a080000, +0x00098200, +0x7c6e1b78, +0x39600010, +0x48000000, +0x00050024, +0x0006003e, +0x00000000, +0x7c751b78, +0x00000000, +0x7c8e6214, +0x91d20000, +0x00098200, +0x7e439378, +0x9201000c, +0x48000001, +0x00030006, +0x00000000, +0x28030000, +0x40820000, +0x00050835, +0x7ea3ab78, +0x48000000, +0x0005003f, +0x00000000, +0x48000000, +0x00050035, +0x00000000, +0x00060025, +0x7e439378, +0x91320000, +0x00098200, +0x388efff8, +0x9201000c, +0x7cae5a14, +0x7d755b78, +0x48000001, +0x00030007, +0x814efffc, +0x39750008, +0x920efff8, +0x820a0000, +0x00098200, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54f4dd78, +0x7c11402e, +0x7e947214, +0x7c0903a6, +0x4e800420, +0x00060040, +0x7e439378, +0x91d20000, +0x00098200, +0x3894fff8, +0x9201000c, +0x7cb45a14, +0x7d755b78, +0x48000001, +0x00030007, +0x810efff8, +0x39750008, +0x8154fffc, +0x48000000, +0x00050041, +0x00060042, +0x7e439378, +0x91d20000, +0x00098200, +0x7e84a378, +0x9201000c, +0x7cf53b78, +0x48000001, +0x00030008, +0x00000000, +0x56a0063e, +0x00000000, +0x56b4dd78, 
+0x00000000, +0x2c000000, +0x00098200, +0x00000000, +0x56ac9b78, +0x00000000, +0x41820000, +0x00070800, +0x00000000, +0x48000000, +0x00070000, +0x00060043, +0x280b0008, +0x100e0301, +0x41800000, +0x00050844, +0x111ad200, +0x3a8efff8, +0x10804232, +0x820efff8, +0x40840000, +0x00050844, +0x10140321, +0x398b0008, +0x41820000, +0x00050845, +0x39000008, +0x396bfff8, +0x0006000b, +0x7c085840, +0x100e4300, +0x10144320, +0x39080008, +0x40820000, +0x0005080b, +0x48000000, +0x00050045, +0x00060046, +0x280b0008, +0x806e0000, +0x41800000, +0x00050844, +0x39200000, +0x00098200, +0x7c03b040, +0x7c6818f8, +0x7d09401e, +0x55081800, +0x000900a1, +0x392a0000, +0x00098200, +0x10694300, +0x48000000, +0x00050047, +0x00060048, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003c234, +0x11031a2c, +0x40800000, +0x00050806, +0x0006000b, +0x81430000, +0x00098200, +0x0006000c, +0x107ad217, +0x280a0000, +0x81710000, +0x00098200, +0x41820000, +0x00050847, +0x00000000, +0x800a0000, +0x00098200, +0x1078522d, +0x810b0000, +0x00098200, +0x812a0000, +0x00098200, +0x11775a2d, +0x7d080038, +0x55002800, +0x000900a1, +0x55081800, +0x000900a1, +0x7d080050, +0x7d294214, +0x0006000d, +0x10090301, +0x00090cab, +0x11090301, +0x00090cab, +0x10005a34, +0x81290000, +0x00098200, +0x41830000, +0x00050805, +0x28090000, +0x41820000, +0x00050847, +0x48000000, +0x0005000d, +0x0006000f, +0x1008d234, +0x41800000, +0x00050847, +0x10684217, +0x48000000, +0x00050047, +0x00060010, +0x2c080000, +0x00098200, +0x7d0840f8, +0x41820000, +0x0005080b, +0x1003b232, +0x55081000, +0x000900a1, +0x39200000, +0x00098200, +0x7d09401e, +0x39310000, +0x00098200, +0x7d49402e, +0x48000000, +0x0005000c, +0x00060049, +0x00000000, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x1003222c, +0x1000c234, +0x40830000, +0x00050844, +0x81030000, +0x00098200, +0x28080000, +0x88c30000, +0x00098200, +0x40820000, +0x00050844, +0x70c00000, +0x00090200, +0x90830000, +0x00098200, +0x41820000, +0x00050847, +0x80110000, +0x00098200, +0x54c607b8, +0x90710000, +0x00098200, +0x98c30000, +0x00098200, +0x90030000, +0x00098200, +0x48000000, +0x00050047, +0x0006004a, +0x280b0010, +0x108e0301, +0x41800000, +0x00050844, +0x1004c234, +0x38ae0008, +0x40800000, +0x00050844, +0x7e439378, +0x48000001, +0x00030009, +0x10630301, +0x48000000, +0x00050047, +0x0006004b, +0x280b0008, +0x106e0301, +0x40820000, +0x00050844, +0x1003b232, +0x41800000, +0x00050847, +0x48000000, +0x00050044, +0x0006004c, +0x00000000, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003ba34, +0x41800000, +0x00050847, +0x80110000, +0x00098200, +0x1003b232, +0x28800000, +0x91d20000, +0x00098200, +0x4c403202, +0x9201000c, +0x40820000, +0x00050844, +0x80110000, +0x00098200, +0x81110000, +0x00098200, +0x7c004040, +0x40800001, +0x0005084d, +0x7e439378, +0x7dc47378, +0x48000001, +0x0003000a, +0x10771a2d, +0x48000000, +0x00050047, +0x0006004e, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x134e5b20, +0x1004c234, +0x820efff8, +0x40800000, +0x00050844, +0x91d20000, +0x00098200, +0x7e439378, +0x91d20000, +0x00098200, +0x38ae0008, +0x9201000c, +0x48000001, +0x0003000b, +0x28030000, +0x107ad217, +0x41820000, +0x00050847, +0x100e0b01, +0x3a8efff8, +0x110e1301, +0x10140321, +0x39800000, +0x00098200, +0x11140b21, +0x48000000, +0x00050045, +0x0006004f, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003c234, +0x820efff8, +0x40800000, +0x00050844, +0x00000000, +0x81230000, +0x00098200, +0x100a0301, +0x00090cab, +0x28090000, +0x3a8efff8, +0x40820000, +0x00050844, +0x00000000, +0x100a0301, 
+0x00090cab, +0x3a8efff8, +0x00000000, +0x134e0b21, +0x39800000, +0x00098200, +0x10140321, +0x48000000, +0x00050045, +0x00060050, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x1003c234, +0x820efff8, +0x40800000, +0x00050844, +0x1004b232, +0x3cc03ff0, +0x40800000, +0x00050844, +0x112022f5, +0x80030000, +0x00098200, +0x10c6da2d, +0x81030000, +0x00098200, +0x108432e0, +0x39290001, +0x3a8efff8, +0x7c004840, +0x55261800, +0x000900a1, +0x10940321, +0x40810000, +0x00050802, +0x11083300, +0x0006000b, +0x1008d234, +0x39800000, +0x00098200, +0x41800000, +0x00050845, +0x39800000, +0x00098200, +0x11140b21, +0x48000000, +0x00050045, +0x0006000c, +0x80030000, +0x00098200, +0x28000000, +0x39800000, +0x00098200, +0x41820000, +0x00050845, +0x7d244b78, +0x48000001, +0x0003000c, +0x28030000, +0x39800000, +0x00098200, +0x41820000, +0x00050845, +0x00000000, +0x11030301, +0x48000000, +0x0005000b, +0x00060051, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003c234, +0x820efff8, +0x40800000, +0x00050844, +0x00000000, +0x81230000, +0x00098200, +0x100a0301, +0x00090cab, +0x28090000, +0x3a8efff8, +0x40820000, +0x00050844, +0x00000000, +0x100a0301, +0x00090cab, +0x3a8efff8, +0x00000000, +0x11000229, +0x39800000, +0x00098200, +0x110e0b21, +0x10140321, +0x48000000, +0x00050045, +0x00060052, +0x280b0008, +0x88d10000, +0x00098200, +0x41800000, +0x00050844, +0x7dc97378, +0x39ce0008, +0x54c607fe, +0x000900ab, +0x396bfff8, +0x3a060000, +0x00098200, +0x48000000, +0x00050024, +0x00060053, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x88d10000, +0x00098200, +0x7dc97378, +0x1004ca34, +0x40800000, +0x00050844, +0x39ce0010, +0x54c607fe, +0x000900ab, +0x10890321, +0x396bfff0, +0x10690b21, +0x3a060000, +0x00098200, +0x48000000, +0x00050024, +0x00060054, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x10031a2c, +0x2c000000, +0x00098200, +0x40820000, +0x00050844, +0x88030000, +0x00098200, +0x81030000, +0x00098200, +0x80830000, +0x00098200, +0x00000000, +0x28000000, +0x00090200, +0x81230000, +0x00098200, +0x28880000, +0x80030000, +0x00098200, +0x7f844840, +0x820efff8, +0x4f013342, +0x7d245a14, +0x4f3e1102, +0x7c890040, +0x4f18cb82, +0x9201000c, +0x4f182b82, +0x91d20000, +0x00098200, +0x41980000, +0x00050844, +0x0006000b, +0x39ce0008, +0x396bfff8, +0x3929fff8, +0x91230000, +0x00098200, +0x39000000, +0x91d20000, +0x00098200, +0x0006000c, +0x7c085800, +0x100e4300, +0x41820000, +0x00050803, +0x10044320, +0x39080008, +0x48000000, +0x0005000c, +0x0006000d, +0x38a00000, +0x7c751b78, +0x38c00000, +0x48000001, +0x00050021, +0x0006000e, +0x81350000, +0x00098200, +0x28030000, +0x00090200, +0x80d50000, +0x00098200, +0x38000000, +0x00098200, +0x81d20000, +0x00098200, +0x90110000, +0x00098200, +0x41810000, +0x00050808, +0x7d893050, +0x80120000, +0x00098200, +0x00000000, +0x280c0000, +0x7d0e6214, +0x41820000, +0x00050806, +0x7c080040, +0x39000000, +0x41810000, +0x00050809, +0x38ccfff8, +0x91350000, +0x00098200, +0x0006000f, +0x7c083040, +0x10094300, +0x100e4320, +0x39080008, +0x40820000, +0x0005080f, +0x00060010, +0x72000000, +0x00090200, +0x39000000, +0x00098200, +0x3a8efff8, +0x910efff8, +0x398c0010, +0x00060011, +0x9201000c, +0x7d936378, +0x41820000, +0x00050817, +0x48000000, +0x00050018, +0x00060012, +0x72000000, +0x00090200, +0x38c6fff8, +0x39000000, +0x00098200, +0x10060301, +0x90d50000, +0x00098200, +0x39800000, +0x00098200, +0x910efff8, +0x3a8efff8, +0x100e0321, +0x48000000, +0x00050011, +0x00060013, +0x7e439378, +0x558400fe, +0x000900ab, +0x48000001, +0x00030000, +0x38600000, 
+0x48000000, +0x0005000e, +0x00060055, +0x00000000, +0x806a0000, +0x00098200, +0x88030000, +0x00098200, +0x81030000, +0x00098200, +0x80830000, +0x00098200, +0x28000000, +0x00090200, +0x81230000, +0x00098200, +0x28880000, +0x80030000, +0x00098200, +0x7f844840, +0x820efff8, +0x4f013342, +0x7d245a14, +0x4f3e1102, +0x7c890040, +0x4f18cb82, +0x9201000c, +0x4f182b82, +0x91d20000, +0x00098200, +0x41980000, +0x00050844, +0x0006000b, +0x91230000, +0x00098200, +0x39000000, +0x91d20000, +0x00098200, +0x0006000c, +0x7c085800, +0x100e4300, +0x41820000, +0x00050803, +0x10044320, +0x39080008, +0x48000000, +0x0005000c, +0x0006000d, +0x38a00000, +0x7c751b78, +0x38c00000, +0x48000001, +0x00050021, +0x0006000e, +0x81350000, +0x00098200, +0x28030000, +0x00090200, +0x80d50000, +0x00098200, +0x38000000, +0x00098200, +0x00000000, +0x81d20000, +0x00098200, +0x90110000, +0x00098200, +0x41810000, +0x00050808, +0x7d893050, +0x80120000, +0x00098200, +0x280c0000, +0x7d0e6214, +0x41820000, +0x00050806, +0x7c080040, +0x39000000, +0x41810000, +0x00050809, +0x38ccfff8, +0x91350000, +0x00098200, +0x0006000f, +0x7c083040, +0x10094300, +0x100e4320, +0x39080008, +0x40820000, +0x0005080f, +0x00060010, +0x72000000, +0x00090200, +0x7dd47378, +0x398c0008, +0x00060011, +0x9201000c, +0x7d936378, +0x41820000, +0x00050817, +0x48000000, +0x00050018, +0x00060012, +0x7e439378, +0x7ea4ab78, +0x48000001, +0x0003000d, +0x00060013, +0x7e439378, +0x558400fe, +0x000900ab, +0x48000001, +0x00030000, +0x38600000, +0x48000000, +0x0005000e, +0x00060056, +0x80120000, +0x00098200, +0x00000000, +0x7d0e5a14, +0x91d20000, +0x00098200, +0x70000000, +0x00090200, +0x91120000, +0x00098200, +0x38600000, +0x00098200, +0x41820000, +0x00050844, +0x93720000, +0x00098200, +0x98720000, +0x00098200, +0x48000000, +0x0005001a, +0x00060057, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x40800000, +0x00050844, +0x106302e4, +0x00060047, +0x820efff8, +0x3a8efff8, +0x10740321, +0x00060058, +0x39800000, +0x00098200, +0x00060045, +0x72000000, +0x00090200, +0x7d936378, +0x40820000, +0x00050818, +0x80f0fffc, +0x54ea5d78, +0x0006000f, +0x7c0a6040, +0x54e0dd78, +0x41810000, +0x00050806, +0x80f00000, +0x3a100004, +0x7dc0a050, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00060010, +0x390cfff8, +0x398c0008, +0x13544320, +0x48000000, +0x0005000f, +0x00060059, +0x00000000, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x820efff8, +0x48000001, +0x0005005a, +0x3a8efff8, +0x10940321, +0x48000000, +0x00050058, +0x0006005b, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x820efff8, +0x48000001, +0x0005005c, +0x3a8efff8, +0x10940321, +0x48000000, +0x00050058, +0x0006005d, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x0003000e, +0x1063222d, +0x48000000, +0x00050047, +0x0006005e, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x0003000f, +0x1063222d, +0x48000000, +0x00050047, +0x0006005f, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x00000000, +0x48000001, +0x00030010, +0x1063222d, +0x48000000, +0x00050047, +0x00060060, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x00030011, +0x1063222d, +0x48000000, +0x00050047, 
+0x00060061, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x00030012, +0x1063222d, +0x48000000, +0x00050047, +0x00060062, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x00030013, +0x1063222d, +0x48000000, +0x00050047, +0x00060063, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x00030014, +0x1063222d, +0x48000000, +0x00050047, +0x00060064, +0x00000000, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x00030015, +0x1063222d, +0x48000000, +0x00050047, +0x00060065, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x00030016, +0x1063222d, +0x48000000, +0x00050047, +0x00060066, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x00030017, +0x1063222d, +0x48000000, +0x00050047, +0x00060067, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x00030018, +0x1063222d, +0x48000000, +0x00050047, +0x00060068, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x00000000, +0x48000001, +0x00030019, +0x1063222d, +0x48000000, +0x00050047, +0x00060069, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x48000001, +0x0003001a, +0x1063222d, +0x48000000, +0x00050047, +0x0006006a, +0x280b0010, +0x108e0301, +0x10ce0b01, +0x41800000, +0x00050844, +0x1066222c, +0x1003b232, +0x10a6322c, +0x40830000, +0x00050844, +0x48000001, +0x0003001b, +0x1063222d, +0x48000000, +0x00050047, +0x0006006b, +0x280b0010, +0x108e0301, +0x10ce0b01, +0x41800000, +0x00050844, +0x1066222c, +0x1003b232, +0x10a6322c, +0x40830000, +0x00050844, +0x48000001, +0x0003001c, +0x1063222d, +0x48000000, +0x00050047, +0x0006006c, +0x280b0010, +0x108e0301, +0x10ce0b01, +0x41800000, +0x00050844, +0x1066222c, +0x1003b232, +0x10a6322c, +0x40830000, +0x00050844, +0x48000001, +0x0003001d, +0x1063222d, +0x48000000, +0x00050047, +0x0006006d, +0x0006006e, +0x00000000, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x40800000, +0x00050844, +0x108a0301, +0x00090cab, +0x106322e8, +0x48000000, +0x00050047, +0x0006006f, +0x280b0010, +0x108e0301, +0x10ce0b01, +0x41800000, +0x00050844, +0x1066222c, +0x1003b232, +0x40830000, +0x00050844, +0x10a032f5, +0x48000001, +0x0003001e, +0x1063222d, +0x48000000, +0x00050047, +0x00060070, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x38b10000, +0x00098200, +0x820efff8, +0x48000001, +0x0003001f, +0x81110000, +0x00098200, +0x1063222d, +0x108042f1, +0x3a8efff8, +0x10740321, +0x39800000, +0x00098200, +0x10940b21, +0x48000000, +0x00050045, +0x00060071, +0x280b0008, +0x108e0301, +0x41800000, +0x00050844, +0x1004b232, +0x1064222c, +0x40800000, +0x00050844, +0x38aefff8, +0x820efff8, +0x48000001, +0x00030020, +0x1063222d, +0x3a8efff8, +0x106e0321, +0x39800000, +0x00098200, +0x00000000, +0x48000000, +0x00050045, +0x00060072, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x39000008, +0x40800000, +0x00050844, +0x0006000b, +0x108e4300, +0x7c885840, +0x1004b232, +0x40840000, +0x00050847, +0x40800000, +0x00050844, +0x10041afd, +0x39080008, +0x4c010b82, 
+0x10641a78, +0x48000000, +0x0005000b, +0x00060073, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x39000008, +0x40800000, +0x00050844, +0x0006000b, +0x108e4300, +0x7c885840, +0x1004b232, +0x40840000, +0x00050847, +0x40800000, +0x00050844, +0x10041afc, +0x39080008, +0x4c010b82, +0x10641a78, +0x48000000, +0x0005000b, +0x00060074, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003ba34, +0x40800000, +0x00050844, +0x80030000, +0x00098200, +0x106002f1, +0x48000000, +0x00050047, +0x00060075, +0x280b0008, +0x106e0301, +0x40820000, +0x00050844, +0x00000000, +0x1003ba34, +0x3a8efff8, +0x40800000, +0x00050844, +0x80030000, +0x00098200, +0x39800000, +0x00098200, +0x89030000, +0x00098200, +0x39200000, +0x00098200, +0x28000000, +0x820efff8, +0x106042f1, +0x7d8c489e, +0x10740321, +0x48000000, +0x00050045, +0x00060076, +0x80110000, +0x00098200, +0x81110000, +0x00098200, +0x7c004040, +0x40800001, +0x0005084d, +0x280b0008, +0x106e0301, +0x40820000, +0x00050844, +0x1003b232, +0x38910000, +0x00098200, +0x40800000, +0x00050844, +0x10001afa, +0x38a00001, +0x280000ff, +0x98040000, +0x41810000, +0x00050844, +0x00060077, +0x7e439378, +0x91d20000, +0x00098200, +0x9201000c, +0x48000001, +0x00030021, +0x81d20000, +0x00098200, +0x10771a2d, +0x48000000, +0x00050047, +0x00060078, +0x80110000, +0x00098200, +0x81110000, +0x00098200, +0x00000000, +0x7c004040, +0x40800001, +0x0005084d, +0x280b0010, +0x10ae1301, +0x106e0301, +0x41800000, +0x00050844, +0x108e0b01, +0x3920ffff, +0x41820000, +0x00050801, +0x1005b232, +0x40800000, +0x00050844, +0x11202afa, +0x0006000b, +0x1004b232, +0x40800000, +0x00050844, +0x1003ba34, +0x110022fa, +0x40800000, +0x00050844, +0x80030000, +0x00098200, +0x7c004840, +0x7cc90214, +0x41800000, +0x00050805, +0x0006000c, +0x2c080000, +0x7cc80214, +0x40810000, +0x00050807, +0x0006000d, +0x7ca84851, +0x38830000, +0x00098200, +0x38a50001, +0x7c844214, +0x7ca0281e, +0x48000000, +0x00050077, +0x0006000f, +0x7c004800, +0x38c60001, +0x7d26005e, +0x48000000, +0x0005000c, +0x00060011, +0x2c860000, +0x7d00309e, +0x7d00411e, +0x39080001, +0x48000000, +0x0005000d, +0x00060079, +0x80110000, +0x00098200, +0x81110000, +0x00098200, +0x7c004040, +0x40800001, +0x0005084d, +0x00000000, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x1004b232, +0x40800000, +0x00050844, +0x1003ba34, +0x10a022fa, +0x40800000, +0x00050844, +0x80030000, +0x00098200, +0x2c050000, +0x81110000, +0x00098200, +0x40810000, +0x00050802, +0x28000001, +0x3925ffff, +0x41800000, +0x00050802, +0x7c882840, +0x40820000, +0x00050844, +0x88030000, +0x00098200, +0x80910000, +0x00098200, +0x41840000, +0x00050844, +0x0006000b, +0x28090000, +0x7c0449ae, +0x3929ffff, +0x40820000, +0x0005080b, +0x48000000, +0x00050077, +0x0006000c, +0x38710000, +0x00098200, +0x10771a2d, +0x48000000, +0x00050047, +0x0006007a, +0x80110000, +0x00098200, +0x81110000, +0x00098200, +0x7c004040, +0x40800001, +0x0005084d, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x00000000, +0x1003ba34, +0x81110000, +0x00098200, +0x40800000, +0x00050844, +0x80a30000, +0x00098200, +0x38630000, +0x00098200, +0x80910000, +0x00098200, +0x39200000, +0x7c082840, +0x38c5ffff, +0x41800000, +0x00050844, +0x0006000b, +0x2c060000, +0x7d0348ae, +0x41800000, +0x00050877, +0x7d0431ae, +0x38c6ffff, +0x39290001, +0x48000000, +0x0005000b, +0x0006007b, +0x80110000, +0x00098200, +0x81110000, +0x00098200, +0x7c004040, +0x40800001, +0x0005084d, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003ba34, +0x81110000, +0x00098200, +0x40800000, +0x00050844, 
+0x80a30000, +0x00098200, +0x38630000, +0x00098200, +0x80910000, +0x00098200, +0x7c082840, +0x39200000, +0x41800000, +0x00050844, +0x0006000b, +0x7c092840, +0x7d0348ae, +0x40800000, +0x00050877, +0x00000000, +0x3808ffbf, +0x69060020, +0x2800001a, +0x7d06401e, +0x7d0449ae, +0x39290001, +0x48000000, +0x0005000b, +0x0006007c, +0x80110000, +0x00098200, +0x81110000, +0x00098200, +0x7c004040, +0x40800001, +0x0005084d, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003ba34, +0x81110000, +0x00098200, +0x40800000, +0x00050844, +0x80a30000, +0x00098200, +0x38630000, +0x00098200, +0x80910000, +0x00098200, +0x7c082840, +0x39200000, +0x41800000, +0x00050844, +0x0006000b, +0x7c092840, +0x7d0348ae, +0x40800000, +0x00050877, +0x3808ff9f, +0x69060020, +0x2800001a, +0x7d06401e, +0x7d0449ae, +0x39290001, +0x48000000, +0x0005000b, +0x0006007d, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003c234, +0x40800000, +0x00050844, +0x48000001, +0x00030022, +0x10601af1, +0x48000000, +0x00050047, +0x0006007e, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x00000000, +0x1003b232, +0x40800000, +0x00050844, +0x1063dae0, +0x0006007f, +0x10601af1, +0x48000000, +0x00050047, +0x00060080, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x40800000, +0x00050844, +0x1063dae0, +0x39000008, +0x0006000b, +0x108e4300, +0x7c885840, +0x1004b232, +0x40840000, +0x0005087f, +0x40800000, +0x00050844, +0x1084dae0, +0x7c632038, +0x39080008, +0x48000000, +0x0005000b, +0x00060081, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x40800000, +0x00050844, +0x1063dae0, +0x39000008, +0x0006000b, +0x108e4300, +0x7c885840, +0x1004b232, +0x40840000, +0x0005087f, +0x40800000, +0x00050844, +0x1084dae0, +0x7c632378, +0x39080008, +0x48000000, +0x0005000b, +0x00060082, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x40800000, +0x00050844, +0x1063dae0, +0x39000008, +0x0006000b, +0x108e4300, +0x7c885840, +0x1004b232, +0x40840000, +0x0005087f, +0x00000000, +0x40800000, +0x00050844, +0x1084dae0, +0x7c632278, +0x39080008, +0x48000000, +0x0005000b, +0x00060083, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x40800000, +0x00050844, +0x1063dae0, +0x5460403e, +0x5060c00e, +0x5060c42e, +0x106002f1, +0x48000000, +0x00050047, +0x00060084, +0x280b0008, +0x106e0301, +0x41800000, +0x00050844, +0x1003b232, +0x40800000, +0x00050844, +0x1063dae0, +0x7c6018f8, +0x106002f1, +0x48000000, +0x00050047, +0x00060085, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x1003222c, +0x1000b232, +0x40830000, +0x00050844, +0x1084dae0, +0x1063dae0, +0x548406fe, +0x7c602030, +0x106002f1, +0x48000000, +0x00050047, +0x00060086, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x1003222c, +0x1000b232, +0x40830000, +0x00050844, +0x1084dae0, +0x1063dae0, +0x548406fe, +0x7c602430, +0x106002f1, +0x48000000, +0x00050047, +0x00060087, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x1003222c, +0x1000b232, +0x40830000, +0x00050844, +0x1084dae0, +0x1063dae0, +0x548406fe, +0x7c602630, +0x106002f1, +0x48000000, +0x00050047, +0x00060088, +0x00000000, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x1003222c, +0x1000b232, +0x40830000, +0x00050844, +0x1084dae0, +0x1063dae0, +0x5c60203e, +0x106002f1, +0x48000000, +0x00050047, +0x00060089, +0x280b0010, +0x106e0301, +0x108e0b01, +0x41800000, +0x00050844, +0x1003222c, +0x1000b232, +0x40830000, +0x00050844, +0x1084dae0, +0x1063dae0, +0x7c8400d0, +0x5c60203e, +0x106002f1, +0x48000000, 
+0x00050047, +0x00060044, +0x80ca0000, +0x00098200, +0x7d0e5a14, +0x820efff8, +0x38080000, +0x00098200, +0x81320000, +0x00098200, +0x9201000c, +0x7c004840, +0x91d20000, +0x00098200, +0x91120000, +0x00098200, +0x7e439378, +0x41810000, +0x00050805, +0x7cc903a6, +0x4e800421, +0x81d20000, +0x00098200, +0x2c030000, +0x546c1800, +0x000900a1, +0x3a8efff8, +0x41810000, +0x00050845, +0x0006000b, +0x80120000, +0x00098200, +0x814efffc, +0x7d6e0050, +0x40820000, +0x00050828, +0x820a0000, +0x00098200, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54f4dd78, +0x7c11402e, +0x7e947214, +0x7c0903a6, +0x4e800420, +0x00060028, +0x00000000, +0x72000000, +0x00090200, +0x56080038, +0x40820000, +0x00050803, +0x80f0fffc, +0x54e8dd78, +0x0006000d, +0x7d287050, +0x48000000, +0x00050024, +0x0006000f, +0x38800000, +0x00098200, +0x48000001, +0x00030000, +0x81d20000, +0x00098200, +0x7c000000, +0x48000000, +0x0005000b, +0x0006004d, +0x7ea802a6, +0x91d20000, +0x00098200, +0x7c0e5a14, +0x9201000c, +0x90120000, +0x00098200, +0x7e439378, +0x48000001, +0x00030023, +0x81d20000, +0x00098200, +0x7ea803a6, +0x80120000, +0x00098200, +0x7d6e0050, +0x814efffc, +0x4e800020, +0x0006008a, +0x00000000, +0x7c810808, +0x00000000, +0x0006008b, +0x88d10000, +0x00098200, +0x70c00000, +0x00090200, +0x41820000, +0x00050801, +0x0006000f, +0x39080000, +0x00098200, +0x7c11402e, +0x7c0903a6, +0x4e800420, +0x0006008c, +0x88d10000, +0x00098200, +0x81310000, +0x00098200, +0x70c00000, +0x00090200, +0x54c007c0, +0x000900ab, +0x40820000, +0x0005080f, +0x2c800000, +0x3529ffff, +0x41860000, +0x0005080f, +0x91310000, +0x00098200, +0x41820000, +0x00050801, +0x40840000, +0x0005080f, +0x0006000b, +0x7e439378, +0x92610008, +0x7e048378, +0x91d20000, +0x00098200, +0x48000001, +0x00030024, +0x0006000d, +0x81d20000, +0x00098200, +0x0006000e, +0x00000000, +0x80f0fffc, +0x54e815ba, +0x54ea5d78, +0x39080000, +0x00098200, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006008d, +0x3a100004, +0x826affec, +0x48000000, +0x0005000e, +0x0006008e, +0x00000000, +0x7c810808, +0x00000000, +0x0006008f, +0x7e048378, +0x00000000, +0x48000000, +0x00050001, +0x00000000, +0x00060090, +0x00000000, +0x62040001, +0x0006000b, +0x00000000, +0x7c0e5a14, +0x9201000c, +0x7e439378, +0x91d20000, +0x00098200, +0x7e8ea050, +0x90120000, +0x00098200, +0x48000001, +0x00030025, +0x81d20000, +0x00098200, +0x80120000, +0x00098200, +0x9361000c, +0x7d6e0050, +0x7e8ea214, +0x814efffc, +0x7c6903a6, +0x4e800420, +0x00060091, +0x00000000, +0x7c810808, +0x00000000, +0x00060092, +0x00000000, +0x7c810808, +0x00000000, +0x00060093, +0x7ca802a6, +0x48000001, +0x0005005a, +0x7ca803a6, +0x1064222c, +0x4e800020, +0x00060094, +0x1064222c, +0x0006005a, +0x5469657e, +0x3529fc01, +0x3900ffff, +0x28890033, +0x20090034, +0x41850000, +0x00050801, +0x3cc0fff0, +0x7d000030, +0x7cc84e30, +0x7c890078, +0x7c664078, +0x7d293378, +0x7c66fe70, +0x7d293039, +0x7c840038, +0x7c634038, +0x7c002010, +0x7c04009e, +0x7d081910, +0x7d03409e, +0x1088022d, +0x4e800020, +0x0006000b, +0x4d810020, +0x5469007e, +0x7c60fe70, +0x7d292378, +0x3d003ff0, +0x7d290039, +0x38000000, +0x7d00409e, +0x5103007e, +0x1083022d, +0x4e800020, +0x00060095, +0x1064222c, +0x0006005c, +0x5469657e, +0x3529fc01, +0x3900ffff, +0x28890033, +0x20090034, +0x41850000, +0x00050801, +0x3cc0fff0, +0x7d000030, +0x7cc84e30, +0x7c890078, +0x7c664078, +0x7d293378, +0x7c66fe70, +0x7d293079, +0x7c840038, +0x7c634038, +0x7c002010, +0x7c04009e, +0x7d081910, +0x7d03409e, +0x1088022d, +0x4e800020, +0x0006000b, +0x4d810020, +0x5469007e, +0x7c60fe70, 
+0x7d292378, +0x3d003ff0, +0x7d290079, +0x38000000, +0x7d00409e, +0x5103007e, +0x1083022d, +0x4e800020, +0x00000000, +0x00060096, +0x1064222c, +0x00060097, +0x5469657e, +0x3529fc01, +0x3900ffff, +0x28890033, +0x20090034, +0x41850000, +0x00050801, +0x3cc0fff0, +0x7d000030, +0x7cc84e30, +0x1008022d, +0x10840211, +0x4e800020, +0x0006000b, +0x4d810020, +0x54680000, +0x38000000, +0x1088022d, +0x4e800020, +0x00000000, +0x00060096, +0x00060097, +0x00000000, +0x00060098, +0x00000000, +0x7c810808, +0x00000000, +0x00060099, +0x00000000, +0x7c810808, +0x00000000, +0x0006009a, +0x1083222d, +0x28070001, +0x10c5322d, +0x41820000, +0x00050801, +0x41810000, +0x00050802, +0x108432e0, +0x1064222c, +0x4e800020, +0x0006000b, +0x108432e1, +0x1064222c, +0x4e800020, +0x0006000c, +0x28070003, +0x41820000, +0x00050801, +0x41810000, +0x00050802, +0x108432e8, +0x1064222c, +0x4e800020, +0x0006000b, +0x108432e9, +0x1064222c, +0x4e800020, +0x0006000c, +0x28070005, +0x41820000, +0x00050801, +0x41810000, +0x00050802, +0x10a42217, +0x108432e9, +0x11463217, +0x7d6802a6, +0x48000001, +0x00050094, +0x7d6803a6, +0x108452e8, +0x108522e1, +0x1064222c, +0x4e800020, +0x0006000b, +0x48000000, +0x0003001b, +0x0006000c, +0x28070007, +0x41820000, +0x00050801, +0x41810000, +0x00050802, +0x6c638000, +0x4e800020, +0x0006000b, +0x5463007e, +0x4e800020, +0x0006000c, +0x7c810808, +0x0006009b, +0x00000000, +0x7c810808, +0x00000000, +0x00080000, +0x00000000, +0x100ea300, +0x3a100004, +0x110e6300, +0x3cd00000, +0x00098200, +0x8130fffc, +0x1140422c, +0x552993ba, +0x100ab232, +0x7d293214, +0x40830000, +0x00050834, +0x100042ed, +0x00000000, +0x108042ee, +0x4c212b82, +0x00000000, +0x7e09805e, +0x00000000, +0x7e10485e, +0x00000000, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x108ea300, +0x3a100004, +0x10ae6300, +0x3cd00000, +0x00098200, +0x8130fffc, +0x11442a2c, +0x552993ba, +0x100ab232, +0x7d293214, +0x40830000, +0x00050805, +0x10042aee, +0x00000000, +0x7e09805e, +0x00000000, +0x7e10485e, +0x00000000, +0x0006000b, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000f, +0x10042a34, +0x7d4650f8, +0x28860000, +0x00090200, +0x4f830342, +0x2b060000, +0x00090200, +0x4fa02902, +0x7e158378, +0x00000000, +0x7e09875e, +0x00000000, +0x7d304f5e, +0x00000000, +0x4f9ceb82, +0x00000000, +0x7e0980de, +0x00000000, +0x7e1048de, +0x00000000, +0x419c0000, +0x0005080b, +0x41980000, +0x0005080b, +0x81240000, +0x00098200, +0x38c00000, +0x00098200, +0x28090000, +0x41820000, +0x0005080b, +0x89290000, +0x00098200, +0x71290000, +0x00090200, +0x40820000, +0x0005080b, +0x7eb0ab78, +0x48000000, +0x00050039, +0x00000000, +0x100ea300, +0x558c007e, +0x000900ab, +0x80f00000, +0x218cfffc, +0x3a100004, +0x7d0f602e, +0x3cd00000, +0x00098200, +0x54e993ba, +0x1117422d, +0x7d293214, +0x10004234, +0x00000000, +0x7e0980de, +0x00000000, +0x7e1048de, +0x00000000, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x100ea300, +0x3a100004, +0x110f6300, +0x3cd00000, +0x00098200, +0x80f0fffc, +0x1000b232, +0x40800000, +0x00050805, +0x100042ee, +0x0006000b, +0x54e993ba, +0x7d293214, +0x00000000, +0x7e09805e, +0x0006000f, +0x00000000, +0x7e10485e, +0x00000000, +0x0006000d, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, 
+0x0006000f, +0x54e993ba, +0x7e093214, +0x48000000, +0x0005000d, +0x00000000, +0x7c0ea02e, +0x558800fe, +0x000900ab, +0x80f00000, +0x3a100004, +0x7d0840f8, +0x3cd00000, +0x00098200, +0x7c004040, +0x54e993ba, +0x7d293214, +0x00000000, +0x7e09809e, +0x00000000, +0x7e10489e, +0x00000000, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x100e6300, +0x111ad200, +0x80f00000, +0x10004232, +0x3a100004, +0x00000000, +0x3cd00000, +0x00098200, +0x54e993ba, +0x7d293214, +0x00000000, +0x7e09801e, +0x00000000, +0x7e10481e, +0x00000000, +0x40800000, +0x00050801, +0x00000000, +0x41800000, +0x00050801, +0x00000000, +0x3e100000, +0x00098200, +0x54e993ba, +0x100ea320, +0x7e104a14, +0x0006000b, +0x00000000, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x80f00000, +0x3a100004, +0x100e6300, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x80f00000, +0x3a100004, +0x7c0e602e, +0x21000000, +0x00098200, +0x7c004114, +0x7c0ea12e, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x100e6300, +0x1000b232, +0x40800000, +0x0005083c, +0x100002e6, +0x80f00000, +0x3a100004, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x106e6300, +0x1003ba34, +0x40800000, +0x00050802, +0x80630000, +0x00098200, +0x0006000b, +0x80f00000, +0x3a100004, +0x10001af1, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000c, +0x1003c234, +0x40800000, +0x0005083e, +0x00000000, +0x81230000, +0x00098200, +0x28090000, +0x40820000, +0x00050809, +0x0006000d, +0x00000000, +0x0006003f, +0x48000001, +0x00030022, +0x48000000, +0x0005000b, +0x00000000, +0x00060013, +0x88090000, +0x00098200, +0x70000000, +0x00090200, +0x40820000, +0x0005080d, +0x48000000, +0x0005003e, +0x00000000, +0x100e5300, +0x1000b232, +0x110f5b00, +0x40800000, +0x0005083a, +0x00000000, +0x110e5300, +0x1008b232, +0x100f5b00, +0x40800000, +0x0005083b, +0x00000000, +0x100e5300, +0x110e5b00, +0x1120422c, +0x1009b232, +0x40830000, +0x0005083d, +0x00000000, +0x80f00000, +0x3a100004, +0x100042e0, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x100e5300, +0x1000b232, +0x110f5b00, +0x40800000, +0x0005083a, +0x00000000, +0x110e5300, +0x1008b232, +0x100f5b00, +0x40800000, +0x0005083b, +0x00000000, +0x100e5300, +0x110e5b00, +0x1120422c, +0x1009b232, +0x40830000, +0x0005083d, +0x00000000, +0x80f00000, +0x3a100004, +0x100042e1, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x100e5300, +0x1000b232, +0x110f5b00, +0x40800000, +0x0005083a, +0x00000000, +0x110e5300, +0x1008b232, +0x100f5b00, +0x40800000, +0x0005083b, +0x00000000, +0x100e5300, +0x110e5b00, +0x1120422c, +0x1009b232, +0x40830000, +0x0005083d, +0x00000000, +0x80f00000, +0x3a100004, +0x100042e8, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x100e5300, +0x1000b232, +0x110f5b00, +0x40800000, +0x0005083a, +0x00000000, +0x110e5300, +0x1008b232, +0x100f5b00, +0x40800000, +0x0005083b, +0x00000000, 
+0x100e5300, +0x110e5b00, +0x1120422c, +0x1009b232, +0x40830000, +0x0005083d, +0x00000000, +0x80f00000, +0x3a100004, +0x100042e9, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x118e5300, +0x100cb232, +0x12af5b00, +0x40800000, +0x0005083a, +0x00000000, +0x12ae5300, +0x1015b232, +0x118f5b00, +0x40800000, +0x0005083b, +0x00000000, +0x118e5300, +0x12ae5b00, +0x112caa2c, +0x1009b232, +0x40830000, +0x0005083d, +0x00000000, +0x0006009c, +0x108caae9, +0x48000001, +0x00050094, +0x1004aae8, +0x80f00000, +0x3a100004, +0x100c02e1, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x118e5300, +0x100cb232, +0x12af5b00, +0x40800000, +0x0005083a, +0x00000000, +0x12ae5300, +0x1015b232, +0x118f5b00, +0x40800000, +0x0005083b, +0x00000000, +0x118e5300, +0x12ae5b00, +0x112caa2c, +0x1009b232, +0x40830000, +0x0005083d, +0x00000000, +0x48000000, +0x0005009c, +0x00000000, +0x108e5300, +0x10ce5b00, +0x1066222c, +0x1003b232, +0x10a6322c, +0x40830000, +0x0005083d, +0x48000001, +0x0003001b, +0x1083222d, +0x108ea320, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x7caa5850, +0x91d20000, +0x00098200, +0x7c8e5a14, +0x7d555378, +0x0006002a, +0x9201000c, +0x7e439378, +0x54a500fe, +0x000900ab, +0x48000001, +0x00030026, +0x28030000, +0x81d20000, +0x00098200, +0x40820000, +0x00050835, +0x100eab00, +0x100ea320, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x80f00000, +0x3a100004, +0x5588007e, +0x000900ab, +0x2108fffc, +0x7c0f402e, +0x1017022d, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x80f00000, +0x3a100004, +0x5588007e, +0x000900ab, +0x2108fffc, +0x7c0f402e, +0x39200000, +0x00098200, +0x1009022d, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x558800fe, +0x000900ab, +0x7d080734, +0x80f00000, +0x3a100004, +0x100042f1, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x100f6300, +0x80f00000, +0x3a100004, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x558800fe, +0x000900ab, +0x7d0040f8, +0x80f00000, +0x3a100004, +0x7c0ea12e, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x134ea320, +0x3a940008, +0x0006000b, +0x134ea320, +0x7c146000, +0x3a940008, +0x41800000, +0x0005080b, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x80f00000, +0x3a100004, +0x814efffc, +0x558c007e, +0x000900ab, +0x398c0000, +0x00098200, +0x7d4a602e, +0x810a0000, +0x00098200, +0x10080301, +0x100ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x814efffc, +0x5694007e, +0x000900ab, +0x3a940000, +0x00098200, +0x110e6300, +0x7d4aa02e, +0x88ca0000, +0x00098200, +0x808a0000, +0x00098200, +0x70c60000, +0x00090200, +0x880a0000, +0x00098200, +0x1128422c, +0x11040321, +0x28800000, +0x4c423382, +0x39290000, +0x00098200, +0x40820000, 
+0x00050802, +0x0006000b, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000c, +0x28090000, +0x00090200, +0x40800000, +0x0005080b, +0x88c80000, +0x00098200, +0x70c60000, +0x00090200, +0x38710000, +0x00098200, +0x41820000, +0x0005080b, +0x48000001, +0x00030027, +0x48000000, +0x0005000b, +0x00000000, +0x80f00000, +0x3a100004, +0x814efffc, +0x5588007e, +0x000900ab, +0x5694007e, +0x000900ab, +0x2108fffc, +0x3a940000, +0x00098200, +0x7d0f402e, +0x7d4aa02e, +0x1117422d, +0x88ca0000, +0x00098200, +0x808a0000, +0x00098200, +0x70c60000, +0x00090200, +0x88c80000, +0x00098200, +0x892a0000, +0x00098200, +0x11040321, +0x40820000, +0x00050802, +0x0006000b, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000c, +0x70c60000, +0x00090200, +0x28890000, +0x4c423382, +0x38710000, +0x00098200, +0x41820000, +0x0005080b, +0x48000001, +0x00030027, +0x48000000, +0x0005000b, +0x00000000, +0x80f00000, +0x3a100004, +0x814efffc, +0x5694007e, +0x000900ab, +0x3a940000, +0x00098200, +0x100f6300, +0x7d4aa02e, +0x810a0000, +0x00098200, +0x10080321, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x80f00000, +0x3a100004, +0x814efffc, +0x5694007e, +0x000900ab, +0x3a940000, +0x00098200, +0x558000fe, +0x000900ab, +0x7d4aa02e, +0x7c0000f8, +0x810a0000, +0x00098200, +0x90080000, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x81120000, +0x00098200, +0x5580007e, +0x000900ab, +0x7e100214, +0x3e100000, +0x00098200, +0x91d20000, +0x00098200, +0x28080000, +0x7e439378, +0x41820000, +0x00050801, +0x7c8ea214, +0x48000001, +0x00030028, +0x81d20000, +0x00098200, +0x0006000b, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x5588007e, +0x000900ab, +0x91d20000, +0x00098200, +0x2108fffc, +0x9201000c, +0x7c8f402e, +0x7e439378, +0x80aefffc, +0x48000001, +0x00030029, +0x81d20000, +0x00098200, +0x10791a2d, +0x106ea320, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x80110000, +0x00098200, +0x7e439378, +0x81110000, +0x00098200, +0x91d20000, +0x00098200, +0x7c004040, +0x9201000c, +0x40800000, +0x00050805, +0x0006000b, +0x00000000, +0x5584ed7e, +0x558596fe, +0x2c0407ff, +0x39000801, +0x7c88209e, +0x48000001, +0x0003002a, +0x00000000, +0x5588007e, +0x000900ab, +0x2108fffc, +0x7c8f402e, +0x48000001, +0x0003002b, +0x00000000, +0x81d20000, +0x00098200, +0x10781a2d, +0x106ea320, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000f, +0x7d956378, +0x48000001, +0x0003002c, +0x7eacab78, +0x7e439378, +0x48000000, +0x0005000b, +0x00000000, +0x812efffc, +0x5588007e, +0x000900ab, +0x81490000, +0x00098200, +0x2108fffc, +0x7d6f402e, +0x00000000, +0x48000000, +0x0005009d, +0x00000000, +0x48000000, +0x0005009e, +0x00000000, +0x114e5300, +0x116e5b00, +0x100ac234, +0x40800000, +0x0005082f, +0x100bb232, +0x40800000, +0x00050805, +0x11205af5, +0x800a0000, +0x00098200, +0x11004af1, +0x7c004840, +0x108b42ee, +0x810a0000, +0x00098200, +0x4c212a02, +0x55291800, +0x000900a1, +0x40810000, +0x0005082f, +0x11084b00, +0x1008d234, +0x41800000, +0x00050802, +0x0006000b, +0x110ea320, +0x80f00000, +0x3a100004, 
+0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000c, +0x812a0000, +0x00098200, +0x28090000, +0x41820000, +0x0005080b, +0x88090000, +0x00098200, +0x70000000, +0x00090200, +0x40820000, +0x0005080b, +0x48000000, +0x0005002f, +0x0006000f, +0x100bba34, +0x41800000, +0x0005089d, +0x48000000, +0x0005002f, +0x00000000, +0x114e5300, +0x5568007e, +0x000900ab, +0x100ac234, +0x2108fffc, +0x7d6f402e, +0x40800000, +0x0005082c, +0x0006009d, +0x800a0000, +0x00098200, +0x810b0000, +0x00098200, +0x812a0000, +0x00098200, +0x11775a2d, +0x7d080038, +0x55002800, +0x000900a1, +0x55081800, +0x000900a1, +0x7d080050, +0x7d294214, +0x0006000b, +0x10090301, +0x00090cab, +0x11090301, +0x00090cab, +0x10005a34, +0x40830000, +0x00050804, +0x1008d234, +0x41800000, +0x00050805, +0x0006000d, +0x110ea320, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000e, +0x81290000, +0x00098200, +0x28090000, +0x40820000, +0x0005080b, +0x111ad217, +0x0006000f, +0x812a0000, +0x00098200, +0x28090000, +0x41820000, +0x0005080d, +0x88090000, +0x00098200, +0x70000000, +0x00090200, +0x00000000, +0x40820000, +0x0005080d, +0x48000000, +0x0005002d, +0x00000000, +0x114e5300, +0x556000fe, +0x000900ab, +0x100ac234, +0x40800000, +0x0005082e, +0x810a0000, +0x00098200, +0x812a0000, +0x00098200, +0x7c004040, +0x40800000, +0x0005082e, +0x11095b00, +0x1008d234, +0x41800000, +0x00050805, +0x0006000b, +0x80f00000, +0x3a100004, +0x110ea320, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000f, +0x812a0000, +0x00098200, +0x28090000, +0x41820000, +0x0005080b, +0x89290000, +0x00098200, +0x71290000, +0x00090200, +0x40820000, +0x0005080b, +0x48000000, +0x0005002e, +0x00000000, +0x114e5300, +0x116e5b00, +0x100ac234, +0x40800000, +0x00050833, +0x100bb232, +0x40800000, +0x00050805, +0x11205af5, +0x12aea300, +0x800a0000, +0x00098200, +0x11004af1, +0x7c004840, +0x108b42ee, +0x810a0000, +0x00098200, +0x4c212a02, +0x55201800, +0x000900a1, +0x40810000, +0x00050833, +0x88ca0000, +0x00098200, +0x11280300, +0x1009d234, +0x41800000, +0x00050803, +0x0006000b, +0x70c90000, +0x00090200, +0x12a80320, +0x40820000, +0x00050807, +0x0006000c, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000d, +0x812a0000, +0x00098200, +0x28090000, +0x41820000, +0x0005080b, +0x89290000, +0x00098200, +0x71290000, +0x00090200, +0x40820000, +0x0005080b, +0x48000000, +0x00050033, +0x0006000f, +0x100bba34, +0x41800000, +0x0005089e, +0x48000000, +0x00050033, +0x00060011, +0x00000000, +0x80110000, +0x00098200, +0x54c607b8, +0x91510000, +0x00098200, +0x98ca0000, +0x00098200, +0x900a0000, +0x00098200, +0x48000000, +0x0005000c, +0x00000000, +0x114e5300, +0x5568007e, +0x000900ab, +0x100ac234, +0x2108fffc, +0x7d6f402e, +0x40800000, +0x00050830, +0x0006009e, +0x800a0000, +0x00098200, +0x810b0000, +0x00098200, +0x812a0000, +0x00098200, +0x11775a2d, +0x9b6a0000, +0x00098200, +0x7d080038, +0x12aea300, +0x55002800, +0x000900a1, +0x55081800, +0x000900a1, +0x7d080050, +0x88ca0000, +0x00098200, +0x7d294214, +0x0006000b, +0x10090301, +0x00090cab, +0x11090301, +0x00090cab, +0x10005a34, +0x40830000, +0x00050805, +0x1008d234, +0x41800000, +0x00050804, +0x0006000c, +0x70c00000, +0x00090200, +0x12a90321, +0x00090cab, +0x40820000, +0x00050807, +0x0006000d, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, 
+0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000e, +0x810a0000, +0x00098200, +0x00000000, +0x28080000, +0x41820000, +0x0005080c, +0x88080000, +0x00098200, +0x70000000, +0x00090200, +0x40820000, +0x0005080c, +0x48000000, +0x00050031, +0x0006000f, +0x81290000, +0x00098200, +0x28090000, +0x40820000, +0x0005080b, +0x810a0000, +0x00098200, +0x38b10000, +0x00098200, +0x9201000c, +0x7e439378, +0x28080000, +0x91d20000, +0x00098200, +0x41820000, +0x00050806, +0x88080000, +0x00098200, +0x70000000, +0x00090200, +0x41820000, +0x00050831, +0x00060010, +0x7d445378, +0x11650321, +0x48000001, +0x0003002d, +0x81d20000, +0x00098200, +0x12a30321, +0x48000000, +0x0005000d, +0x00060011, +0x80110000, +0x00098200, +0x54c607b8, +0x91510000, +0x00098200, +0x00000000, +0x98ca0000, +0x00098200, +0x900a0000, +0x00098200, +0x48000000, +0x0005000d, +0x00000000, +0x114e5300, +0x556000fe, +0x000900ab, +0x100ac234, +0x40800000, +0x00050832, +0x810a0000, +0x00098200, +0x812a0000, +0x00098200, +0x88ca0000, +0x00098200, +0x7c004040, +0x12aea300, +0x40800000, +0x00050832, +0x11095b00, +0x1008d234, +0x41800000, +0x00050805, +0x0006000b, +0x70c00000, +0x00090200, +0x12a95b20, +0x40820000, +0x00050807, +0x0006000c, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000f, +0x810a0000, +0x00098200, +0x28080000, +0x41820000, +0x0005080b, +0x89080000, +0x00098200, +0x71080000, +0x00090200, +0x40820000, +0x0005080b, +0x48000000, +0x00050032, +0x00060011, +0x80110000, +0x00098200, +0x54c607b8, +0x91510000, +0x00098200, +0x98ca0000, +0x00098200, +0x00000000, +0x900a0000, +0x00098200, +0x48000000, +0x0005000c, +0x00000000, +0x7e8ea214, +0x0006000b, +0x7ccf6214, +0x8094fffc, +0x3413fff8, +0x80c60004, +0x540500fe, +0x000900ab, +0x41820000, +0x00050804, +0x7ca53214, +0x81240000, +0x00098200, +0x54c81800, +0x000900a1, +0x88c40000, +0x00098200, +0x7c054840, +0x7d340214, +0x80040000, +0x00098200, +0x41810000, +0x00050805, +0x7d080214, +0x70c00000, +0x00090200, +0x0006000d, +0x10140301, +0x3a940008, +0x7c944800, +0x10080321, +0x39080008, +0x41840000, +0x0005080d, +0x40820000, +0x00050807, +0x0006000e, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000f, +0x91d20000, +0x00098200, +0x7e439378, +0x9201000c, +0x7d956378, +0x48000001, +0x0003002e, +0x7eacab78, +0x48000000, +0x0005000b, +0x00060011, +0x80110000, +0x00098200, +0x54c607b8, +0x91510000, +0x00098200, +0x98ca0000, +0x00098200, +0x900a0000, +0x00098200, +0x00000000, +0x48000000, +0x0005000e, +0x00000000, +0x7d6b9a14, +0x00000000, +0x114ea300, +0x7dc97378, +0x7dcea214, +0x396bfff8, +0x100aca34, +0x39ce0008, +0x40800000, +0x00050825, +0x920efff8, +0x820a0000, +0x00098200, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54f4dd78, +0x7c11402e, +0x7e947214, +0x7c0903a6, +0x4e800420, +0x00000000, +0x7d6b9a14, +0x00000000, +0x114ea300, +0x7e8ea214, +0x810efff8, +0x396bfff8, +0x100aca34, +0x3a940008, +0x40800000, +0x00050840, +0x00060041, +0x71000000, +0x00090200, +0x88ca0000, +0x00098200, +0x69090000, +0x00090200, +0x288b0000, +0x40820000, +0x00050807, +0x0006000b, +0x914efffc, +0x39200000, +0x2b860001, +0x41860000, +0x00050803, +0x0006000c, +0x38c90008, +0x10144b00, +0x7c865840, +0x100e4b20, +0x7cc93378, +0x40860000, +0x0005080c, +0x0006000d, +0x4c42ea02, +0x41820000, +0x00050805, +0x0006000e, +0x820a0000, +0x00098200, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54f4dd78, +0x7c11402e, +0x7e947214, 
+0x7c0903a6, +0x4e800420, +0x0006000f, +0x80e8fffc, +0x54f4dd78, +0x7d147050, +0x81080000, +0x00098200, +0x81080000, +0x00098200, +0x81e80000, +0x00098200, +0x48000000, +0x0005000e, +0x00060011, +0x71200000, +0x00090200, +0x40820000, +0x0005080b, +0x00000000, +0x7dc97050, +0x810efff8, +0x71000000, +0x00090200, +0x48000000, +0x0005000b, +0x00000000, +0x3a94ffe8, +0x7dc97378, +0x114ea300, +0x7dcea214, +0x100e0b01, +0x110e1301, +0x114e1b21, +0x100aca34, +0x100e2321, +0x39600010, +0x110e2b21, +0x39ce0020, +0x40800000, +0x00050825, +0x920efff8, +0x820a0000, +0x00098200, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54f4dd78, +0x7c11402e, +0x7e947214, +0x7c0903a6, +0x4e800420, +0x00000000, +0x7e8ea214, +0x8154fff4, +0x8174fffc, +0x800a0000, +0x00098200, +0x810a0000, +0x00098200, +0x3a100004, +0x0006000b, +0x7c0b0040, +0x55661800, +0x000900a1, +0x40800000, +0x00050805, +0x11283300, +0x1009d234, +0x80f0fffc, +0x41800000, +0x00050804, +0x10005af1, +0x396b0001, +0x3cd00000, +0x00098200, +0x11340b21, +0x54e893ba, +0x9174fffc, +0x7e083214, +0x10140321, +0x0006000d, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000e, +0x396b0001, +0x48000000, +0x0005000b, +0x0006000f, +0x810a0000, +0x00098200, +0x7d605850, +0x812a0000, +0x00098200, +0x00060010, +0x7c0b4040, +0x55662800, +0x000900a1, +0x41810000, +0x0005080d, +0x556a1800, +0x000900a1, +0x7cca3050, +0x11493300, +0x7cc93214, +0x100ad234, +0x80f0fffc, +0x41800000, +0x00050807, +0x10c60301, +0x00090cab, +0x3d300000, +0x00098200, +0x11540b21, +0x7d6b0214, +0x54e893ba, +0x10d40321, +0x396b0001, +0x7e084a14, +0x9174fffc, +0x48000000, +0x0005000d, +0x00060011, +0x00000000, +0x396b0001, +0x48000000, +0x00050010, +0x00000000, +0x7e8ea214, +0x3920ffe8, +0x11144b00, +0x8134fff0, +0x80d4fff8, +0x1008422c, +0x2c090000, +0x00098200, +0x2c800000, +0x00098200, +0x2f060000, +0x00098200, +0x40860000, +0x00050805, +0x89080000, +0x00098200, +0x4c42d202, +0x2f880000, +0x00098200, +0x5580007e, +0x000900ab, +0x4c42f202, +0x7cd00214, +0x40820000, +0x00050805, +0x9374fffc, +0x3e060000, +0x00098200, +0x0006000b, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000f, +0x38000000, +0x00098200, +0x39000000, +0x00098200, +0x9810ffff, +0x3e060000, +0x00098200, +0x99100003, +0x48000000, +0x0005000b, +0x00000000, +0x800efff8, +0x7d6e5a14, +0x7e8ea214, +0x396b0000, +0x00098200, +0x7d345214, +0x38cefff8, +0x7d605850, +0x288a0000, +0x7d0b3051, +0x41860000, +0x00050805, +0x3929fff0, +0x40810000, +0x00050802, +0x0006000b, +0x100b0301, +0x396b0008, +0x10140321, +0x7c144840, +0x7c8b3040, +0x40800000, +0x00050803, +0x3a940008, +0x41840000, +0x0005080b, +0x0006000c, +0x13540321, +0x7c144840, +0x3a940008, +0x41800000, +0x0005080c, +0x0006000d, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000f, +0x80120000, +0x00098200, +0x3a600008, +0x40810000, +0x0005080d, +0x7d344214, +0x7c090040, +0x3a680008, +0x41810000, +0x00050807, +0x00060010, +0x100b0301, +0x396b0008, +0x10140321, +0x7c0b3040, +0x3a940008, +0x41800000, +0x00050810, +0x48000000, +0x0005000d, +0x00060011, +0x7e439378, +0x92920000, +0x00098200, +0x7eae5850, +0x91d20000, +0x00098200, +0x7e8ea050, +0x9201000c, +0x550400fe, +0x000900ab, +0x48000001, +0x00030000, +0x81d20000, +0x00098200, +0x00000000, +0x7e8ea214, +0x7d6eaa14, +0x38cefff8, +0x48000000, +0x00050010, +0x00000000, 
+0x7d8c9a14, +0x00000000, +0x820efff8, +0x7e8ea214, +0x7d936378, +0x0006000b, +0x72000000, +0x00090200, +0x6a080000, +0x00090200, +0x40820000, +0x0005089f, +0x00060017, +0x80f0fffc, +0x2c0c0008, +0x392efff8, +0x396cfff8, +0x54ea5d78, +0x41820000, +0x00050803, +0x39000000, +0x0006000c, +0x38c80008, +0x10144300, +0x7c065800, +0x10094320, +0x41820000, +0x00050803, +0x39060008, +0x10143300, +0x7c085800, +0x10093320, +0x40820000, +0x0005080c, +0x0006000d, +0x0006000f, +0x7c0a6040, +0x54f4dd78, +0x41810000, +0x00050806, +0x7dd44850, +0x810efffc, +0x80f00000, +0x3a100004, +0x81080000, +0x00098200, +0x81e80000, +0x00098200, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00060010, +0x390cfff8, +0x398c0008, +0x13494320, +0x48000000, +0x0005000f, +0x0006009f, +0x71090000, +0x00090200, +0x40820000, +0x00050818, +0x7dc87050, +0x820efff8, +0x48000000, +0x0005000b, +0x00000000, +0x820efff8, +0x7e8ea214, +0x7d936378, +0x72000000, +0x00090200, +0x6a080000, +0x00090200, +0x40820000, +0x0005089f, +0x80f0fffc, +0x392efff8, +0x54ea5d78, +0x00000000, +0x10140301, +0x10090321, +0x00000000, +0x0006000f, +0x7c0a6040, +0x54f4dd78, +0x41810000, +0x00050806, +0x7dd44850, +0x810efffc, +0x80f00000, +0x3a100004, +0x81080000, +0x00098200, +0x81e80000, +0x00098200, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00060010, +0x390cfff8, +0x398c0008, +0x13494320, +0x48000000, +0x0005000f, +0x00000000, +0x7c810808, +0x00000000, +0x7e8ea214, +0x11140301, +0x00090cab, +0x10d40301, +0x00090cab, +0x11340301, +0x00090cab, +0x00000000, +0x1008b230, +0x1386b230, +0x1089b230, +0x4c00e382, +0x4c002382, +0x41800000, +0x00050842, +0x00000000, +0x110832e0, +0x11140321, +0x00090cab, +0x00000000, +0x1006d231, +0x11140321, +0x00090cab, +0x40800000, +0x00050802, +0x10084aec, +0x0006000b, +0x00000000, +0x558c007e, +0x000900ab, +0x7d906214, +0x00000000, +0x3e0c0000, +0x00098200, +0x00000000, +0x3d8c0000, +0x00098200, +0x00000000, +0x7e0c805e, +0x00000000, +0x7e10605e, +0x00000000, +0x40810000, +0x00070800, +0x00000000, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000c, +0x100942ec, +0x48000000, +0x0005000b, +0x00000000, +0x7c810808, +0x00000000, +0x110ea300, +0x3a94fff8, +0x1008d234, +0x41800000, +0x00050801, +0x00000000, +0x7c810808, +0x00000000, +0x5580007e, +0x000900ab, +0x7e100214, +0x3e100000, +0x00098200, +0x110ea320, +0x00000000, +0x0006000b, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x7c810808, +0x00000000, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x7c810808, +0x00000000, +0x5580007e, +0x000900ab, +0x7e100214, +0x3e100000, +0x00098200, +0x80f00000, +0x3a100004, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x7c810808, +0x00000000, +0x81320000, +0x00098200, +0x89100000, +0x00098200, +0x81f00000, +0x00098200, +0x7c144840, +0x55081800, +0x000900a1, +0x41810000, +0x00050820, +0x80f00000, +0x3a100004, +0x0006000c, +0x7c0b4040, +0x40810000, +0x00050803, +0x00000000, +0x7c810808, +0x00000000, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x00000000, +0x0006000d, +0x134e5b20, +0x396b0008, +0x48000000, 
+0x0005000c, +0x00000000, +0x7c810808, +0x00000000, +0x81320000, +0x00098200, +0x7d0e5a14, +0x7c145a14, +0x91480004, +0x38cb0000, +0x00098200, +0x81f00000, +0x00098200, +0x7c004840, +0x90c80000, +0x40800000, +0x00050820, +0x89300000, +0x00098200, +0x7dd47378, +0x7d0b4378, +0x80f00000, +0x3a100004, +0x2c090000, +0x39c80008, +0x41820000, +0x00050803, +0x0006000b, +0x7c145840, +0x10140301, +0x40800000, +0x00050804, +0x13540321, +0x3a940008, +0x0006000c, +0x3529ffff, +0x10080b21, +0x39080008, +0x40820000, +0x0005080b, +0x0006000d, +0x54e815ba, +0x54ea5d78, +0x54ec9b78, +0x7c11402e, +0x54f4dd78, +0x54eb9d78, +0x7c0903a6, +0x4e800420, +0x0006000e, +0x101ad217, +0x48000000, +0x0005000c, +0x00000000, +0x80ca0000, +0x00098200, +0x00000000, +0x80d10000, +0x00098200, +0x00000000, +0x7d145a14, +0x81320000, +0x00098200, +0x7d6e5a14, +0x91d20000, +0x00098200, +0x7c084840, +0x91720000, +0x00098200, +0x38000000, +0x00098200, +0x7cc903a6, +0x00000000, +0x808a0000, +0x00098200, +0x00000000, +0x7e439378, +0x41810000, +0x0005081f, +0x90110000, +0x00098200, +0x4e800421, +0x81120000, +0x00098200, +0x546c1800, +0x000900a1, +0x81d20000, +0x00098200, +0x38000000, +0x00098200, +0x820efff8, +0x7e8c4050, +0x90110000, +0x00098200, +0x48000000, +0x00050016, +0x00000000, +0x00010000 +}; + +enum { + GLOB_vm_returnp, + GLOB_cont_dispatch, + GLOB_vm_returnc, + GLOB_BC_RET_Z, + GLOB_vm_return, + GLOB_vm_leave_cp, + GLOB_vm_leave_unw, + GLOB_vm_unwind_c, + GLOB_vm_unwind_c_eh, + GLOB_vm_unwind_ff, + GLOB_vm_unwind_ff_eh, + GLOB_vm_growstack_c, + GLOB_vm_growstack_l, + GLOB_vm_resume, + GLOB_vm_pcall, + GLOB_vm_call, + GLOB_vm_call_dispatch, + GLOB_vmeta_call, + GLOB_vm_call_dispatch_f, + GLOB_vm_cpcall, + GLOB_vm_call_tail, + GLOB_cont_cat, + GLOB_BC_CAT_Z, + GLOB_cont_nop, + GLOB_vmeta_tgets1, + GLOB_vmeta_tgets, + GLOB_vmeta_tgetb, + GLOB_vmeta_tgetv, + GLOB_vmeta_tsets1, + GLOB_vmeta_tsets, + GLOB_vmeta_tsetb, + GLOB_vmeta_tsetv, + GLOB_vmeta_comp, + GLOB_vmeta_binop, + GLOB_cont_ra, + GLOB_cont_condt, + GLOB_cont_condf, + GLOB_vmeta_equal, + GLOB_vmeta_arith_vn, + GLOB_vmeta_arith_nv, + GLOB_vmeta_unm, + GLOB_vmeta_arith_vv, + GLOB_vmeta_len, + GLOB_BC_LEN_Z, + GLOB_vmeta_callt, + GLOB_BC_CALLT_Z, + GLOB_vmeta_for, + GLOB_ff_assert, + GLOB_fff_fallback, + GLOB_fff_res, + GLOB_ff_type, + GLOB_fff_restv, + GLOB_ff_getmetatable, + GLOB_ff_setmetatable, + GLOB_ff_rawget, + GLOB_ff_tonumber, + GLOB_ff_tostring, + GLOB_fff_gcstep, + GLOB_ff_next, + GLOB_ff_pairs, + GLOB_ff_ipairs_aux, + GLOB_ff_ipairs, + GLOB_ff_pcall, + GLOB_ff_xpcall, + GLOB_ff_coroutine_resume, + GLOB_ff_coroutine_wrap_aux, + GLOB_ff_coroutine_yield, + GLOB_ff_math_abs, + GLOB_fff_res1, + GLOB_ff_math_floor, + GLOB_vm_floor_hilo, + GLOB_ff_math_ceil, + GLOB_vm_ceil_hilo, + GLOB_ff_math_sqrt, + GLOB_ff_math_log, + GLOB_ff_math_log10, + GLOB_ff_math_exp, + GLOB_ff_math_sin, + GLOB_ff_math_cos, + GLOB_ff_math_tan, + GLOB_ff_math_asin, + GLOB_ff_math_acos, + GLOB_ff_math_atan, + GLOB_ff_math_sinh, + GLOB_ff_math_cosh, + GLOB_ff_math_tanh, + GLOB_ff_math_pow, + GLOB_ff_math_atan2, + GLOB_ff_math_fmod, + GLOB_ff_math_deg, + GLOB_ff_math_rad, + GLOB_ff_math_ldexp, + GLOB_ff_math_frexp, + GLOB_ff_math_modf, + GLOB_ff_math_min, + GLOB_ff_math_max, + GLOB_ff_string_len, + GLOB_ff_string_byte, + GLOB_ff_string_char, + GLOB_fff_newstr, + GLOB_ff_string_sub, + GLOB_ff_string_rep, + GLOB_ff_string_reverse, + GLOB_ff_string_lower, + GLOB_ff_string_upper, + GLOB_ff_table_getn, + GLOB_ff_bit_tobit, + GLOB_fff_resbit, + GLOB_ff_bit_band, + GLOB_ff_bit_bor, + 
GLOB_ff_bit_bxor, + GLOB_ff_bit_bswap, + GLOB_ff_bit_bnot, + GLOB_ff_bit_lshift, + GLOB_ff_bit_rshift, + GLOB_ff_bit_arshift, + GLOB_ff_bit_rol, + GLOB_ff_bit_ror, + GLOB_vm_record, + GLOB_vm_rethook, + GLOB_vm_inshook, + GLOB_cont_hook, + GLOB_vm_hotloop, + GLOB_vm_callhook, + GLOB_vm_hotcall, + GLOB_vm_exit_handler, + GLOB_vm_exit_interp, + GLOB_vm_floor, + GLOB_vm_floor_efd, + GLOB_vm_ceil_efd, + GLOB_vm_trunc_efd, + GLOB_vm_trunc_hilo, + GLOB_vm_powi, + GLOB_vm_foldfpm, + GLOB_vm_foldarith, + GLOB_vm_ffi_call, + GLOB_BC_MODVN_Z, + GLOB_BC_TGETS_Z, + GLOB_BC_TSETS_Z, + GLOB_BC_RETV_Z, + GLOB__MAX +}; +static const char *const globnames[] = { + "vm_returnp", + "cont_dispatch", + "vm_returnc", + "BC_RET_Z", + "vm_return", + "vm_leave_cp", + "vm_leave_unw", + "vm_unwind_c", + "vm_unwind_c_eh", + "vm_unwind_ff", + "vm_unwind_ff_eh", + "vm_growstack_c", + "vm_growstack_l", + "vm_resume", + "vm_pcall", + "vm_call", + "vm_call_dispatch", + "vmeta_call", + "vm_call_dispatch_f", + "vm_cpcall", + "vm_call_tail", + "cont_cat", + "BC_CAT_Z", + "cont_nop", + "vmeta_tgets1", + "vmeta_tgets", + "vmeta_tgetb", + "vmeta_tgetv", + "vmeta_tsets1", + "vmeta_tsets", + "vmeta_tsetb", + "vmeta_tsetv", + "vmeta_comp", + "vmeta_binop", + "cont_ra", + "cont_condt", + "cont_condf", + "vmeta_equal", + "vmeta_arith_vn", + "vmeta_arith_nv", + "vmeta_unm", + "vmeta_arith_vv", + "vmeta_len", + "BC_LEN_Z", + "vmeta_callt", + "BC_CALLT_Z", + "vmeta_for", + "ff_assert", + "fff_fallback", + "fff_res", + "ff_type", + "fff_restv", + "ff_getmetatable", + "ff_setmetatable", + "ff_rawget", + "ff_tonumber", + "ff_tostring", + "fff_gcstep", + "ff_next", + "ff_pairs", + "ff_ipairs_aux", + "ff_ipairs", + "ff_pcall", + "ff_xpcall", + "ff_coroutine_resume", + "ff_coroutine_wrap_aux", + "ff_coroutine_yield", + "ff_math_abs", + "fff_res1", + "ff_math_floor", + "vm_floor_hilo", + "ff_math_ceil", + "vm_ceil_hilo", + "ff_math_sqrt", + "ff_math_log", + "ff_math_log10", + "ff_math_exp", + "ff_math_sin", + "ff_math_cos", + "ff_math_tan", + "ff_math_asin", + "ff_math_acos", + "ff_math_atan", + "ff_math_sinh", + "ff_math_cosh", + "ff_math_tanh", + "ff_math_pow", + "ff_math_atan2", + "ff_math_fmod", + "ff_math_deg", + "ff_math_rad", + "ff_math_ldexp", + "ff_math_frexp", + "ff_math_modf", + "ff_math_min", + "ff_math_max", + "ff_string_len", + "ff_string_byte", + "ff_string_char", + "fff_newstr", + "ff_string_sub", + "ff_string_rep", + "ff_string_reverse", + "ff_string_lower", + "ff_string_upper", + "ff_table_getn", + "ff_bit_tobit", + "fff_resbit", + "ff_bit_band", + "ff_bit_bor", + "ff_bit_bxor", + "ff_bit_bswap", + "ff_bit_bnot", + "ff_bit_lshift", + "ff_bit_rshift", + "ff_bit_arshift", + "ff_bit_rol", + "ff_bit_ror", + "vm_record", + "vm_rethook", + "vm_inshook", + "cont_hook", + "vm_hotloop", + "vm_callhook", + "vm_hotcall", + "vm_exit_handler", + "vm_exit_interp", + "vm_floor", + "vm_floor_efd", + "vm_ceil_efd", + "vm_trunc_efd", + "vm_trunc_hilo", + "vm_powi", + "vm_foldfpm", + "vm_foldarith", + "vm_ffi_call", + "BC_MODVN_Z", + "BC_TGETS_Z", + "BC_TSETS_Z", + "BC_RETV_Z", + (const char *)0 +}; +static const char *const extnames[] = { + "lj_state_growstack", + "lj_meta_tget", + "lj_meta_tset", + "lj_meta_comp", + "lj_meta_equal", + "lj_meta_arith", + "lj_meta_len", + "lj_meta_call", + "lj_meta_for", + "lj_tab_get", + "lj_str_fromnum", + "lj_tab_next", + "lj_tab_getinth", + "lj_ffh_coroutine_wrap_err", + "sqrt", + "log", + "log10", + "exp", + "sin", + "cos", + "tan", + "asin", + "acos", + "atan", + "sinh", + "cosh", + "tanh", + "pow", + 
"atan2", + "fmod", + "ldexp", + "frexp", + "modf", + "lj_str_new", + "lj_tab_len", + "lj_gc_step", + "lj_dispatch_ins", + "lj_dispatch_call", + "lj_meta_cat", + "lj_gc_barrieruv", + "lj_func_closeuv", + "lj_func_newL_gc", + "lj_tab_new", + "lj_tab_dup", + "lj_gc_step_fixtop", + "lj_tab_newkey", + "lj_tab_reasize", + (const char *)0 +}; +#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V) +#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V) +#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V) +#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V) +#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V) +#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V) +#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V) +#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V) +#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V) +#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V) +#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V) +#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V) +#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V) +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx) +{ + dasm_put(Dst, 0); + dasm_put(Dst, 1, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, ~LJ_VMST_C, Dt1(->base), DISPATCH_GL(vmstate), 31-3, Dt1(->top)); + dasm_put(Dst, 55, Dt1(->cframe), Dt1(->maxstack), Dt1(->top), 31-3, Dt1(->top), ~LJ_VMST_C, Dt1(->glref), Dt2(->vmstate)); + dasm_put(Dst, 133, LJ_TISNUM+1, LJ_TFUNC, LJ_TTAB, Dt1(->base), Dt1(->glref), LJ_TSTR, LJ_TFALSE, LJ_TNIL, ~LJ_VMST_INTERP, GG_G2DISP, DISPATCH_GL(vmstate), LUA_MINSTACK, Dt1(->base), Dt1(->top), 32-3); + dasm_put(Dst, 188, Dt1(->base), Dt1(->top), Dt7(->pc), Dt1(->glref), Dt1(->status), FRAME_CP, CFRAME_RESUME, GG_G2DISP, Dt1(->cframe), Dt1(->base), LJ_TISNUM+1, Dt1(->top), LJ_TFUNC, LJ_TTAB, LJ_TSTR, Dt1(->status), FRAME_TYPE, ~LJ_VMST_INTERP, LJ_TNIL, DISPATCH_GL(vmstate)); + dasm_put(Dst, 279, FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, Dt1(->base), LJ_TISNUM+1, Dt1(->top), LJ_TFUNC, LJ_TTAB, LJ_TSTR, ~LJ_VMST_INTERP, LJ_TNIL, DISPATCH_GL(vmstate)); + dasm_put(Dst, 376, Dt7(->pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), Dt1(->glref), FRAME_CP, GG_G2DISP, Dt7(->pc), PC2PROTO(k), Dt1(->base)); + dasm_put(Dst, 481, DISPATCH_GL(tmptv), DISPATCH_GL(tmptv), DISPATCH_GL(tmptv2), DISPATCH_GL(tmptv), Dt1(->base), FRAME_CONT, Dt1(->top), DISPATCH_GL(tmptv)); + dasm_put(Dst, 556, DISPATCH_GL(tmptv), DISPATCH_GL(tmptv2), DISPATCH_GL(tmptv), Dt1(->base), FRAME_CONT, Dt1(->top), Dt1(->base)); + dasm_put(Dst, 637, -(BCBIAS_J*4 >> 16), LJ_TTRUE, LJ_TFALSE, Dt1(->base)); + dasm_put(Dst, 706, Dt1(->base), FRAME_CONT); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 728); +#endif + dasm_put(Dst, 730, Dt1(->base)); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 738); +#else + dasm_put(Dst, 745); +#endif + dasm_put(Dst, 748, Dt1(->base), Dt7(->pc), Dt1(->base), Dt1(->base)); +#if LJ_HASJIT + dasm_put(Dst, 796); +#endif + dasm_put(Dst, 798); +#if LJ_HASJIT + dasm_put(Dst, 800, BC_JFORI); +#endif + dasm_put(Dst, 803); +#if LJ_HASJIT + dasm_put(Dst, 805, BC_JFORI); +#endif + dasm_put(Dst, 808, BC_FORI, ~LJ_TNUMX, 31-3, Dt8(->upvalue), Dt6(->metatable), 
DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])); + dasm_put(Dst, 873, Dt6(->hmask), Dt5(->hash), Dt6(->node), 31-5, 31-3, DtB(->key), DtB(->val), DtB(->next), LJ_TUDATA, 31-2, 4*~LJ_TNUMX, DISPATCH_GL(gcroot[GCROOT_BASEMT])); + dasm_put(Dst, 929, Dt6(->metatable), Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist)); + dasm_put(Dst, 989, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), Dt1(->top), (2+1)*8); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 1062, Dt6(->metatable), Dt8(->upvalue[0])); +#else + dasm_put(Dst, 1071, Dt8(->upvalue[0])); +#endif + dasm_put(Dst, 1075, (3+1)*8, Dt6(->asize), Dt6(->array), 31-3, (0+1)*8, (2+1)*8, Dt6(->hmask), (0+1)*8, (0+1)*8); + dasm_put(Dst, 1139); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 1152, Dt6(->metatable), Dt8(->upvalue[0])); +#else + dasm_put(Dst, 1161, Dt8(->upvalue[0])); +#endif + dasm_put(Dst, 1165, (3+1)*8, DISPATCH_GL(hookmask), 32-HOOK_ACTIVE_SHIFT, 8+FRAME_PCALL, DISPATCH_GL(hookmask), 32-HOOK_ACTIVE_SHIFT, 16+FRAME_PCALL, LJ_TTHREAD, Dt1(->status), Dt1(->cframe), Dt1(->top)); + dasm_put(Dst, 1226, LUA_YIELD, Dt1(->base), Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->base), LUA_YIELD, Dt1(->top), ~LJ_VMST_INTERP, Dt1(->base), DISPATCH_GL(vmstate), Dt1(->maxstack)); + dasm_put(Dst, 1289, Dt1(->top), FRAME_TYPE, LJ_TTRUE, FRAME_TYPE, LJ_TFALSE, Dt1(->top), (2+1)*8, 32-3); + dasm_put(Dst, 1349, Dt8(->upvalue[0].gcr), Dt1(->status), Dt1(->cframe), Dt1(->top), LUA_YIELD, Dt1(->base), Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->base), LUA_YIELD, Dt1(->top), ~LJ_VMST_INTERP); + dasm_put(Dst, 1408, Dt1(->base), DISPATCH_GL(vmstate), Dt1(->maxstack), Dt1(->top), FRAME_TYPE, 32-3, Dt1(->cframe)); + dasm_put(Dst, 1465, Dt1(->base), CFRAME_RESUME, Dt1(->top), LUA_YIELD, Dt1(->cframe), Dt1(->status), (1+1)*8, FRAME_TYPE); + dasm_put(Dst, 1530); + dasm_put(Dst, 1599); + dasm_put(Dst, 1662); + dasm_put(Dst, 1727); + dasm_put(Dst, 1797, Dt8(->upvalue[0]), DISPATCH_GL(tmptv), DISPATCH_GL(tmptv), (2+1)*8, (2+1)*8); + dasm_put(Dst, 1869, Dt5(->len)); + dasm_put(Dst, 1936, Dt5(->len), (0+1)*8, Dt5([1]), (1+1)*8, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), DISPATCH_GL(tmptv), Dt1(->base), Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 1996, Dt5(->len), sizeof(GCstr)-1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 2062, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(strempty), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 2121, DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf)); + dasm_put(Dst, 2180, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf)); + dasm_put(Dst, 2247); + dasm_put(Dst, 2318); + dasm_put(Dst, 2406, Dt8(->f), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->base), 31-3, Dt1(->top), Dt7(->pc)); + dasm_put(Dst, 2485, FRAME_TYPE, LUA_MINSTACK, Dt1(->base), Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 2527); +#endif + dasm_put(Dst, 2529, DISPATCH_GL(hookmask), HOOK_ACTIVE, GG_DISP2STATIC, DISPATCH_GL(hookmask), DISPATCH_GL(hookcount), HOOK_ACTIVE, 31-LUA_HOOKLINE, 
DISPATCH_GL(hookcount), Dt1(->base), Dt1(->base)); + dasm_put(Dst, 2576, GG_DISP2STATIC); +#if LJ_HASJIT + dasm_put(Dst, 2594); +#endif + dasm_put(Dst, 2596); +#if LJ_HASJIT + dasm_put(Dst, 2599); +#endif + dasm_put(Dst, 2602); +#if LJ_HASJIT + dasm_put(Dst, 2604); +#endif + dasm_put(Dst, 2607, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 2629); +#endif + dasm_put(Dst, 2631); +#if LJ_HASJIT + dasm_put(Dst, 2633); +#endif + dasm_put(Dst, 2635); +#if LJ_HASJIT + dasm_put(Dst, 2719); +#else + dasm_put(Dst, 2742); +#endif + dasm_put(Dst, 2745); +#if LJ_HASJIT + dasm_put(Dst, 2747); +#endif + dasm_put(Dst, 2749); +#if LJ_HASJIT + dasm_put(Dst, 2751); +#endif + dasm_put(Dst, 2753); +#if LJ_HASFFI + dasm_put(Dst, 2816); +#endif +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop) +{ + int vk = 0; + dasm_put(Dst, 2818, defop); + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + dasm_put(Dst, 2820, -(BCBIAS_J*4 >> 16)); + if (op == BC_ISLE || op == BC_ISGT) { + dasm_put(Dst, 2834); + } + if (op == BC_ISLT || op == BC_ISLE) { + dasm_put(Dst, 2837); + } else { + dasm_put(Dst, 2839); + } + dasm_put(Dst, 2841); + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + dasm_put(Dst, 2852, -(BCBIAS_J*4 >> 16)); + if (vk) { + dasm_put(Dst, 2866); + } else { + dasm_put(Dst, 2868); + } + dasm_put(Dst, 2870, ~LJ_TISPRI, ~LJ_TISTABUD); + if (vk) { + dasm_put(Dst, 2892); + } else { + dasm_put(Dst, 2894); + } + dasm_put(Dst, 2896); + if (vk) { + dasm_put(Dst, 2898); + } else { + dasm_put(Dst, 2900); + } + dasm_put(Dst, 2902, Dt6(->metatable), 1-vk, Dt6(->nomm), 1<<MM_eq); + break; + + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + dasm_put(Dst, 2923, 32-1, -(BCBIAS_J*4 >> 16)); + if (vk) { + dasm_put(Dst, 2937); + } else { + dasm_put(Dst, 2939); + } + dasm_put(Dst, 2941); + break; + + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + dasm_put(Dst, 2952, -(BCBIAS_J*4 >> 16)); + if (vk) { + dasm_put(Dst, 2966); + } else { + dasm_put(Dst, 2969); + } + dasm_put(Dst, 2971); + if (!vk) { + dasm_put(Dst, 2983); + } + break; + + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + dasm_put(Dst, 2989, 32-3, -(BCBIAS_J*4 >> 16)); + if (vk) { + dasm_put(Dst, 3001); + } else { + dasm_put(Dst, 3003); + } + dasm_put(Dst, 3005); + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + dasm_put(Dst, 3016); + if (op == BC_IST || op == BC_ISF) { + dasm_put(Dst, 3022, -(BCBIAS_J*4 >> 16)); + if (op == BC_IST) { + dasm_put(Dst, 3027); + } else { + dasm_put(Dst, 3029); + } + } else { + if (op == BC_ISTC) { + dasm_put(Dst, 3031); + } else { + dasm_put(Dst, 3034); + } + dasm_put(Dst, 3037, -(BCBIAS_J*4 >> 16)); + } + dasm_put(Dst, 3044); + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + dasm_put(Dst, 3055); + break; + case BC_NOT: + dasm_put(Dst, 3068, LJ_TTRUE); + break; + case BC_UNM: + dasm_put(Dst, 3084); + break; + case BC_LEN: + dasm_put(Dst, 3101, Dt5(->len)); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 3125, Dt6(->metatable)); +#endif + dasm_put(Dst, 3132); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 3138, Dt6(->nomm), 1<<MM_len); +#endif + break; + + /* -- 
Binary ops -------------------------------------------------------- */ + + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3148); + break; + case 1: + dasm_put(Dst, 3154); + break; + default: + dasm_put(Dst, 3160); + break; + } + dasm_put(Dst, 3167); + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3180); + break; + case 1: + dasm_put(Dst, 3186); + break; + default: + dasm_put(Dst, 3192); + break; + } + dasm_put(Dst, 3199); + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3212); + break; + case 1: + dasm_put(Dst, 3218); + break; + default: + dasm_put(Dst, 3224); + break; + } + dasm_put(Dst, 3231); + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3244); + break; + case 1: + dasm_put(Dst, 3250); + break; + default: + dasm_put(Dst, 3256); + break; + } + dasm_put(Dst, 3263); + break; + case BC_MODVN: + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3276); + break; + case 1: + dasm_put(Dst, 3282); + break; + default: + dasm_put(Dst, 3288); + break; + } + dasm_put(Dst, 3295); + break; + case BC_MODNV: case BC_MODVV: + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 3313); + break; + case 1: + dasm_put(Dst, 3319); + break; + default: + dasm_put(Dst, 3325); + break; + } + dasm_put(Dst, 3332); + break; + case BC_POW: + dasm_put(Dst, 3335); + break; + + case BC_CAT: + dasm_put(Dst, 3357, Dt1(->base), 32-3, Dt1(->base)); + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + dasm_put(Dst, 3387, 32-1); + break; + case BC_KCDATA: +#if LJ_HASFFI + dasm_put(Dst, 3404, 32-1, LJ_TCDATA); +#endif + break; + case BC_KSHORT: + dasm_put(Dst, 3423, 32-3); + break; + case BC_KNUM: + dasm_put(Dst, 3439); + break; + case BC_KPRI: + dasm_put(Dst, 3452, 32-3); + break; + case BC_KNIL: + dasm_put(Dst, 3467); + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + dasm_put(Dst, 3486, 32-1, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_USETV: + dasm_put(Dst, 3507, 32-1, offsetof(GCfuncL, uvptr), DtA(->marked), DtA(->v), LJ_GC_BLACK, DtA(->closed), -(LJ_TISNUM+1), LJ_TISGCV - (LJ_TISNUM+1), Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G); + break; + case BC_USETS: + dasm_put(Dst, 3559, 32-1, 32-1, offsetof(GCfuncL, uvptr), DtA(->marked), DtA(->v), LJ_GC_BLACK, Dt5(->marked), DtA(->closed), LJ_GC_WHITES, GG_DISP2G); + break; + case BC_USETN: + dasm_put(Dst, 3608, 32-1, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_USETP: + dasm_put(Dst, 3629, 32-1, offsetof(GCfuncL, uvptr), 32-3, DtA(->v)); + break; + + case BC_UCLO: + dasm_put(Dst, 3652, Dt1(->openupval), 32-1, -(BCBIAS_J*4 >> 16), Dt1(->base), Dt1(->base)); + break; + + case BC_FNEW: + dasm_put(Dst, 3682, 32-1, Dt1(->base), Dt1(->base)); + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + case BC_TDUP: + dasm_put(Dst, 3708, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base)); + if (op == BC_TNEW) { + dasm_put(Dst, 3721); + } else { + dasm_put(Dst, 3729, 32-1); + } + dasm_put(Dst, 3736, Dt1(->base)); + break; 
+ + case BC_GGET: + case BC_GSET: + dasm_put(Dst, 3759, 32-1, Dt7(->env)); + if (op == BC_GGET) { + dasm_put(Dst, 3767); + } else { + dasm_put(Dst, 3770); + } + break; + + case BC_TGETV: + dasm_put(Dst, 3773, Dt6(->asize), Dt6(->array), 31-3, Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + break; + case BC_TGETS: + dasm_put(Dst, 3831, 32-1, Dt6(->hmask), Dt5(->hash), Dt6(->node), 31-5, 31-3, DtB(->key), DtB(->val), DtB(->next), Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + dasm_put(Dst, 3895); + break; + case BC_TGETB: + dasm_put(Dst, 3900, 32-3, Dt6(->asize), Dt6(->array), Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + break; + + case BC_TSETV: + dasm_put(Dst, 3944, Dt6(->asize), Dt6(->array), 31-3, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex); + dasm_put(Dst, 4011, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist)); + break; + case BC_TSETS: + dasm_put(Dst, 4023, 32-1, Dt6(->hmask), Dt5(->hash), Dt6(->node), Dt6(->nomm), 31-5, 31-3, Dt6(->marked), DtB(->key), DtB(->val), LJ_GC_BLACK, DtB(->val), Dt6(->metatable)); + dasm_put(Dst, 4084, Dt6(->nomm), 1<<MM_newindex, DtB(->next), Dt6(->metatable), DISPATCH_GL(tmptv), Dt1(->base), Dt6(->nomm), 1<<MM_newindex, Dt1(->base), DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain)); + dasm_put(Dst, 4135, Dt6(->marked), Dt6(->gclist)); + break; + case BC_TSETB: + dasm_put(Dst, 4142, 32-3, Dt6(->asize), Dt6(->array), Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked)); + dasm_put(Dst, 4202, Dt6(->gclist)); + break; + + case BC_TSETM: + dasm_put(Dst, 4207, 32-3, Dt6(->asize), 31-3, Dt6(->marked), Dt6(->array), LJ_GC_BLACK, Dt1(->base), DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist)); + dasm_put(Dst, 4276); + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALLM: + dasm_put(Dst, 4279); + break; + case BC_CALL: + dasm_put(Dst, 4281, Dt7(->pc)); + break; + + case BC_CALLMT: + dasm_put(Dst, 4301); + break; + case BC_CALLT: + dasm_put(Dst, 4303, FRAME_TYPE, Dt7(->ffid), FRAME_VARG, Dt7(->pc), -4-8, Dt7(->pc), PC2PROTO(k), FRAME_TYPEP); + dasm_put(Dst, 4368, FRAME_TYPE); + break; + + case BC_ITERC: + dasm_put(Dst, 4375, Dt7(->pc)); + break; + + case BC_ITERN: +#if LJ_HASJIT +#endif + dasm_put(Dst, 4401, Dt6(->asize), Dt6(->array), 31-3, -(BCBIAS_J*4 >> 16), Dt6(->hmask), Dt6(->node), 31-5, 31-3, DtB(->key), -(BCBIAS_J*4 >> 16)); + dasm_put(Dst, 4480); + break; + + case BC_ISNEXT: + dasm_put(Dst, 4484, LJ_TTAB, LJ_TFUNC, LJ_TNIL, Dt8(->ffid), FF_next_N, 32-1, -(BCBIAS_J*4 >> 16), BC_JMP, BC_ITERC, -(BCBIAS_J*4 >> 16)); + break; + + case BC_VARG: + dasm_put(Dst, 4535, FRAME_VARG, Dt1(->maxstack), Dt1(->top), Dt1(->base), 32-3, Dt1(->base)); + dasm_put(Dst, 4615); + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + dasm_put(Dst, 4621); + break; + + case BC_RET: + dasm_put(Dst, 4623, FRAME_TYPE, FRAME_VARG, Dt7(->pc), PC2PROTO(k), FRAME_TYPEP); + break; + + case BC_RET0: case BC_RET1: + dasm_put(Dst, 4693, FRAME_TYPE, FRAME_VARG); + if (op == BC_RET1) { + dasm_put(Dst, 4706); + } + dasm_put(Dst, 4709, Dt7(->pc), PC2PROTO(k)); + break; + + /* -- Loops and branches ------------------------------------------------ */ + + case BC_FORL: +#if LJ_HASJIT + dasm_put(Dst, 4737); +#endif + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case 
BC_FORI: + case BC_IFORL: + vk = (op == BC_IFORL || op == BC_JFORL); + dasm_put(Dst, 4739, FORL_IDX*8, FORL_STEP*8, FORL_STOP*8); + if (!vk) { + dasm_put(Dst, 4747); + } + if (vk) { + dasm_put(Dst, 4755, FORL_IDX*8); + } + dasm_put(Dst, 4759, FORL_EXT*8); + if (op != BC_JFORL) { + dasm_put(Dst, 4767, 32-1); + if (op == BC_JFORI) { + dasm_put(Dst, 4771, -(BCBIAS_J*4 >> 16)); + } else { + dasm_put(Dst, 4774, -(BCBIAS_J*4 >> 16)); + } + } + if (op == BC_FORI) { + dasm_put(Dst, 4777); + } else if (op == BC_IFORL) { + dasm_put(Dst, 4779); + } else { + dasm_put(Dst, 4781, BC_JLOOP); + } + dasm_put(Dst, 4784); + break; + + case BC_ITERL: +#if LJ_HASJIT + dasm_put(Dst, 4799); +#endif + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + dasm_put(Dst, 4801); + if (op == BC_JITERL) { + dasm_put(Dst, 4807); + } else { + dasm_put(Dst, 4809, 32-1, -(BCBIAS_J*4 >> 16)); + } + dasm_put(Dst, 4816); + break; + + case BC_LOOP: +#if LJ_HASJIT + dasm_put(Dst, 4828); +#endif + break; + + case BC_ILOOP: + dasm_put(Dst, 4830); + break; + + case BC_JLOOP: +#if LJ_HASJIT + dasm_put(Dst, 4841); +#endif + break; + + case BC_JMP: + dasm_put(Dst, 4843, 32-1, -(BCBIAS_J*4 >> 16)); + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: +#if LJ_HASJIT + dasm_put(Dst, 4859); +#endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + dasm_put(Dst, 4861, Dt1(->maxstack), -4+PC2PROTO(numparams), -4+PC2PROTO(k), 31-3); + if (op == BC_JFUNCF) { + dasm_put(Dst, 4879); + } else { + dasm_put(Dst, 4881); + } + dasm_put(Dst, 4890); + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + dasm_put(Dst, 4896); + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + dasm_put(Dst, 4898, Dt1(->maxstack), 8+FRAME_VARG, -4+PC2PROTO(k), -4+PC2PROTO(numparams)); + break; + + case BC_FUNCC: + case BC_FUNCCW: + if (op == BC_FUNCC) { + dasm_put(Dst, 4948, Dt8(->f)); + } else { + dasm_put(Dst, 4951, DISPATCH_GL(wrapf)); + } + dasm_put(Dst, 4954, Dt1(->maxstack), Dt1(->base), Dt1(->top), ~LJ_VMST_C); + if (op == BC_FUNCCW) { + dasm_put(Dst, 4967, Dt8(->f)); + } + dasm_put(Dst, 4970, DISPATCH_GL(vmstate), Dt1(->top), 31-3, Dt1(->base), ~LJ_VMST_INTERP, DISPATCH_GL(vmstate)); + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx); + + dasm_put(Dst, 4991); + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ + int i; + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 65\n" + "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" + "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n", + (int)ctx->codesz, CFRAME_SIZE); + for (i = 14; i <= 31; i++) +#if LJ_TARGET_PPCSPE + fprintf(ctx->fp, + "\t.byte %d\n\t.uleb128 %d\n" + "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n", + 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i)); +#else +#error "missing frame info for saved registers" +#endif + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE0:\n\n"); + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -4\n" + "\t.byte 65\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n" + "\t.align 2\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .LASFDE1-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" + "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n", + (int)ctx->codesz, CFRAME_SIZE); + for (i = 14; i <= 31; i++) +#if LJ_TARGET_PPCSPE + fprintf(ctx->fp, + "\t.byte %d\n\t.uleb128 %d\n" + "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n", + 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i)); +#else +#error "missing frame info for saved registers" +#endif + fprintf(ctx->fp, + "\t.align 2\n" + ".LEFDE1:\n\n"); + break; + default: + break; + } +} + diff --git a/src/luajit2/src/buildvm_x64.h b/src/luajit2/src/buildvm_x64.h new file mode 100644 index 0000000000..f6ed666c73 --- /dev/null +++ b/src/luajit2/src/buildvm_x64.h @@ -0,0 +1,3249 @@ +/* +** This file has been pre-processed with DynASM. +** http://luajit.org/dynasm.html +** DynASM version 1.3.0, DynASM x64 version 1.3.0 +** DO NOT EDIT! The original file is in "buildvm_x86.dasc". 
+*/ + +#if DASM_VERSION != 10300 +#error "Version mismatch between DynASM and included encoding engine" +#endif + +#define DASM_SECTION_CODE_OP 0 +#define DASM_SECTION_CODE_SUB 1 +#define DASM_MAXSECTION 2 +static const unsigned char build_actionlist[16156] = { + 254,1,248,10,252,247,195,237,15,132,244,11,131,227,252,248,41,218,72,141, + 76,25,252,248,139,90,252,252,199,68,10,4,237,248,12,131,192,1,137,68,36,4, + 252,247,195,237,15,132,244,13,248,14,129,252,243,239,252,247,195,237,15,133, + 244,10,65,199,134,233,237,131,227,252,248,41,211,252,247,219,131,232,1,15, + 132,244,248,248,1,72,139,44,10,72,137,106,252,248,131,194,8,131,232,1,15, + 133,244,1,248,2,255,139,108,36,24,137,157,233,248,3,139,68,36,4,139,76,36, + 16,248,4,57,193,15,133,244,252,248,5,131,252,234,8,137,149,233,248,15,72, + 139,76,36,32,72,137,141,233,49,192,248,16,72,131,196,40,65,94,65,95,91,93, + 195,248,6,15,130,244,253,59,149,233,15,135,244,254,199,66,252,252,237,131, + 194,8,131,192,1,252,233,244,4,248,7,255,133,201,15,132,244,5,41,193,141,20, + 202,252,233,244,5,248,8,137,149,233,137,68,36,4,137,206,137,252,239,232,251, + 1,0,139,149,233,252,233,244,3,248,17,137,252,240,72,137,252,252,248,18,139, + 108,36,24,139,173,233,199,133,233,237,252,233,244,16,248,19,139,124,36,24, + 137,198,72,131,196,40,65,94,65,95,91,93,252,233,251,1,1,248,20,72,129,231, + 239,72,137,252,252,248,21,255,139,108,36,24,72,199,193,252,248,252,255,252, + 255,252,255,184,237,139,149,233,68,139,181,233,65,129,198,239,139,90,252, + 252,199,66,252,252,237,65,199,134,233,237,252,233,244,12,248,22,190,237,252, + 233,244,248,248,23,131,232,8,252,233,244,247,248,24,141,68,194,252,248,248, + 1,15,182,139,233,131,195,4,137,149,233,255,137,133,233,137,92,36,28,137,206, + 248,2,137,252,239,232,251,1,0,139,149,233,139,133,233,139,106,252,248,41, + 208,193,232,3,131,192,1,139,157,233,139,11,15,182,252,233,15,182,205,131, + 195,4,65,252,255,36,252,238,248,25,85,83,65,87,65,86,72,131,252,236,40,137, + 252,253,137,124,36,24,137,252,241,187,237,49,192,76,141,188,253,36,233,68, + 139,181,233,65,129,198,239,76,137,189,233,137,68,36,28,72,137,68,36,32,137, + 68,36,16,137,68,36,20,56,133,233,15,132,244,249,65,199,134,233,237,136,133, + 233,139,149,233,139,133,233,41,200,193,232,3,131,192,1,41,209,139,90,252, + 252,137,68,36,4,252,247,195,237,255,15,132,244,13,252,233,244,14,248,26,85, + 83,65,87,65,86,72,131,252,236,40,187,237,137,76,36,20,252,233,244,247,248, + 27,85,83,65,87,65,86,72,131,252,236,40,187,237,248,1,137,84,36,16,137,252, + 253,137,124,36,24,137,252,241,76,139,189,233,76,137,124,36,32,137,108,36, + 28,72,137,165,233,248,2,68,139,181,233,65,129,198,239,248,3,65,199,134,233, + 237,139,149,233,255,1,203,41,211,139,133,233,41,200,193,232,3,131,192,1,248, + 28,139,105,252,248,129,121,253,252,252,239,15,133,244,29,248,30,137,202,137, + 90,252,252,139,157,233,139,11,15,182,252,233,15,182,205,131,195,4,65,252, + 255,36,252,238,248,31,85,83,65,87,65,86,72,131,252,236,40,137,252,253,137, + 124,36,24,137,108,36,28,68,139,189,233,68,43,189,233,199,68,36,20,0,0,0,0, + 68,137,124,36,16,76,139,189,233,76,137,124,36,32,72,137,165,233,252,255,209, + 133,192,15,132,244,15,137,193,187,237,252,233,244,2,248,11,1,209,131,227, + 252,248,137,213,41,218,199,68,193,252,252,237,137,200,139,93,252,244,72,99, + 77,252,240,133,201,15,132,244,247,255,76,141,61,245,76,1,252,249,68,139,122, + 252,248,69,139,191,233,69,139,191,233,252,255,225,248,1,41,213,193,252,237, + 3,141,69,252,255,252,233,244,32,248,33,15,182,75,252,255,131,252,237,16,141, + 
12,202,41,252,233,15,132,244,34,252,247,217,193,252,233,3,139,124,36,24,137, + 151,233,137,202,72,139,8,72,137,77,0,137,252,238,252,233,244,35,248,36,137, + 4,36,199,68,36,4,237,72,141,4,36,128,123,252,252,235,15,133,244,247,65,141, + 142,233,137,41,199,65,4,237,255,137,205,252,233,244,248,248,37,15,182,67, + 252,254,255,199,68,36,4,237,137,4,36,255,252,242,15,42,192,252,242,15,17, + 4,36,255,72,141,4,36,252,233,244,247,248,38,15,182,67,252,254,141,4,194,248, + 1,15,182,107,252,255,141,44,252,234,248,2,139,124,36,24,137,151,233,137,252, + 238,72,137,194,137,252,253,137,92,36,28,232,251,1,2,139,149,233,133,192,15, + 132,244,249,248,34,15,182,75,252,253,72,139,40,72,137,44,202,139,3,15,182, + 204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,3,139,141,233, + 137,89,252,244,141,153,233,41,211,139,105,252,248,184,237,252,233,244,30, + 248,39,137,4,36,199,68,36,4,237,72,141,4,36,128,123,252,252,235,15,133,244, + 247,255,65,141,142,233,137,41,199,65,4,237,137,205,252,233,244,248,248,40, + 15,182,67,252,254,255,72,141,4,36,252,233,244,247,248,41,15,182,67,252,254, + 141,4,194,248,1,15,182,107,252,255,141,44,252,234,248,2,139,124,36,24,137, + 151,233,137,252,238,72,137,194,137,252,253,137,92,36,28,232,251,1,3,139,149, + 233,133,192,15,132,244,249,15,182,75,252,253,72,139,44,202,72,137,40,248, + 42,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238, + 248,3,139,141,233,137,89,252,244,15,182,67,252,253,72,139,44,194,72,137,105, + 16,141,153,233,41,211,139,105,252,248,184,237,252,233,244,30,248,43,139,108, + 36,24,137,149,233,141,52,202,141,20,194,137,252,239,15,182,75,252,252,137, + 92,36,28,232,251,1,4,248,3,139,149,233,255,131,252,248,1,15,135,244,44,248, + 4,141,91,4,15,130,244,252,248,5,15,183,67,252,254,141,156,253,131,233,248, + 6,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238, + 248,45,131,195,4,129,120,253,4,239,15,130,244,5,252,233,244,6,248,46,129, + 120,253,4,239,252,233,244,4,248,47,131,252,235,4,137,206,137,252,233,139, + 108,36,24,137,149,233,255,137,194,137,252,239,137,92,36,28,232,251,1,5,252, + 233,244,3,248,48,255,131,252,235,4,139,108,36,24,137,149,233,137,252,239, + 139,115,252,252,137,92,36,28,232,251,1,6,252,233,244,3,255,248,49,255,15, + 182,107,252,255,255,248,50,65,141,4,199,252,233,244,247,248,51,255,248,52, + 65,141,4,199,141,44,252,234,149,252,233,244,248,248,53,141,4,194,137,197, + 252,233,244,248,248,54,255,248,55,141,4,194,248,1,141,44,252,234,248,2,141, + 12,202,68,15,182,67,252,252,137,206,137,193,139,124,36,24,137,151,233,137, + 252,234,137,252,253,137,92,36,28,232,251,1,7,139,149,233,133,192,15,132,244, + 42,248,44,137,193,41,208,137,89,252,244,141,152,233,184,237,252,233,244,28, + 248,56,139,108,36,24,137,149,233,141,52,194,137,252,239,137,92,36,28,232, + 251,1,8,139,149,233,255,133,192,15,133,244,44,15,183,67,252,254,139,60,194, + 252,233,244,57,255,252,233,244,44,255,248,58,141,76,202,8,248,29,137,76,36, + 4,137,4,36,131,252,233,8,139,108,36,24,137,149,233,137,206,141,20,193,137, + 252,239,137,92,36,28,232,251,1,9,139,149,233,139,76,36,4,139,4,36,139,105, + 252,248,131,192,1,65,57,215,15,132,244,59,137,202,137,90,252,252,139,157, + 233,139,11,15,182,252,233,15,182,205,131,195,4,65,252,255,36,252,238,248, + 60,139,108,36,24,137,149,233,137,206,137,252,239,137,92,36,28,232,251,1,10, + 139,149,233,139,67,252,252,15,182,204,15,182,232,193,232,16,65,252,255,164, + 253,252,238,233,248,61,129,252,248,239,15,130,244,62,139,106,4,129,252,253, + 
239,15,131,244,62,139,90,252,252,137,68,36,4,137,106,252,252,139,42,137,106, + 252,248,131,232,2,15,132,244,248,255,137,209,248,1,131,193,8,72,139,41,72, + 137,105,252,248,131,232,1,15,133,244,1,248,2,139,68,36,4,252,233,244,63,248, + 64,129,252,248,239,15,130,244,62,139,106,4,137,252,233,193,252,249,15,131, + 252,249,252,254,15,132,244,249,184,237,252,247,213,57,232,255,15,71,197,255, + 15,134,244,247,137,232,248,1,255,248,2,139,106,252,248,139,132,253,197,233, + 139,90,252,252,199,66,252,252,237,137,66,252,248,252,233,244,65,248,3,184, + 237,252,233,244,2,248,66,129,252,248,239,15,130,244,62,139,106,4,139,90,252, + 252,129,252,253,239,15,133,244,252,248,1,139,42,139,173,233,248,2,133,252, + 237,199,66,252,252,237,255,15,132,244,65,65,139,134,233,199,66,252,252,237, + 137,106,252,248,139,141,233,35,136,233,105,201,239,3,141,233,248,3,129,185, + 233,239,15,133,244,250,57,129,233,15,132,244,251,248,4,139,137,233,133,201, + 15,133,244,3,255,252,233,244,65,248,5,139,105,4,129,252,253,239,15,132,244, + 65,139,1,137,106,252,252,137,66,252,248,252,233,244,65,248,6,129,252,253, + 239,15,132,244,1,129,252,253,239,15,135,244,254,129,252,253,239,15,134,244, + 253,189,237,252,233,244,254,248,7,255,189,237,248,8,252,247,213,65,139,172, + 253,174,233,252,233,244,2,248,67,129,252,248,239,15,130,244,62,129,122,253, + 4,239,15,133,244,62,139,42,131,189,233,0,15,133,244,62,129,122,253,12,239, + 15,133,244,62,139,66,8,137,133,233,139,90,252,252,199,66,252,252,237,255, + 137,106,252,248,252,246,133,233,235,15,132,244,247,128,165,233,235,65,139, + 134,233,65,137,174,233,137,133,233,248,1,252,233,244,65,248,68,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,133,244,62,137,213,139,50,141,82,8, + 139,124,36,24,232,251,1,11,137,252,234,72,139,40,139,90,252,252,72,137,106, + 252,248,252,233,244,65,248,69,255,129,252,248,239,15,133,244,62,129,122,253, + 4,239,255,15,133,244,247,139,42,252,233,244,70,248,1,15,135,244,62,255,15, + 131,244,62,255,252,242,15,16,2,252,233,244,71,255,221,2,252,233,244,72,255, + 248,73,129,252,248,239,15,130,244,62,139,90,252,252,129,122,253,4,239,15, + 133,244,249,139,2,248,2,199,66,252,252,237,137,66,252,248,252,233,244,65, + 248,3,129,122,253,4,239,15,135,244,62,65,131,190,233,0,15,133,244,62,65,139, + 174,233,65,59,174,233,255,15,130,244,247,232,244,74,248,1,139,108,36,24,137, + 149,233,137,92,36,28,137,214,137,252,239,255,232,251,1,12,255,232,251,1,13, + 255,139,149,233,252,233,244,2,248,75,129,252,248,239,15,130,244,62,15,132, + 244,248,248,1,129,122,253,4,239,15,133,244,62,139,108,36,24,137,149,233,137, + 149,233,139,90,252,252,139,50,141,82,8,137,252,239,137,92,36,28,232,251,1, + 14,139,149,233,133,192,15,132,244,249,72,139,106,8,72,139,66,16,72,137,106, + 252,248,72,137,2,248,76,184,237,255,252,233,244,77,248,2,199,66,12,237,252, + 233,244,1,248,3,199,66,252,252,237,252,233,244,65,248,78,129,252,248,239, + 15,130,244,62,139,42,129,122,253,4,239,15,133,244,62,255,131,189,233,0,15, + 133,244,62,255,139,106,252,248,139,133,233,139,90,252,252,199,66,252,252, + 237,137,66,252,248,199,66,12,237,184,237,252,233,244,77,248,79,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,133,244,62,129,122,253,12,239,255, + 139,90,252,252,255,139,66,8,131,192,1,199,66,252,252,237,137,66,252,248,255, + 252,242,15,16,66,8,72,189,237,237,102,72,15,110,205,252,242,15,88,193,252, + 242,15,45,192,252,242,15,17,66,252,248,255,139,42,59,133,233,15,131,244,248, + 193,224,3,3,133,233,248,1,129,120,253,4,239,15,132,244,80,72,139,40,72,137, + 
42,252,233,244,76,248,2,131,189,233,0,15,132,244,80,137,252,239,137,213,137, + 198,232,251,1,15,137,252,234,133,192,15,133,244,1,248,80,184,237,252,233, + 244,77,248,81,255,139,106,252,248,139,133,233,139,90,252,252,199,66,252,252, + 237,137,66,252,248,255,199,66,12,237,199,66,8,0,0,0,0,255,15,87,192,252,242, + 15,17,66,8,255,217,252,238,221,90,8,255,184,237,252,233,244,77,248,82,129, + 252,248,239,15,130,244,62,141,74,8,131,232,1,187,237,248,1,65,15,182,174, + 233,193,252,237,235,131,229,1,1,252,235,252,233,244,28,248,83,129,252,248, + 239,15,130,244,62,129,122,253,12,239,15,133,244,62,255,139,106,4,137,106, + 12,199,66,4,237,139,42,139,90,8,137,106,8,137,26,141,74,16,131,232,2,187, + 237,252,233,244,1,248,84,129,252,248,239,15,130,244,62,139,42,139,90,252, + 252,137,92,36,28,137,44,36,129,122,253,4,239,15,133,244,62,72,131,189,233, + 0,15,133,244,62,128,189,233,235,15,135,244,62,139,141,233,15,132,244,247, + 255,59,141,233,15,132,244,62,248,1,141,92,193,252,240,59,157,233,15,135,244, + 62,137,157,233,139,108,36,24,137,149,233,131,194,8,137,149,233,141,108,194, + 232,72,41,221,57,203,15,132,244,249,248,2,72,139,4,43,72,137,67,252,248,131, + 252,235,8,57,203,15,133,244,2,248,3,137,206,139,60,36,232,244,25,65,199,134, + 233,237,255,139,108,36,24,139,28,36,139,149,233,129,252,248,239,15,135,244, + 254,248,4,139,139,233,68,139,187,233,137,139,233,68,137,252,251,41,203,15, + 132,244,252,141,4,26,193,252,235,3,59,133,233,15,135,244,255,137,213,72,41, + 205,248,5,72,139,1,72,137,4,41,131,193,8,68,57,252,249,15,133,244,5,248,6, + 141,67,2,199,66,252,252,237,248,7,139,92,36,28,137,68,36,4,72,199,193,252, + 248,252,255,252,255,252,255,252,247,195,237,255,15,132,244,13,252,233,244, + 14,248,8,199,66,252,252,237,139,139,233,131,252,233,8,137,139,233,72,139, + 1,72,137,2,184,237,252,233,244,7,248,9,139,12,36,68,137,185,233,137,222,137, + 252,239,232,251,1,0,139,28,36,139,149,233,252,233,244,4,248,85,139,106,252, + 248,139,173,233,139,90,252,252,137,92,36,28,137,44,36,72,131,189,233,0,15, + 133,244,62,255,128,189,233,235,15,135,244,62,139,141,233,15,132,244,247,59, + 141,233,15,132,244,62,248,1,141,92,193,252,248,59,157,233,15,135,244,62,137, + 157,233,139,108,36,24,137,149,233,137,149,233,141,108,194,252,240,72,41,221, + 57,203,15,132,244,249,248,2,255,72,139,4,43,72,137,67,252,248,131,252,235, + 8,57,203,15,133,244,2,248,3,137,206,139,60,36,232,244,25,65,199,134,233,237, + 139,108,36,24,139,28,36,139,149,233,129,252,248,239,15,135,244,254,248,4, + 139,139,233,68,139,187,233,137,139,233,68,137,252,251,41,203,15,132,244,252, + 141,4,26,193,252,235,3,59,133,233,15,135,244,255,255,137,213,72,41,205,248, + 5,72,139,1,72,137,4,41,131,193,8,68,57,252,249,15,133,244,5,248,6,141,67, + 1,248,7,139,92,36,28,137,68,36,4,49,201,252,247,195,237,15,132,244,13,252, + 233,244,14,248,8,137,222,137,252,239,232,251,1,16,248,9,139,12,36,68,137, + 185,233,137,222,137,252,239,232,251,1,0,139,28,36,139,149,233,252,233,244, + 4,248,86,139,108,36,24,72,252,247,133,233,237,15,132,244,62,255,137,149,233, + 141,68,194,252,248,137,133,233,49,192,72,137,133,233,176,235,136,133,233, + 252,233,244,16,255,248,70,255,248,72,139,90,252,252,221,90,252,248,252,233, + 244,65,255,248,87,129,252,248,239,15,130,244,62,255,129,122,253,4,239,15, + 133,244,248,139,42,131,252,253,0,15,137,244,70,252,247,221,15,136,244,247, + 248,88,248,70,139,90,252,252,199,66,252,252,237,137,106,252,248,252,233,244, + 65,248,1,139,90,252,252,199,66,252,252,0,0,224,65,199,66,252,248,0,0,0,0, + 
252,233,244,65,248,2,15,135,244,62,255,129,122,253,4,239,15,131,244,62,255, + 252,242,15,16,2,72,184,237,237,102,72,15,110,200,15,84,193,248,71,139,90, + 252,252,252,242,15,17,66,252,248,255,221,2,217,225,248,71,248,72,139,90,252, + 252,221,90,252,248,255,248,65,184,237,248,77,137,68,36,4,248,63,252,247,195, + 237,15,133,244,253,248,5,56,67,252,255,15,135,244,252,15,182,75,252,253,72, + 252,247,209,141,20,202,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65, + 252,255,36,252,238,248,6,199,68,194,252,244,237,131,192,1,252,233,244,5,248, + 7,72,199,193,252,248,252,255,252,255,252,255,252,233,244,14,248,89,255,129, + 122,253,4,239,15,133,244,247,139,42,252,233,244,70,248,1,15,135,244,62,255, + 252,242,15,16,2,232,244,90,255,252,242,15,45,232,129,252,253,0,0,0,128,15, + 133,244,70,252,242,15,42,205,102,15,46,193,15,138,244,71,15,132,244,70,255, + 221,2,232,244,90,255,248,91,255,252,242,15,16,2,232,244,92,255,221,2,232, + 244,92,255,248,93,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,252,242,15,81,2,252,233,244,71,255,248,93,129,252,248,239,15,130,244, + 62,129,122,253,4,239,15,131,244,62,221,2,217,252,250,252,233,244,72,255,248, + 94,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,217,252, + 237,221,2,217,252,241,252,233,244,72,248,95,129,252,248,239,15,130,244,62, + 129,122,253,4,239,15,131,244,62,217,252,236,221,2,217,252,241,252,233,244, + 72,248,96,129,252,248,239,255,15,130,244,62,129,122,253,4,239,15,131,244, + 62,221,2,232,244,97,252,233,244,72,248,98,129,252,248,239,15,130,244,62,129, + 122,253,4,239,15,131,244,62,221,2,217,252,254,252,233,244,72,248,99,129,252, + 248,239,255,15,130,244,62,129,122,253,4,239,15,131,244,62,221,2,217,252,255, + 252,233,244,72,248,100,129,252,248,239,15,130,244,62,129,122,253,4,239,15, + 131,244,62,221,2,217,252,242,221,216,252,233,244,72,248,101,129,252,248,239, + 15,130,244,62,255,129,122,253,4,239,15,131,244,62,221,2,217,192,216,200,217, + 232,222,225,217,252,250,217,252,243,252,233,244,72,248,102,129,252,248,239, + 15,130,244,62,129,122,253,4,239,15,131,244,62,221,2,217,192,216,200,217,232, + 222,225,217,252,250,217,201,217,252,243,252,233,244,72,248,103,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,131,244,62,255,221,2,217,232,217,252, + 243,252,233,244,72,255,248,104,129,252,248,239,15,130,244,62,129,122,253, + 4,239,15,131,244,62,252,242,15,16,2,255,137,213,232,251,1,17,137,252,234, + 252,233,244,71,255,248,105,129,252,248,239,15,130,244,62,129,122,253,4,239, + 15,131,244,62,252,242,15,16,2,255,137,213,232,251,1,18,137,252,234,252,233, + 244,71,255,248,106,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,252,242,15,16,2,255,137,213,232,251,1,19,137,252,234,252,233,244,71, + 248,107,255,248,108,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,252,242,15,16,2,139,106,252,248,252,242,15,89,133,233,252,233,244, + 71,255,248,108,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244, + 62,221,2,139,106,252,248,220,141,233,252,233,244,72,255,248,109,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239,15,131, + 244,62,221,2,221,66,8,217,252,243,252,233,244,72,248,110,129,252,248,239, + 15,130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239,255,15,131, + 244,62,221,66,8,221,2,217,252,253,221,217,252,233,244,72,248,111,129,252, + 248,239,15,130,244,62,139,106,4,129,252,253,239,15,131,244,62,139,90,252, + 252,139,2,137,106,252,252,137,66,252,248,209,229,129,252,253,0,0,224,252, + 
255,15,131,244,249,9,232,15,132,244,249,184,252,254,3,0,0,129,252,253,0,0, + 32,0,15,130,244,250,248,1,193,252,237,21,41,197,255,252,242,15,42,197,255, + 137,44,36,219,4,36,255,139,106,252,252,129,229,252,255,252,255,15,128,129, + 205,0,0,224,63,137,106,252,252,248,2,255,252,242,15,17,2,255,221,26,255,184, + 237,252,233,244,77,248,3,255,15,87,192,252,233,244,2,255,217,252,238,252, + 233,244,2,255,248,4,255,252,242,15,16,2,72,189,237,237,102,72,15,110,205, + 252,242,15,89,193,252,242,15,17,66,252,248,255,221,2,199,4,36,0,0,128,90, + 216,12,36,221,90,252,248,255,139,106,252,252,184,52,4,0,0,209,229,252,233, + 244,1,255,248,112,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,252,242,15,16,2,255,248,112,129,252,248,239,15,130,244,62,129,122, + 253,4,239,15,131,244,62,221,2,255,139,106,4,139,90,252,252,209,229,129,252, + 253,0,0,224,252,255,15,132,244,250,255,15,40,224,232,244,113,252,242,15,92, + 224,248,1,252,242,15,17,66,252,248,252,242,15,17,34,255,217,192,232,244,113, + 220,252,233,248,1,221,90,252,248,221,26,255,139,66,252,252,139,106,4,49,232, + 15,136,244,249,248,2,184,237,252,233,244,77,248,3,129,252,245,0,0,0,128,137, + 106,4,252,233,244,2,248,4,255,15,87,228,252,233,244,1,255,217,252,238,217, + 201,252,233,244,1,255,248,114,129,252,248,239,15,130,244,62,129,122,253,4, + 239,15,131,244,62,129,122,253,12,239,15,131,244,62,221,66,8,221,2,248,1,217, + 252,248,223,224,158,15,138,244,1,221,217,252,233,244,72,255,248,115,129,252, + 248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239, + 15,131,244,62,252,242,15,16,2,252,242,15,16,74,8,232,244,116,252,233,244, + 71,255,248,115,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244, + 62,129,122,253,12,239,15,131,244,62,221,2,221,66,8,232,244,116,252,233,244, + 72,255,248,117,185,2,0,0,0,129,122,253,4,239,255,15,133,244,250,139,42,248, + 1,57,193,15,131,244,70,129,124,253,202,252,252,239,15,133,244,249,59,108, + 202,252,248,15,79,108,202,252,248,131,193,1,252,233,244,1,248,3,15,135,244, + 62,255,252,233,244,252,248,4,15,135,244,62,255,252,242,15,16,2,248,5,57,193, + 15,131,244,71,129,124,253,202,252,252,239,255,15,130,244,252,15,135,244,62, + 252,242,15,42,76,202,252,248,252,233,244,253,255,248,6,252,242,15,16,76,202, + 252,248,248,7,252,242,15,93,193,131,193,1,252,233,244,5,255,248,118,185,2, + 0,0,0,129,122,253,4,239,255,15,133,244,250,139,42,248,1,57,193,15,131,244, + 70,129,124,253,202,252,252,239,15,133,244,249,59,108,202,252,248,15,76,108, + 202,252,248,131,193,1,252,233,244,1,248,3,15,135,244,62,255,248,6,252,242, + 15,16,76,202,252,248,248,7,252,242,15,95,193,131,193,1,252,233,244,5,255, + 248,9,221,216,252,233,244,62,255,248,119,129,252,248,239,15,130,244,62,129, + 122,253,4,239,15,133,244,62,139,42,255,139,173,233,252,233,244,70,255,252, + 242,15,42,133,233,252,233,244,71,255,219,133,233,252,233,244,72,255,248,120, + 129,252,248,239,15,133,244,62,129,122,253,4,239,15,133,244,62,139,42,139, + 90,252,252,131,189,233,1,15,130,244,80,15,182,173,233,255,252,242,15,42,197, + 252,233,244,71,255,137,44,36,219,4,36,252,233,244,72,255,248,121,65,139,174, + 233,65,59,174,233,15,130,244,247,232,244,74,248,1,129,252,248,239,15,133, + 244,62,129,122,253,4,239,255,15,133,244,62,139,42,129,252,253,252,255,0,0, + 0,15,135,244,62,137,108,36,4,255,15,131,244,62,252,242,15,44,42,129,252,253, + 252,255,0,0,0,15,135,244,62,137,108,36,4,255,15,131,244,62,221,2,219,92,36, + 4,129,124,36,4,252,255,0,0,0,15,135,244,62,255,199,68,36,8,1,0,0,0,72,141, + 
68,36,4,248,122,139,108,36,24,137,149,233,139,84,36,8,72,137,198,137,252, + 239,137,92,36,28,232,251,1,20,139,149,233,139,90,252,252,199,66,252,252,237, + 137,66,252,248,252,233,244,65,248,123,65,139,174,233,65,59,174,233,15,130, + 244,247,232,244,74,248,1,199,68,36,4,252,255,252,255,252,255,252,255,129, + 252,248,239,15,130,244,62,15,134,244,247,129,122,253,20,239,255,15,133,244, + 62,139,106,16,137,108,36,4,255,15,131,244,62,252,242,15,44,106,16,137,108, + 36,4,255,15,131,244,62,221,66,16,219,92,36,4,255,248,1,129,122,253,4,239, + 15,133,244,62,129,122,253,12,239,255,139,42,137,108,36,8,139,173,233,255, + 139,74,8,255,252,242,15,44,74,8,255,139,68,36,4,57,197,15,130,244,251,248, + 2,133,201,15,142,244,253,248,3,139,108,36,8,41,200,15,140,244,124,141,172, + 253,13,233,131,192,1,248,4,137,68,36,8,137,232,252,233,244,122,248,5,15,140, + 244,252,141,68,40,1,252,233,244,2,248,6,137,232,252,233,244,2,248,7,255,15, + 132,244,254,1,252,233,131,193,1,15,143,244,3,248,8,185,1,0,0,0,252,233,244, + 3,248,124,49,192,252,233,244,4,248,125,129,252,248,239,15,130,244,62,65,139, + 174,233,65,59,174,233,15,130,244,247,232,244,74,248,1,255,129,122,253,4,239, + 15,133,244,62,129,122,253,12,239,139,42,255,15,133,244,62,139,66,8,255,15, + 131,244,62,252,242,15,44,66,8,255,15,131,244,62,221,66,8,219,92,36,4,139, + 68,36,4,255,133,192,15,142,244,124,131,189,233,1,15,130,244,124,15,133,244, + 126,65,57,134,233,15,130,244,126,15,182,141,233,65,139,174,233,137,68,36, + 8,248,1,136,77,0,131,197,1,131,232,1,15,133,244,1,65,139,134,233,252,233, + 244,122,248,127,129,252,248,239,255,15,130,244,62,65,139,174,233,65,59,174, + 233,15,130,244,247,232,244,74,248,1,129,122,253,4,239,15,133,244,62,139,42, + 139,133,233,133,192,15,132,244,124,65,57,134,233,15,130,244,128,129,197,239, + 137,92,36,4,137,68,36,8,65,139,158,233,248,1,255,15,182,77,0,131,197,1,131, + 232,1,136,12,3,15,133,244,1,137,216,139,92,36,4,252,233,244,122,248,129,129, + 252,248,239,15,130,244,62,65,139,174,233,65,59,174,233,15,130,244,247,232, + 244,74,248,1,129,122,253,4,239,15,133,244,62,139,42,139,133,233,65,57,134, + 233,255,15,130,244,128,129,197,239,137,92,36,4,137,68,36,8,65,139,158,233, + 252,233,244,249,248,1,15,182,76,5,0,131,252,249,65,15,130,244,248,131,252, + 249,90,15,135,244,248,131,252,241,32,248,2,136,12,3,248,3,131,232,1,15,137, + 244,1,137,216,139,92,36,4,252,233,244,122,248,130,129,252,248,239,15,130, + 244,62,255,65,139,174,233,65,59,174,233,15,130,244,247,232,244,74,248,1,129, + 122,253,4,239,15,133,244,62,139,42,139,133,233,65,57,134,233,15,130,244,128, + 129,197,239,137,92,36,4,137,68,36,8,65,139,158,233,252,233,244,249,248,1, + 15,182,76,5,0,131,252,249,97,15,130,244,248,255,131,252,249,122,15,135,244, + 248,131,252,241,32,248,2,136,12,3,248,3,131,232,1,15,137,244,1,137,216,139, + 92,36,4,252,233,244,122,248,131,129,252,248,239,15,130,244,62,129,122,253, + 4,239,15,133,244,62,137,213,139,58,232,251,1,21,137,252,234,255,137,197,252, + 233,244,70,255,252,242,15,42,192,252,233,244,71,255,248,132,129,252,248,239, + 15,130,244,62,129,122,253,4,239,255,15,133,244,247,139,42,252,233,244,88, + 248,1,15,135,244,62,255,252,242,15,16,2,72,189,237,237,102,72,15,110,205, + 252,242,15,88,193,102,15,126,197,255,252,233,244,88,255,248,133,129,252,248, + 239,15,130,244,62,255,72,189,237,237,102,72,15,110,205,255,199,4,36,0,0,192, + 89,255,15,133,244,247,139,42,252,233,244,248,248,1,15,135,244,62,255,252, + 242,15,16,2,252,242,15,88,193,102,15,126,197,255,248,2,137,68,36,4,141,68, + 
194,252,240,248,1,57,208,15,134,244,88,129,120,253,4,239,255,15,133,244,248, + 35,40,131,232,8,252,233,244,1,248,2,15,135,244,134,255,15,131,244,134,255, + 252,242,15,16,0,252,242,15,88,193,102,15,126,193,33,205,255,131,232,8,252, + 233,244,1,248,135,129,252,248,239,15,130,244,62,255,15,133,244,248,11,40, + 131,232,8,252,233,244,1,248,2,15,135,244,134,255,252,242,15,16,0,252,242, + 15,88,193,102,15,126,193,9,205,255,131,232,8,252,233,244,1,248,136,129,252, + 248,239,15,130,244,62,255,15,133,244,248,51,40,131,232,8,252,233,244,1,248, + 2,15,135,244,134,255,252,242,15,16,0,252,242,15,88,193,102,15,126,193,49, + 205,255,131,232,8,252,233,244,1,248,137,129,252,248,239,15,130,244,62,129, + 122,253,4,239,255,248,2,15,205,252,233,244,88,248,138,129,252,248,239,15, + 130,244,62,129,122,253,4,239,255,248,2,252,247,213,255,248,88,252,242,15, + 42,197,252,233,244,71,255,248,134,139,68,36,4,252,233,244,62,255,248,139, + 129,252,248,239,15,130,244,62,129,122,253,4,239,255,248,2,129,122,253,12, + 239,15,133,244,62,139,74,8,255,248,139,129,252,248,239,15,130,244,62,129, + 122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,252,242,15,16, + 2,252,242,15,16,74,8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252, + 242,15,88,202,102,15,126,197,102,15,126,201,255,211,229,252,233,244,88,255, + 248,140,129,252,248,239,15,130,244,62,129,122,253,4,239,255,248,140,129,252, + 248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239, + 15,131,244,62,252,242,15,16,2,252,242,15,16,74,8,72,189,237,237,102,72,15, + 110,213,252,242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201, + 255,211,252,237,252,233,244,88,255,248,141,129,252,248,239,15,130,244,62, + 129,122,253,4,239,255,248,141,129,252,248,239,15,130,244,62,129,122,253,4, + 239,15,131,244,62,129,122,253,12,239,15,131,244,62,252,242,15,16,2,252,242, + 15,16,74,8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252,242,15, + 88,202,102,15,126,197,102,15,126,201,255,211,252,253,252,233,244,88,255,248, + 142,129,252,248,239,15,130,244,62,129,122,253,4,239,255,248,142,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239,15,131, + 244,62,252,242,15,16,2,252,242,15,16,74,8,72,189,237,237,102,72,15,110,213, + 252,242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201,255,211, + 197,252,233,244,88,255,248,143,129,252,248,239,15,130,244,62,129,122,253, + 4,239,255,248,143,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,129,122,253,12,239,15,131,244,62,252,242,15,16,2,252,242,15,16,74, + 8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252,242,15,88,202,102, + 15,126,197,102,15,126,201,255,211,205,252,233,244,88,248,126,184,237,252, + 233,244,62,248,128,184,237,248,62,139,108,36,24,139,90,252,252,137,92,36, + 28,137,149,233,141,68,194,252,248,141,136,233,137,133,233,139,66,252,248, + 59,141,233,15,135,244,251,137,252,239,252,255,144,233,139,149,233,133,192, + 15,143,244,77,248,1,255,139,141,233,41,209,193,252,233,3,133,192,141,65,1, + 139,106,252,248,15,133,244,32,139,157,233,139,11,15,182,252,233,15,182,205, + 131,195,4,65,252,255,36,252,238,248,32,137,209,252,247,195,237,15,133,244, + 249,15,182,107,252,253,72,252,247,213,141,20,252,234,252,233,244,28,248,3, + 137,221,131,229,252,248,41,252,234,252,233,244,28,248,5,190,237,137,252,239, + 232,251,1,0,139,149,233,49,192,252,233,244,1,248,74,93,72,137,108,36,8,139, + 108,36,24,137,92,36,28,137,149,233,255,141,68,194,252,248,137,252,239,137, + 133,233,232,251,1,22,139,149,233,139,133,233,41,208,193,232,3,131,192,1,72, + 
139,108,36,8,85,195,248,144,255,65,15,182,134,233,168,235,15,133,244,251, + 168,235,15,133,244,247,168,235,15,132,244,247,65,252,255,142,233,252,233, + 244,247,255,248,145,65,15,182,134,233,168,235,15,133,244,251,252,233,244, + 247,248,146,65,15,182,134,233,168,235,15,133,244,251,168,235,15,132,244,251, + 65,252,255,142,233,15,132,244,247,168,235,15,132,244,251,248,1,255,139,108, + 36,24,137,149,233,137,222,137,252,239,232,251,1,23,248,3,139,149,233,248, + 4,15,182,75,252,253,248,5,15,182,107,252,252,15,183,67,252,254,65,252,255, + 164,253,252,238,233,248,147,131,195,4,139,77,232,137,76,36,4,252,233,244, + 4,248,148,255,139,106,252,248,139,173,233,15,182,133,233,141,4,194,139,108, + 36,24,137,149,233,137,133,233,137,222,65,141,190,233,73,137,174,233,137,92, + 36,28,232,251,1,24,252,233,244,3,255,248,149,137,92,36,28,255,248,150,255, + 137,92,36,28,131,203,1,248,1,255,141,68,194,252,248,139,108,36,24,137,149, + 233,137,133,233,137,222,137,252,239,232,251,1,25,199,68,36,28,0,0,0,0,255, + 131,227,252,254,255,139,149,233,72,137,193,139,133,233,41,208,72,137,205, + 15,182,75,252,253,193,232,3,131,192,1,252,255,229,248,151,255,65,85,65,84, + 65,83,65,82,65,81,65,80,87,86,85,72,141,108,36,88,85,83,82,81,80,15,182,69, + 252,248,138,101,252,240,76,137,125,252,248,76,137,117,252,240,68,139,117, + 0,65,139,142,233,65,199,134,233,237,65,137,134,233,65,137,142,233,72,129, + 252,236,239,72,131,197,128,252,242,68,15,17,125,252,248,252,242,68,15,17, + 117,252,240,252,242,68,15,17,109,232,252,242,68,15,17,101,224,252,242,68, + 15,17,93,216,252,242,68,15,17,85,208,252,242,68,15,17,77,200,252,242,68,15, + 17,69,192,252,242,15,17,125,184,252,242,15,17,117,176,252,242,15,17,109,168, + 252,242,15,17,101,160,252,242,15,17,93,152,252,242,15,17,85,144,252,242,15, + 17,77,136,252,242,15,17,69,128,65,139,174,233,65,139,150,233,73,137,174,233, + 65,199,134,233,0,0,0,0,137,149,233,72,137,230,65,141,190,233,232,251,1,26, + 72,139,141,233,72,129,225,239,72,137,204,137,169,233,139,149,233,139,153, + 233,252,233,244,247,255,248,152,255,72,131,196,16,248,1,76,139,108,36,8,76, + 139,36,36,133,192,15,136,244,249,137,68,36,4,68,139,122,252,248,69,139,191, + 233,69,139,191,233,65,199,134,233,0,0,0,0,65,199,134,233,237,139,3,15,182, + 204,15,182,232,131,195,4,193,232,16,129,252,253,239,15,130,244,248,139,68, + 36,4,248,2,65,252,255,36,252,238,248,3,252,247,216,137,252,239,137,198,232, + 251,1,1,255,248,90,255,217,124,36,4,137,68,36,8,102,184,0,4,102,11,68,36, + 4,102,37,252,255,252,247,102,137,68,36,6,217,108,36,6,217,252,252,217,108, + 36,4,139,68,36,8,195,255,248,153,72,184,237,237,102,72,15,110,208,72,184, + 237,237,102,72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,134,244, + 247,102,15,85,208,252,242,15,88,203,252,242,15,92,203,102,15,86,202,72,184, + 237,237,102,72,15,110,208,252,242,15,194,193,1,102,15,84,194,252,242,15,92, + 200,15,40,193,248,1,195,248,92,255,217,124,36,4,137,68,36,8,102,184,0,8,102, + 11,68,36,4,102,37,252,255,252,251,102,137,68,36,6,217,108,36,6,217,252,252, + 217,108,36,4,139,68,36,8,195,255,248,154,72,184,237,237,102,72,15,110,208, + 72,184,237,237,102,72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15, + 134,244,247,102,15,85,208,252,242,15,88,203,252,242,15,92,203,102,15,86,202, + 72,184,237,237,102,72,15,110,208,252,242,15,194,193,6,102,15,84,194,252,242, + 15,92,200,15,40,193,248,1,195,248,113,255,217,124,36,4,137,68,36,8,102,184, + 0,12,102,11,68,36,4,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4, + 139,68,36,8,195,255,248,155,72,184,237,237,102,72,15,110,208,72,184,237,237, 
+ 102,72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102, + 15,85,208,15,40,193,252,242,15,88,203,252,242,15,92,203,72,184,237,237,102, + 72,15,110,216,252,242,15,194,193,1,102,15,84,195,252,242,15,92,200,102,15, + 86,202,15,40,193,248,1,195,248,156,255,15,40,232,252,242,15,94,193,72,184, + 237,237,102,72,15,110,208,72,184,237,237,102,72,15,110,216,15,40,224,102, + 15,84,226,102,15,46,220,15,134,244,247,102,15,85,208,252,242,15,88,227,252, + 242,15,92,227,102,15,86,226,72,184,237,237,102,72,15,110,208,252,242,15,194, + 196,1,102,15,84,194,252,242,15,92,224,15,40,197,252,242,15,89,204,252,242, + 15,92,193,195,248,1,252,242,15,89,200,15,40,197,252,242,15,92,193,195,255, + 217,193,216,252,241,217,124,36,4,102,184,0,4,102,11,68,36,4,102,37,252,255, + 252,247,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,222,201,222, + 252,233,195,255,248,97,217,252,234,222,201,248,157,217,84,36,252,248,129, + 124,36,252,248,0,0,128,127,15,132,244,247,129,124,36,252,248,0,0,128,252, + 255,15,132,244,248,248,158,217,192,217,252,252,220,252,233,217,201,217,252, + 240,217,232,222,193,217,252,253,221,217,248,1,195,248,2,221,216,217,252,238, + 195,255,248,116,255,248,159,252,242,15,45,193,252,242,15,42,208,102,15,46, + 202,15,133,244,254,15,138,244,255,248,160,131,252,248,1,15,142,244,252,248, + 1,169,1,0,0,0,15,133,244,248,252,242,15,89,192,209,232,252,233,244,1,248, + 2,209,232,15,132,244,251,15,40,200,248,3,252,242,15,89,192,209,232,15,132, + 244,250,15,131,244,3,255,252,242,15,89,200,252,233,244,3,248,4,252,242,15, + 89,193,248,5,195,248,6,15,132,244,5,15,130,244,253,252,247,216,232,244,1, + 72,184,237,237,102,72,15,110,200,252,242,15,94,200,15,40,193,195,248,7,72, + 184,237,237,102,72,15,110,192,195,248,8,102,72,15,126,200,72,209,224,72,193, + 192,12,72,61,252,254,15,0,0,15,132,244,248,102,72,15,126,192,72,209,224,15, + 132,244,250,255,72,193,192,12,72,61,252,254,15,0,0,15,132,244,251,252,242, + 15,17,76,36,252,240,252,242,15,17,68,36,252,248,221,68,36,252,240,221,68, + 36,252,248,217,252,241,217,192,217,252,252,220,252,233,217,201,217,252,240, + 217,232,222,193,217,252,253,221,217,221,92,36,252,248,252,242,15,16,68,36, + 252,248,195,248,9,72,184,237,237,102,72,15,110,208,102,15,46,194,15,132,244, + 247,15,40,193,248,1,195,248,2,72,184,237,237,102,72,15,110,208,102,15,84, + 194,72,184,237,237,102,72,15,110,208,102,15,46,194,15,132,244,1,102,15,80, + 193,15,87,192,136,196,15,146,208,48,224,15,133,244,1,248,3,72,184,237,237, + 255,102,72,15,110,192,195,248,4,102,15,80,193,133,192,15,133,244,3,15,87, + 192,195,248,5,102,15,80,193,133,192,15,132,244,3,15,87,192,195,248,161,255, + 131,252,255,1,15,130,244,90,15,132,244,92,131,252,255,3,15,130,244,113,15, + 135,244,248,252,242,15,81,192,195,248,2,252,242,15,17,68,36,252,248,221,68, + 36,252,248,131,252,255,5,15,135,244,248,15,132,244,247,232,244,97,252,233, + 244,253,248,1,232,244,157,255,252,233,244,253,248,2,131,252,255,7,15,132, + 244,247,15,135,244,248,217,252,237,217,201,217,252,241,252,233,244,253,248, + 1,217,232,217,201,217,252,241,252,233,244,253,248,2,131,252,255,9,15,132, + 244,247,15,135,244,248,217,252,236,217,201,217,252,241,252,233,244,253,248, + 1,255,217,252,254,252,233,244,253,248,2,131,252,255,11,15,132,244,247,15, + 135,244,255,217,252,255,252,233,244,253,248,1,217,252,242,221,216,248,7,221, + 92,36,252,248,252,242,15,16,68,36,252,248,195,255,139,124,36,12,221,68,36, + 4,131,252,255,1,15,130,244,90,15,132,244,92,131,252,255,3,15,130,244,113, + 15,135,244,248,217,252,250,195,248,2,131,252,255,5,15,130,244,97,15,132,244, 
+ 157,131,252,255,7,15,132,244,247,15,135,244,248,217,252,237,217,201,217,252, + 241,195,248,1,217,232,217,201,217,252,241,195,248,2,131,252,255,9,15,132, + 244,247,255,15,135,244,248,217,252,236,217,201,217,252,241,195,248,1,217, + 252,254,195,248,2,131,252,255,11,15,132,244,247,15,135,244,255,217,252,255, + 195,248,1,217,252,242,221,216,195,255,248,9,204,255,248,162,255,131,252,255, + 1,15,132,244,247,15,135,244,248,252,242,15,88,193,195,248,1,252,242,15,92, + 193,195,248,2,131,252,255,3,15,132,244,247,15,135,244,248,252,242,15,89,193, + 195,248,1,252,242,15,94,193,195,248,2,131,252,255,5,15,130,244,156,15,132, + 244,116,131,252,255,7,15,132,244,247,15,135,244,248,72,184,237,237,255,102, + 72,15,110,200,15,87,193,195,248,1,72,184,237,237,102,72,15,110,200,15,84, + 193,195,248,2,131,252,255,9,15,135,244,248,252,242,15,17,68,36,252,248,252, + 242,15,17,76,36,252,240,221,68,36,252,248,221,68,36,252,240,15,132,244,247, + 217,252,243,248,7,221,92,36,252,248,252,242,15,16,68,36,252,248,195,248,1, + 217,201,217,252,253,221,217,252,233,244,7,248,2,131,252,255,11,15,132,244, + 247,15,135,244,255,252,242,15,93,193,195,248,1,252,242,15,95,193,195,248, + 9,204,255,139,68,36,20,221,68,36,4,221,68,36,12,131,252,248,1,15,132,244, + 247,15,135,244,248,222,193,195,248,1,222,252,233,195,248,2,131,252,248,3, + 15,132,244,247,15,135,244,248,222,201,195,248,1,222,252,249,195,248,2,131, + 252,248,5,15,130,244,156,15,132,244,116,131,252,248,7,15,132,244,247,15,135, + 244,248,255,221,216,217,224,195,248,1,221,216,217,225,195,248,2,131,252,248, + 9,15,132,244,247,15,135,244,248,217,252,243,195,248,1,217,201,217,252,253, + 221,217,195,248,2,131,252,248,11,15,132,244,247,15,135,244,255,255,219,252, + 233,219,209,221,217,195,248,1,219,252,233,218,209,221,217,195,255,221,225, + 223,224,252,246,196,1,15,132,244,248,217,201,248,2,221,216,195,248,1,221, + 225,223,224,252,246,196,1,15,133,244,248,217,201,248,2,221,216,195,255,248, + 163,137,252,248,83,15,162,137,6,137,94,4,137,78,8,137,86,12,91,195,248,164, + 255,85,72,137,229,83,72,137,252,251,139,131,233,72,41,196,255,15,182,139, + 233,131,252,233,1,15,136,244,248,248,1,72,139,132,253,203,233,72,137,132, + 253,204,233,131,252,233,1,15,137,244,1,248,2,15,182,131,233,72,139,187,233, + 72,139,179,233,72,139,147,233,72,139,139,233,76,139,131,233,76,139,139,233, + 133,192,15,132,244,251,15,40,131,233,15,40,139,233,255,15,40,147,233,15,40, + 155,233,131,252,248,4,15,134,244,251,15,40,163,233,15,40,171,233,15,40,179, + 233,15,40,187,233,248,5,252,255,147,233,72,137,131,233,15,41,131,233,72,137, + 147,233,15,41,139,233,255,72,139,93,252,248,201,195,255,248,165,255,249,255, + 129,124,253,202,4,239,15,133,244,253,129,124,253,194,4,239,15,133,244,254, + 139,44,202,131,195,4,59,44,194,255,15,141,244,255,255,15,140,244,255,255, + 15,143,244,255,255,15,142,244,255,255,248,6,15,183,67,252,254,141,156,253, + 131,233,248,9,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255, + 36,252,238,248,7,15,135,244,43,129,124,253,194,4,239,15,130,244,247,15,133, + 244,43,255,252,242,15,42,4,194,252,233,244,248,255,221,4,202,219,4,194,252, + 233,244,249,255,248,8,15,135,244,43,255,252,242,15,42,12,202,252,242,15,16, + 4,194,131,195,4,102,15,46,193,255,15,134,244,9,255,15,135,244,9,255,15,130, + 244,9,255,15,131,244,9,255,252,233,244,6,255,219,4,202,252,233,244,248,255, + 129,124,253,202,4,239,15,131,244,43,129,124,253,194,4,239,15,131,244,43,255, + 248,1,252,242,15,16,4,194,248,2,131,195,4,102,15,46,4,202,248,3,255,248,1, + 
221,4,202,248,2,221,4,194,248,3,131,195,4,255,223,252,233,221,216,255,218, + 252,233,223,224,158,255,15,134,244,247,255,15,135,244,247,255,15,130,244, + 247,255,15,131,244,247,255,15,183,67,252,254,141,156,253,131,233,248,1,139, + 3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,255,139, + 108,194,4,131,195,4,255,129,252,253,239,15,133,244,253,129,124,253,202,4, + 239,15,133,244,254,139,44,194,59,44,202,255,15,133,244,255,255,15,132,244, + 255,255,15,183,67,252,254,141,156,253,131,233,248,9,139,3,15,182,204,15,182, + 232,131,195,4,193,232,16,65,252,255,36,252,238,248,7,15,135,244,251,129,124, + 253,202,4,239,15,130,244,247,15,133,244,251,255,252,242,15,42,4,202,255,219, + 4,202,255,252,233,244,248,248,8,15,135,244,251,255,252,242,15,42,4,194,102, + 15,46,4,202,255,219,4,194,221,4,202,255,252,233,244,250,255,129,252,253,239, + 15,131,244,251,129,124,253,202,4,239,15,131,244,251,255,248,1,252,242,15, + 16,4,202,248,2,102,15,46,4,194,248,4,255,248,1,221,4,202,248,2,221,4,194, + 248,4,255,15,138,244,248,15,133,244,248,255,15,138,244,248,15,132,244,247, + 255,248,1,15,183,67,252,254,141,156,253,131,233,248,2,255,248,2,15,183,67, + 252,254,141,156,253,131,233,248,1,255,252,233,244,9,255,248,5,255,129,252, + 253,239,15,132,244,48,129,124,253,202,4,239,15,132,244,48,255,57,108,202, + 4,15,133,244,2,129,252,253,239,15,131,244,1,139,12,202,139,4,194,57,193,15, + 132,244,1,129,252,253,239,15,135,244,2,139,169,233,133,252,237,15,132,244, + 2,252,246,133,233,235,15,133,244,2,255,49,252,237,255,189,1,0,0,0,255,252, + 233,244,47,255,248,3,129,252,253,239,255,15,133,244,9,255,252,233,244,48, + 255,72,252,247,208,139,108,202,4,131,195,4,129,252,253,239,15,133,244,249, + 139,12,202,65,59,12,135,255,139,108,202,4,131,195,4,255,129,252,253,239,15, + 133,244,253,65,129,124,253,199,4,239,15,133,244,254,65,139,44,199,59,44,202, + 255,15,183,67,252,254,141,156,253,131,233,248,9,139,3,15,182,204,15,182,232, + 131,195,4,193,232,16,65,252,255,36,252,238,248,7,15,135,244,249,65,129,124, + 253,199,4,239,15,130,244,247,255,252,242,65,15,42,4,199,255,65,219,4,199, + 255,252,233,244,248,248,8,255,252,242,15,42,4,202,102,65,15,46,4,199,255, + 219,4,202,65,221,4,199,255,129,252,253,239,15,131,244,249,255,248,1,252,242, + 65,15,16,4,199,248,2,102,15,46,4,202,248,4,255,248,1,65,221,4,199,248,2,221, + 4,202,248,4,255,72,252,247,208,139,108,202,4,131,195,4,57,197,255,15,133, + 244,249,15,183,67,252,254,141,156,253,131,233,248,2,139,3,15,182,204,15,182, + 232,131,195,4,193,232,16,65,252,255,36,252,238,248,3,129,252,253,239,15,133, + 244,2,252,233,244,48,255,15,132,244,248,129,252,253,239,15,132,244,48,15, + 183,67,252,254,141,156,253,131,233,248,2,139,3,15,182,204,15,182,232,131, + 195,4,193,232,16,65,252,255,36,252,238,255,139,108,194,4,131,195,4,129,252, + 253,239,255,137,108,202,4,139,44,194,137,44,202,255,72,139,44,194,72,137, + 44,202,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252, + 238,255,49,252,237,129,124,253,194,4,239,129,213,239,137,108,202,4,139,3, + 15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,255,129, + 124,253,194,4,239,15,133,244,251,139,44,194,252,247,221,15,128,244,250,199, + 68,202,4,237,137,44,202,248,9,139,3,15,182,204,15,182,232,131,195,4,193,232, + 16,65,252,255,36,252,238,248,4,199,68,202,4,0,0,224,65,199,4,202,0,0,0,0, + 252,233,244,9,248,5,15,135,244,53,255,129,124,253,194,4,239,15,131,244,53, + 255,252,242,15,16,4,194,72,184,237,237,102,72,15,110,200,15,87,193,252,242, + 
15,17,4,202,255,221,4,194,217,224,221,28,202,255,129,124,253,194,4,239,15, + 133,244,248,139,4,194,255,139,128,233,248,1,199,68,202,4,237,137,4,202,255, + 15,87,192,252,242,15,42,128,233,248,1,252,242,15,17,4,202,255,219,128,233, + 248,1,221,28,202,255,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65, + 252,255,36,252,238,248,2,129,124,253,194,4,239,15,133,244,56,139,60,194,255, + 139,175,233,131,252,253,0,15,133,244,255,248,3,255,248,57,137,213,232,251, + 1,21,255,252,242,15,42,192,255,137,252,234,15,182,75,252,253,252,233,244, + 1,255,248,9,252,246,133,233,235,15,133,244,3,252,233,244,56,255,15,182,252, + 236,15,182,192,255,129,124,253,252,234,4,239,15,133,244,50,65,129,124,253, + 199,4,239,15,133,244,50,139,44,252,234,65,3,44,199,15,128,244,49,255,129, + 124,253,252,234,4,239,15,133,244,52,65,129,124,253,199,4,239,15,133,244,52, + 65,139,4,199,3,4,252,234,15,128,244,51,255,129,124,253,252,234,4,239,15,133, + 244,55,129,124,253,194,4,239,15,133,244,55,139,44,252,234,3,44,194,15,128, + 244,54,255,199,68,202,4,237,255,129,124,253,252,234,4,239,15,131,244,50,255, + 65,129,124,253,199,4,239,15,131,244,50,255,252,242,15,16,4,252,234,252,242, + 65,15,88,4,199,255,221,4,252,234,65,220,4,199,255,129,124,253,252,234,4,239, + 15,131,244,52,255,65,129,124,253,199,4,239,15,131,244,52,255,252,242,65,15, + 16,4,199,252,242,15,88,4,252,234,255,65,221,4,199,220,4,252,234,255,129,124, + 253,252,234,4,239,15,131,244,55,129,124,253,194,4,239,15,131,244,55,255,252, + 242,15,16,4,252,234,252,242,15,88,4,194,255,221,4,252,234,220,4,194,255,129, + 124,253,252,234,4,239,15,133,244,50,65,129,124,253,199,4,239,15,133,244,50, + 139,44,252,234,65,43,44,199,15,128,244,49,255,129,124,253,252,234,4,239,15, + 133,244,52,65,129,124,253,199,4,239,15,133,244,52,65,139,4,199,43,4,252,234, + 15,128,244,51,255,129,124,253,252,234,4,239,15,133,244,55,129,124,253,194, + 4,239,15,133,244,55,139,44,252,234,43,44,194,15,128,244,54,255,252,242,15, + 16,4,252,234,252,242,65,15,92,4,199,255,221,4,252,234,65,220,36,199,255,252, + 242,65,15,16,4,199,252,242,15,92,4,252,234,255,65,221,4,199,220,36,252,234, + 255,252,242,15,16,4,252,234,252,242,15,92,4,194,255,221,4,252,234,220,36, + 194,255,129,124,253,252,234,4,239,15,133,244,50,65,129,124,253,199,4,239, + 15,133,244,50,139,44,252,234,65,15,175,44,199,15,128,244,49,255,129,124,253, + 252,234,4,239,15,133,244,52,65,129,124,253,199,4,239,15,133,244,52,65,139, + 4,199,15,175,4,252,234,15,128,244,51,255,129,124,253,252,234,4,239,15,133, + 244,55,129,124,253,194,4,239,15,133,244,55,139,44,252,234,15,175,44,194,15, + 128,244,54,255,252,242,15,16,4,252,234,252,242,65,15,89,4,199,255,221,4,252, + 234,65,220,12,199,255,252,242,65,15,16,4,199,252,242,15,89,4,252,234,255, + 65,221,4,199,220,12,252,234,255,252,242,15,16,4,252,234,252,242,15,89,4,194, + 255,221,4,252,234,220,12,194,255,252,242,15,16,4,252,234,252,242,65,15,94, + 4,199,255,221,4,252,234,65,220,52,199,255,252,242,65,15,16,4,199,252,242, + 15,94,4,252,234,255,65,221,4,199,220,52,252,234,255,252,242,15,16,4,252,234, + 252,242,15,94,4,194,255,221,4,252,234,220,52,194,255,252,242,15,16,4,252, + 234,252,242,65,15,16,12,199,255,221,4,252,234,65,221,4,199,255,252,242,65, + 15,16,4,199,252,242,15,16,12,252,234,255,65,221,4,199,221,4,252,234,255,252, + 242,15,16,4,252,234,252,242,15,16,12,194,255,221,4,252,234,221,4,194,255, + 248,166,232,244,156,255,252,233,244,166,255,232,244,116,255,15,182,252,236, + 15,182,192,139,124,36,24,137,151,233,141,52,194,137,194,41,252,234,248,35, + 
137,252,253,137,92,36,28,232,251,1,27,139,149,233,133,192,15,133,244,44,15, + 182,107,252,255,15,182,75,252,253,72,139,4,252,234,72,137,4,202,139,3,15, + 182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,255,72,252, + 247,208,65,139,4,135,199,68,202,4,237,137,4,202,139,3,15,182,204,15,182,232, + 131,195,4,193,232,16,65,252,255,36,252,238,255,15,191,192,199,68,202,4,237, + 137,4,202,255,15,191,192,252,242,15,42,192,252,242,15,17,4,202,255,223,67, + 252,254,221,28,202,255,252,242,65,15,16,4,199,252,242,15,17,4,202,255,65, + 221,4,199,221,28,202,255,72,252,247,208,137,68,202,4,139,3,15,182,204,15, + 182,232,131,195,4,193,232,16,65,252,255,36,252,238,255,141,76,202,12,141, + 68,194,4,189,237,137,105,252,248,248,1,137,41,131,193,8,57,193,15,134,244, + 1,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238, + 255,139,106,252,248,139,172,253,133,233,139,173,233,72,139,69,0,72,137,4, + 202,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238, + 255,139,106,252,248,139,172,253,141,233,128,189,233,0,139,173,233,139,12, + 194,139,68,194,4,137,77,0,137,69,4,15,132,244,247,252,246,133,233,235,15, + 133,244,248,248,1,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252, + 255,36,252,238,248,2,129,232,239,129,252,248,239,15,134,244,1,252,246,129, + 233,235,15,132,244,1,137,252,238,137,213,65,141,190,233,255,232,251,1,28, + 137,252,234,252,233,244,1,255,72,252,247,208,139,106,252,248,139,172,253, + 141,233,65,139,12,135,139,133,233,137,8,199,64,4,237,252,246,133,233,235, + 15,133,244,248,248,1,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65, + 252,255,36,252,238,248,2,252,246,129,233,235,15,132,244,1,128,189,233,0,15, + 132,244,1,137,213,137,198,65,141,190,233,232,251,1,28,137,252,234,252,233, + 244,1,255,139,106,252,248,255,252,242,65,15,16,4,199,255,139,172,253,141, + 233,139,141,233,255,252,242,15,17,1,255,221,25,255,72,252,247,208,139,106, + 252,248,139,172,253,141,233,139,141,233,137,65,4,139,3,15,182,204,15,182, + 232,131,195,4,193,232,16,65,252,255,36,252,238,255,141,156,253,131,233,139, + 108,36,24,131,189,233,0,15,132,244,247,137,149,233,141,52,202,137,252,239, + 232,251,1,29,139,149,233,248,1,139,3,15,182,204,15,182,232,131,195,4,193, + 232,16,65,252,255,36,252,238,255,72,252,247,208,139,108,36,24,137,149,233, + 139,82,252,248,65,139,52,135,137,252,239,137,92,36,28,232,251,1,30,139,149, + 233,15,182,75,252,253,137,4,202,199,68,202,4,237,139,3,15,182,204,15,182, + 232,131,195,4,193,232,16,65,252,255,36,252,238,255,139,108,36,24,137,149, + 233,65,139,142,233,65,59,142,233,137,92,36,28,15,131,244,251,248,1,137,194, + 37,252,255,7,0,0,193,252,234,11,61,252,255,7,0,0,15,132,244,249,248,2,137, + 252,239,137,198,232,251,1,31,139,149,233,15,182,75,252,253,137,4,202,199, + 68,202,4,237,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255, + 36,252,238,248,3,184,1,8,0,0,252,233,244,2,248,5,137,252,239,232,251,1,32, + 15,183,67,252,254,252,233,244,1,255,72,252,247,208,139,108,36,24,65,139,142, + 233,137,92,36,28,65,59,142,233,137,149,233,15,131,244,249,248,2,65,139,52, + 135,137,252,239,232,251,1,33,139,149,233,15,182,75,252,253,137,4,202,199, + 68,202,4,237,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255, + 36,252,238,248,3,137,252,239,232,251,1,32,15,183,67,252,254,72,252,247,208, + 252,233,244,2,255,72,252,247,208,139,106,252,248,139,173,233,65,139,4,135, + 252,233,244,167,255,72,252,247,208,139,106,252,248,139,173,233,65,139,4,135, + 252,233,244,168,255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15, + 
133,244,38,139,44,252,234,255,129,124,253,194,4,239,15,133,244,251,139,4, + 194,255,129,124,253,194,4,239,15,131,244,251,255,252,242,15,16,4,194,252, + 242,15,45,192,252,242,15,42,200,102,15,46,193,255,15,133,244,38,255,59,133, + 233,15,131,244,38,193,224,3,3,133,233,129,120,253,4,239,15,132,244,248,72, + 139,40,72,137,44,202,248,1,139,3,15,182,204,15,182,232,131,195,4,193,232, + 16,65,252,255,36,252,238,248,2,131,189,233,0,15,132,244,249,139,141,233,252, + 246,129,233,235,15,132,244,38,15,182,75,252,253,248,3,199,68,202,4,237,252, + 233,244,1,248,5,255,129,124,253,194,4,239,15,133,244,38,139,4,194,252,233, + 244,167,255,15,182,252,236,15,182,192,72,252,247,208,65,139,4,135,129,124, + 253,252,234,4,239,15,133,244,36,139,44,252,234,248,167,139,141,233,35,136, + 233,105,201,239,3,141,233,248,1,129,185,233,239,15,133,244,250,57,129,233, + 15,133,244,250,129,121,253,4,239,15,132,244,251,15,182,67,252,253,72,139, + 41,72,137,44,194,248,2,255,139,3,15,182,204,15,182,232,131,195,4,193,232, + 16,65,252,255,36,252,238,248,3,15,182,67,252,253,199,68,194,4,237,252,233, + 244,2,248,4,139,137,233,133,201,15,133,244,1,248,5,139,141,233,133,201,15, + 132,244,3,252,246,129,233,235,15,133,244,3,252,233,244,36,255,15,182,252, + 236,15,182,192,129,124,253,252,234,4,239,15,133,244,37,139,44,252,234,59, + 133,233,15,131,244,37,193,224,3,3,133,233,129,120,253,4,239,15,132,244,248, + 72,139,40,72,137,44,202,248,1,139,3,15,182,204,15,182,232,131,195,4,193,232, + 16,65,252,255,36,252,238,248,2,131,189,233,0,15,132,244,249,139,141,233,252, + 246,129,233,235,15,132,244,37,255,15,182,75,252,253,248,3,199,68,202,4,237, + 252,233,244,1,255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15, + 133,244,41,139,44,252,234,255,15,133,244,41,255,59,133,233,15,131,244,41, + 193,224,3,3,133,233,129,120,253,4,239,15,132,244,249,248,1,252,246,133,233, + 235,15,133,244,253,248,2,72,139,44,202,72,137,40,139,3,15,182,204,15,182, + 232,131,195,4,193,232,16,65,252,255,36,252,238,248,3,131,189,233,0,15,132, + 244,1,139,141,233,252,246,129,233,235,255,15,132,244,41,15,182,75,252,253, + 252,233,244,1,248,5,129,124,253,194,4,239,15,133,244,41,139,4,194,252,233, + 244,168,248,7,128,165,233,235,65,139,142,233,65,137,174,233,137,141,233,15, + 182,75,252,253,252,233,244,2,255,15,182,252,236,15,182,192,72,252,247,208, + 65,139,4,135,129,124,253,252,234,4,239,15,133,244,39,139,44,252,234,248,168, + 139,141,233,35,136,233,105,201,239,198,133,233,0,3,141,233,248,1,129,185, + 233,239,15,133,244,251,57,129,233,15,133,244,251,129,121,253,4,239,15,132, + 244,250,248,2,255,252,246,133,233,235,15,133,244,253,248,3,15,182,67,252, + 253,72,139,44,194,72,137,41,139,3,15,182,204,15,182,232,131,195,4,193,232, + 16,65,252,255,36,252,238,248,4,131,189,233,0,15,132,244,2,137,12,36,139,141, + 233,252,246,129,233,235,15,132,244,39,139,12,36,252,233,244,2,248,5,139,137, + 233,133,201,15,133,244,1,255,139,141,233,133,201,15,132,244,252,252,246,129, + 233,235,15,132,244,39,248,6,137,4,36,199,68,36,4,237,137,108,36,8,139,124, + 36,24,137,151,233,72,141,20,36,137,252,238,137,252,253,137,92,36,28,232,251, + 1,34,139,149,233,139,108,36,8,137,193,252,233,244,2,248,7,128,165,233,235, + 65,139,134,233,65,137,174,233,137,133,233,252,233,244,3,255,15,182,252,236, + 15,182,192,129,124,253,252,234,4,239,15,133,244,40,139,44,252,234,59,133, + 233,15,131,244,40,193,224,3,3,133,233,129,120,253,4,239,15,132,244,249,248, + 1,252,246,133,233,235,15,133,244,253,248,2,72,139,12,202,72,137,8,139,3,15, + 182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,3,131,189, 
+ 233,0,15,132,244,1,255,139,141,233,252,246,129,233,235,15,132,244,40,15,182, + 75,252,253,252,233,244,1,248,7,128,165,233,235,65,139,142,233,65,137,174, + 233,137,141,233,15,182,75,252,253,252,233,244,2,255,68,137,60,36,69,139,60, + 199,248,1,141,12,202,139,105,252,248,252,246,133,233,235,15,133,244,253,248, + 2,139,68,36,4,131,232,1,15,132,244,250,68,1,252,248,59,133,233,15,135,244, + 251,68,41,252,248,65,193,231,3,68,3,189,233,248,3,72,139,41,131,193,8,73, + 137,47,65,131,199,8,131,232,1,15,133,244,3,248,4,68,139,60,36,139,3,15,182, + 204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,5,139,124,36, + 24,137,151,233,137,252,238,137,194,137,252,253,137,92,36,28,232,251,1,35, + 139,149,233,15,182,75,252,253,252,233,244,1,248,7,255,128,165,233,235,65, + 139,134,233,65,137,174,233,137,133,233,252,233,244,2,255,3,68,36,4,255,129, + 124,253,202,4,239,139,44,202,15,133,244,58,141,84,202,8,137,90,252,252,139, + 157,233,139,11,15,182,252,233,15,182,205,131,195,4,65,252,255,36,252,238, + 255,141,76,202,8,65,137,215,139,105,252,248,129,121,253,252,252,239,15,133, + 244,29,248,59,139,90,252,252,252,247,195,237,15,133,244,253,248,1,137,106, + 252,248,137,68,36,4,131,232,1,15,132,244,249,248,2,72,139,41,131,193,8,73, + 137,47,65,131,199,8,131,232,1,15,133,244,2,139,106,252,248,248,3,139,68,36, + 4,128,189,233,1,15,135,244,251,248,4,139,157,233,139,11,15,182,252,233,15, + 182,205,131,195,4,65,252,255,36,252,238,248,5,255,252,247,195,237,15,133, + 244,4,15,182,75,252,253,72,252,247,209,141,12,202,68,139,121,252,248,69,139, + 191,233,69,139,191,233,252,233,244,4,248,7,129,252,235,239,252,247,195,237, + 15,133,244,254,41,218,65,137,215,139,90,252,252,252,233,244,1,248,8,129,195, + 239,252,233,244,1,255,141,76,202,8,72,139,105,232,72,139,65,252,240,72,137, + 41,72,137,65,8,139,105,224,139,65,228,137,105,252,248,137,65,252,252,129, + 252,248,239,184,237,15,133,244,29,137,202,137,90,252,252,139,157,233,139, + 11,15,182,252,233,15,182,205,131,195,4,65,252,255,36,252,238,255,68,137,60, + 36,68,137,116,36,4,139,108,202,252,240,139,68,202,252,248,68,139,181,233, + 131,195,4,68,139,189,233,248,1,68,57,252,240,15,131,244,251,65,129,124,253, + 199,4,239,15,132,244,250,255,219,68,202,252,248,255,73,139,44,199,72,137, + 108,202,8,131,192,1,255,137,68,202,252,248,248,2,15,183,67,252,254,141,156, + 253,131,233,248,3,68,139,116,36,4,68,139,60,36,139,3,15,182,204,15,182,232, + 131,195,4,193,232,16,65,252,255,36,252,238,248,4,131,192,1,255,137,68,202, + 252,248,255,252,233,244,1,248,5,68,41,252,240,248,6,59,133,233,15,135,244, + 3,68,105,252,248,239,68,3,189,233,65,129,191,233,239,15,132,244,253,70,141, + 116,48,1,73,139,175,233,73,139,135,233,72,137,44,202,72,137,68,202,8,68,137, + 116,202,252,248,252,233,244,2,248,7,131,192,1,252,233,244,6,255,129,124,253, + 202,252,236,239,15,133,244,251,139,108,202,232,129,124,253,202,252,244,239, + 15,133,244,251,129,124,253,202,252,252,239,15,133,244,251,128,189,233,235, + 15,133,244,251,141,156,253,131,233,199,68,202,252,248,0,0,0,0,248,1,139,3, + 15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,5,198, + 67,252,252,235,141,156,253,131,233,198,3,235,252,233,244,1,255,15,182,252, + 236,15,182,192,68,137,60,36,68,141,188,253,194,233,141,12,202,68,43,122,252, + 252,133,252,237,15,132,244,251,141,108,252,233,252,248,65,57,215,15,131,244, + 248,248,1,73,139,71,252,248,65,131,199,8,72,137,1,131,193,8,57,252,233,15, + 131,244,249,65,57,215,15,130,244,1,248,2,199,65,4,237,131,193,8,57,252,233, + 
15,130,244,2,248,3,68,139,60,36,139,3,15,182,204,15,182,232,131,195,4,193, + 232,16,65,252,255,36,252,238,248,5,199,68,36,4,1,0,0,0,137,208,68,41,252, + 248,15,134,244,3,137,197,193,252,237,3,131,197,1,137,108,36,4,139,108,36, + 24,1,200,59,133,233,15,135,244,253,248,6,255,73,139,71,252,248,65,131,199, + 8,72,137,1,131,193,8,65,57,215,15,130,244,6,252,233,244,3,248,7,137,149,233, + 137,141,233,137,92,36,28,65,41,215,139,116,36,4,131,252,238,1,137,252,239, + 232,251,1,0,139,149,233,139,141,233,65,1,215,252,233,244,6,255,193,225,3, + 255,248,1,139,90,252,252,137,68,36,4,252,247,195,237,15,133,244,253,255,248, + 13,65,137,215,131,232,1,15,132,244,249,248,2,73,139,44,15,73,137,111,252, + 248,65,131,199,8,131,232,1,15,133,244,2,248,3,139,68,36,4,15,182,107,252, + 255,248,5,57,197,15,135,244,252,255,72,139,44,10,72,137,106,252,248,255,248, + 5,56,67,252,255,15,135,244,252,255,15,182,75,252,253,72,252,247,209,141,20, + 202,68,139,122,252,248,69,139,191,233,69,139,191,233,139,3,15,182,204,15, + 182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,6,255,65,199,71,252, + 252,237,65,131,199,8,255,199,68,194,252,244,237,255,131,192,1,252,233,244, + 5,248,7,141,171,233,252,247,197,237,15,133,244,14,41,252,234,255,1,252,233, + 255,137,221,209,252,237,129,229,239,102,65,131,172,253,46,233,1,15,132,244, + 148,255,141,12,202,255,129,121,253,4,239,15,133,244,255,255,129,121,253,12, + 239,15,133,244,60,129,121,253,20,239,15,133,244,60,139,41,131,121,16,0,15, + 140,244,251,255,129,121,253,12,239,15,133,244,165,129,121,253,20,239,15,133, + 244,165,255,139,105,16,133,252,237,15,136,244,251,3,41,15,128,244,247,137, + 41,255,59,105,8,199,65,28,237,137,105,24,255,15,142,244,253,248,1,248,6,141, + 156,253,131,233,255,141,156,253,131,233,15,183,67,252,254,15,142,245,248, + 1,248,6,255,15,143,244,253,248,6,141,156,253,131,233,248,1,255,248,7,139, + 3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,5, + 255,3,41,15,128,244,1,137,41,255,15,141,244,7,255,141,156,253,131,233,15, + 183,67,252,254,15,141,245,255,15,140,244,7,255,252,233,244,6,248,9,255,129, + 121,253,4,239,255,15,131,244,60,129,121,253,12,239,15,131,244,60,255,129, + 121,253,12,239,15,131,244,165,129,121,253,20,239,15,131,244,165,255,139,105, + 20,255,129,252,253,239,15,131,244,60,255,252,242,15,16,1,252,242,15,16,73, + 8,255,252,242,15,88,65,16,252,242,15,17,1,133,252,237,15,136,244,249,255, + 15,140,244,249,255,102,15,46,200,248,1,252,242,15,17,65,24,255,221,65,8,221, + 1,255,220,65,16,221,17,221,81,24,133,252,237,15,136,244,247,255,221,81,24, + 15,140,244,247,255,217,201,248,1,255,15,183,67,252,254,255,15,131,244,7,255, + 15,131,244,248,141,156,253,131,233,255,141,156,253,131,233,15,183,67,252, + 254,15,131,245,255,15,130,244,7,255,15,130,244,248,141,156,253,131,233,255, + 248,3,102,15,46,193,252,233,244,1,255,141,12,202,139,105,4,129,252,253,239, + 15,132,244,247,255,137,105,252,252,139,41,137,105,252,248,252,233,245,255, + 141,156,253,131,233,139,1,137,105,252,252,137,65,252,248,255,65,139,142,233, + 139,4,129,72,139,128,233,139,108,36,24,65,137,150,233,65,137,174,233,76,137, + 36,36,76,137,108,36,8,72,131,252,236,16,252,255,224,255,141,156,253,131,233, + 139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,255, + 137,221,209,252,237,129,229,239,102,65,131,172,253,46,233,1,15,132,244,150, + 255,68,139,187,233,139,108,36,24,141,12,202,59,141,233,15,135,244,24,15,182, + 139,233,57,200,15,134,244,249,248,2,255,15,183,67,252,254,252,233,245,255, + 
248,3,199,68,194,252,252,237,131,192,1,57,200,15,134,244,3,252,233,244,2, + 255,141,44,197,237,141,4,194,68,139,122,252,248,137,104,252,252,68,137,120, + 252,248,139,108,36,24,141,12,200,59,141,233,15,135,244,23,137,209,137,194, + 15,182,171,233,133,252,237,15,132,244,248,248,1,131,193,8,57,209,15,131,244, + 249,68,139,121,252,248,68,137,56,68,139,121,252,252,68,137,120,4,131,192, + 8,199,65,252,252,237,131,252,237,1,15,133,244,1,248,2,255,68,139,187,233, + 139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,255, + 248,3,199,64,4,237,131,192,8,131,252,237,1,15,133,244,3,252,233,244,2,255, + 139,106,252,248,76,139,189,233,139,108,36,24,141,68,194,252,248,137,149,233, + 141,136,233,59,141,233,137,133,233,255,76,137,252,254,137,252,239,255,15, + 135,244,22,65,199,134,233,237,255,65,252,255,215,255,65,252,255,150,233,255, + 65,199,134,233,237,139,149,233,141,12,194,252,247,217,3,141,233,139,90,252, + 252,252,233,244,12,255,254,0 +}; + +enum { + GLOB_vm_returnp, + GLOB_cont_dispatch, + GLOB_vm_returnc, + GLOB_BC_RET_Z, + GLOB_vm_return, + GLOB_vm_leave_cp, + GLOB_vm_leave_unw, + GLOB_vm_unwind_c, + GLOB_vm_unwind_c_eh, + GLOB_vm_unwind_rethrow, + GLOB_vm_unwind_ff, + GLOB_vm_unwind_ff_eh, + GLOB_vm_growstack_c, + GLOB_vm_growstack_v, + GLOB_vm_growstack_f, + GLOB_vm_resume, + GLOB_vm_pcall, + GLOB_vm_call, + GLOB_vm_call_dispatch, + GLOB_vmeta_call, + GLOB_vm_call_dispatch_f, + GLOB_vm_cpcall, + GLOB_vm_call_tail, + GLOB_cont_cat, + GLOB_cont_ra, + GLOB_BC_CAT_Z, + GLOB_vmeta_tgets, + GLOB_vmeta_tgetb, + GLOB_vmeta_tgetv, + GLOB_vmeta_tsets, + GLOB_vmeta_tsetb, + GLOB_vmeta_tsetv, + GLOB_cont_nop, + GLOB_vmeta_comp, + GLOB_vmeta_binop, + GLOB_cont_condt, + GLOB_cont_condf, + GLOB_vmeta_equal, + GLOB_vmeta_equal_cd, + GLOB_vmeta_arith_vno, + GLOB_vmeta_arith_vn, + GLOB_vmeta_arith_nvo, + GLOB_vmeta_arith_nv, + GLOB_vmeta_unm, + GLOB_vmeta_arith_vvo, + GLOB_vmeta_arith_vv, + GLOB_vmeta_len, + GLOB_BC_LEN_Z, + GLOB_vmeta_call_ra, + GLOB_BC_CALLT_Z, + GLOB_vmeta_for, + GLOB_ff_assert, + GLOB_fff_fallback, + GLOB_fff_res_, + GLOB_ff_type, + GLOB_fff_res1, + GLOB_ff_getmetatable, + GLOB_ff_setmetatable, + GLOB_ff_rawget, + GLOB_ff_tonumber, + GLOB_fff_resi, + GLOB_fff_resxmm0, + GLOB_fff_resn, + GLOB_ff_tostring, + GLOB_fff_gcstep, + GLOB_ff_next, + GLOB_fff_res2, + GLOB_fff_res, + GLOB_ff_pairs, + GLOB_ff_ipairs_aux, + GLOB_fff_res0, + GLOB_ff_ipairs, + GLOB_ff_pcall, + GLOB_ff_xpcall, + GLOB_ff_coroutine_resume, + GLOB_ff_coroutine_wrap_aux, + GLOB_ff_coroutine_yield, + GLOB_ff_math_abs, + GLOB_fff_resbit, + GLOB_ff_math_floor, + GLOB_vm_floor, + GLOB_ff_math_ceil, + GLOB_vm_ceil, + GLOB_ff_math_sqrt, + GLOB_ff_math_log, + GLOB_ff_math_log10, + GLOB_ff_math_exp, + GLOB_vm_exp_x87, + GLOB_ff_math_sin, + GLOB_ff_math_cos, + GLOB_ff_math_tan, + GLOB_ff_math_asin, + GLOB_ff_math_acos, + GLOB_ff_math_atan, + GLOB_ff_math_sinh, + GLOB_ff_math_cosh, + GLOB_ff_math_tanh, + GLOB_ff_math_deg, + GLOB_ff_math_rad, + GLOB_ff_math_atan2, + GLOB_ff_math_ldexp, + GLOB_ff_math_frexp, + GLOB_ff_math_modf, + GLOB_vm_trunc, + GLOB_ff_math_fmod, + GLOB_ff_math_pow, + GLOB_vm_pow, + GLOB_ff_math_min, + GLOB_ff_math_max, + GLOB_ff_string_len, + GLOB_ff_string_byte, + GLOB_ff_string_char, + GLOB_fff_newstr, + GLOB_ff_string_sub, + GLOB_fff_emptystr, + GLOB_ff_string_rep, + GLOB_fff_fallback_2, + GLOB_ff_string_reverse, + GLOB_fff_fallback_1, + GLOB_ff_string_lower, + GLOB_ff_string_upper, + GLOB_ff_table_getn, + GLOB_ff_bit_tobit, + GLOB_ff_bit_band, + GLOB_fff_fallback_bit_op, + GLOB_ff_bit_bor, + 
GLOB_ff_bit_bxor, + GLOB_ff_bit_bswap, + GLOB_ff_bit_bnot, + GLOB_ff_bit_lshift, + GLOB_ff_bit_rshift, + GLOB_ff_bit_arshift, + GLOB_ff_bit_rol, + GLOB_ff_bit_ror, + GLOB_vm_record, + GLOB_vm_rethook, + GLOB_vm_inshook, + GLOB_cont_hook, + GLOB_vm_hotloop, + GLOB_vm_callhook, + GLOB_vm_hotcall, + GLOB_vm_exit_handler, + GLOB_vm_exit_interp, + GLOB_vm_floor_sse, + GLOB_vm_ceil_sse, + GLOB_vm_trunc_sse, + GLOB_vm_mod, + GLOB_vm_exp2_x87, + GLOB_vm_exp2raw, + GLOB_vm_pow_sse, + GLOB_vm_powi_sse, + GLOB_vm_foldfpm, + GLOB_vm_foldarith, + GLOB_vm_cpuid, + GLOB_vm_ffi_call, + GLOB_assert_bad_for_arg_type, + GLOB_BC_MODVN_Z, + GLOB_BC_TGETS_Z, + GLOB_BC_TSETS_Z, + GLOB__MAX +}; +static const char *const globnames[] = { + "vm_returnp", + "cont_dispatch", + "vm_returnc", + "BC_RET_Z", + "vm_return", + "vm_leave_cp", + "vm_leave_unw", + "vm_unwind_c@8", + "vm_unwind_c_eh", + "vm_unwind_rethrow", + "vm_unwind_ff@4", + "vm_unwind_ff_eh", + "vm_growstack_c", + "vm_growstack_v", + "vm_growstack_f", + "vm_resume", + "vm_pcall", + "vm_call", + "vm_call_dispatch", + "vmeta_call", + "vm_call_dispatch_f", + "vm_cpcall", + "vm_call_tail", + "cont_cat", + "cont_ra", + "BC_CAT_Z", + "vmeta_tgets", + "vmeta_tgetb", + "vmeta_tgetv", + "vmeta_tsets", + "vmeta_tsetb", + "vmeta_tsetv", + "cont_nop", + "vmeta_comp", + "vmeta_binop", + "cont_condt", + "cont_condf", + "vmeta_equal", + "vmeta_equal_cd", + "vmeta_arith_vno", + "vmeta_arith_vn", + "vmeta_arith_nvo", + "vmeta_arith_nv", + "vmeta_unm", + "vmeta_arith_vvo", + "vmeta_arith_vv", + "vmeta_len", + "BC_LEN_Z", + "vmeta_call_ra", + "BC_CALLT_Z", + "vmeta_for", + "ff_assert", + "fff_fallback", + "fff_res_", + "ff_type", + "fff_res1", + "ff_getmetatable", + "ff_setmetatable", + "ff_rawget", + "ff_tonumber", + "fff_resi", + "fff_resxmm0", + "fff_resn", + "ff_tostring", + "fff_gcstep", + "ff_next", + "fff_res2", + "fff_res", + "ff_pairs", + "ff_ipairs_aux", + "fff_res0", + "ff_ipairs", + "ff_pcall", + "ff_xpcall", + "ff_coroutine_resume", + "ff_coroutine_wrap_aux", + "ff_coroutine_yield", + "ff_math_abs", + "fff_resbit", + "ff_math_floor", + "vm_floor", + "ff_math_ceil", + "vm_ceil", + "ff_math_sqrt", + "ff_math_log", + "ff_math_log10", + "ff_math_exp", + "vm_exp_x87", + "ff_math_sin", + "ff_math_cos", + "ff_math_tan", + "ff_math_asin", + "ff_math_acos", + "ff_math_atan", + "ff_math_sinh", + "ff_math_cosh", + "ff_math_tanh", + "ff_math_deg", + "ff_math_rad", + "ff_math_atan2", + "ff_math_ldexp", + "ff_math_frexp", + "ff_math_modf", + "vm_trunc", + "ff_math_fmod", + "ff_math_pow", + "vm_pow", + "ff_math_min", + "ff_math_max", + "ff_string_len", + "ff_string_byte", + "ff_string_char", + "fff_newstr", + "ff_string_sub", + "fff_emptystr", + "ff_string_rep", + "fff_fallback_2", + "ff_string_reverse", + "fff_fallback_1", + "ff_string_lower", + "ff_string_upper", + "ff_table_getn", + "ff_bit_tobit", + "ff_bit_band", + "fff_fallback_bit_op", + "ff_bit_bor", + "ff_bit_bxor", + "ff_bit_bswap", + "ff_bit_bnot", + "ff_bit_lshift", + "ff_bit_rshift", + "ff_bit_arshift", + "ff_bit_rol", + "ff_bit_ror", + "vm_record", + "vm_rethook", + "vm_inshook", + "cont_hook", + "vm_hotloop", + "vm_callhook", + "vm_hotcall", + "vm_exit_handler", + "vm_exit_interp", + "vm_floor_sse", + "vm_ceil_sse", + "vm_trunc_sse", + "vm_mod", + "vm_exp2_x87", + "vm_exp2raw", + "vm_pow_sse", + "vm_powi_sse", + "vm_foldfpm", + "vm_foldarith", + "vm_cpuid", + "vm_ffi_call@4", + "assert_bad_for_arg_type", + "BC_MODVN_Z", + "BC_TGETS_Z", + "BC_TSETS_Z", + (const char *)0 +}; +static const char *const extnames[] = 
{ + "lj_state_growstack@8", + "lj_err_throw@8", + "lj_meta_tget", + "lj_meta_tset", + "lj_meta_comp", + "lj_meta_equal", + "lj_meta_equal_cd@8", + "lj_meta_arith", + "lj_meta_len@8", + "lj_meta_call", + "lj_meta_for@8", + "lj_tab_get", + "lj_str_fromnumber@8", + "lj_str_fromnum@8", + "lj_tab_next", + "lj_tab_getinth@8", + "lj_ffh_coroutine_wrap_err@8", + "lj_vm_sinh", + "lj_vm_cosh", + "lj_vm_tanh", + "lj_str_new", + "lj_tab_len@4", + "lj_gc_step@4", + "lj_dispatch_ins@8", + "lj_trace_hot@8", + "lj_dispatch_call@8", + "lj_trace_exit@8", + "lj_meta_cat", + "lj_gc_barrieruv@8", + "lj_func_closeuv@8", + "lj_func_newL_gc", + "lj_tab_new", + "lj_gc_step_fixtop@4", + "lj_tab_dup@8", + "lj_tab_newkey", + "lj_tab_reasize", + (const char *)0 +}; +#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V) +#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V) +#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V) +#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V) +#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V) +#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V) +#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V) +#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V) +#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V) +#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V) +#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V) +#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V) +#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V) +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. 
*/ +static void build_subroutines(BuildCtx *ctx, int cmov, int sse) +{ + dasm_put(Dst, 0); + dasm_put(Dst, 2, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, FRAME_TYPE, DISPATCH_GL(vmstate), ~LJ_VMST_C); + dasm_put(Dst, 109, Dt1(->base), Dt1(->top), Dt1(->cframe), Dt1(->maxstack), LJ_TNIL); + dasm_put(Dst, 200, Dt1(->top), Dt1(->top), Dt1(->glref), Dt2(->vmstate), ~LJ_VMST_C, CFRAME_RAWMASK); + dasm_put(Dst, 302, 1+1, Dt1(->base), Dt1(->glref), GG_G2DISP, LJ_TFALSE, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, LUA_MINSTACK, -4+PC2PROTO(framesize), Dt1(->base)); + dasm_put(Dst, 385, Dt1(->top), Dt1(->base), Dt1(->top), Dt7(->pc), FRAME_CP, CFRAME_RESUME, Dt1(->glref), GG_G2DISP, Dt1(->cframe), Dt1(->status), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->status), Dt1(->base), Dt1(->top), FRAME_TYPE); + dasm_put(Dst, 548, FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base)); + dasm_put(Dst, 648, Dt1(->top), LJ_TFUNC, Dt7(->pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), FRAME_CP, LJ_TNIL); + dasm_put(Dst, 819, 0, Dt7(->pc), PC2PROTO(k), Dt1(->base), LJ_TSTR, BC_GGET, DISPATCH_GL(tmptv), LJ_TTAB); + dasm_put(Dst, 944); + if (LJ_DUALNUM) { + dasm_put(Dst, 958, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 967); + } else { + } + dasm_put(Dst, 979, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 2+1, LJ_TSTR, BC_GSET); + dasm_put(Dst, 1125, DISPATCH_GL(tmptv), LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 958, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 967); + } else { + } + dasm_put(Dst, 1149, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 3+1, Dt1(->base), Dt1(->base)); + dasm_put(Dst, 1321, -BCBIAS_J*4, LJ_TISTRUECOND, LJ_TISTRUECOND, Dt1(->base)); + dasm_put(Dst, 1420); +#if LJ_HASFFI + dasm_put(Dst, 1440, Dt1(->base)); +#endif + dasm_put(Dst, 1471); +#if LJ_DUALNUM + dasm_put(Dst, 1474); +#endif + dasm_put(Dst, 1480); +#if LJ_DUALNUM + dasm_put(Dst, 952); +#endif + dasm_put(Dst, 1493); +#if LJ_DUALNUM + dasm_put(Dst, 1474); +#endif + dasm_put(Dst, 1522, Dt1(->base), Dt1(->base), FRAME_CONT, 2+1, Dt1(->base), Dt1(->base)); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 1624); +#else + dasm_put(Dst, 1643); +#endif + dasm_put(Dst, 1648, Dt1(->base), Dt1(->base), Dt7(->pc), Dt1(->base), Dt1(->base), GG_DISP2STATIC, 1+1, LJ_TISTRUECOND); + dasm_put(Dst, 1834, 1+1, ~LJ_TNUMX); + if (cmov) { + dasm_put(Dst, 1903); + } else { + dasm_put(Dst, 1907); + } + dasm_put(Dst, 1916, ((char *)(&((GCfuncC *)0)->upvalue)), LJ_TSTR, ~LJ_TLIGHTUD, 1+1, LJ_TTAB, Dt6(->metatable), LJ_TNIL); + dasm_put(Dst, 1995, DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable), LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), DtB(->next)); + dasm_put(Dst, 2053, LJ_TNIL, LJ_TUDATA, LJ_TNUMX, LJ_TISNUM, LJ_TLIGHTUD); + dasm_put(Dst, 2119, LJ_TNUMX, DISPATCH_GL(gcroot[GCROOT_BASEMT]), 2+1, LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->metatable), LJ_TTAB); + dasm_put(Dst, 2190, Dt6(->marked), LJ_GC_BLACK, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist), 2+1, LJ_TTAB); + dasm_put(Dst, 2280, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2294); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 2316); + } else { + dasm_put(Dst, 2326); + } + dasm_put(Dst, 2333, 1+1, LJ_TSTR, LJ_TSTR, LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 2402, Dt1(->base)); + 
if (LJ_DUALNUM) { + dasm_put(Dst, 2428); + } else { + dasm_put(Dst, 2433); + } + dasm_put(Dst, 2438, Dt1(->base), 1+1, LJ_TTAB, Dt1(->base), Dt1(->top), Dt1(->base), 1+2); + dasm_put(Dst, 2530, LJ_TNIL, LJ_TNIL, 1+1, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 2577, Dt6(->metatable)); +#endif + dasm_put(Dst, 2586, Dt8(->upvalue[0]), LJ_TFUNC, LJ_TNIL, 1+3, 1+1, LJ_TTAB, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2572); + } else { + dasm_put(Dst, 2311); + } + dasm_put(Dst, 2641); + if (LJ_DUALNUM) { + dasm_put(Dst, 2646, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 2662, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + } else { + } + dasm_put(Dst, 2695, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->hmask), 1+0); + dasm_put(Dst, 2557, 1+1, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 2577, Dt6(->metatable)); +#endif + dasm_put(Dst, 2772, Dt8(->upvalue[0]), LJ_TFUNC); + if (LJ_DUALNUM) { + dasm_put(Dst, 2793, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 2805); + } else { + dasm_put(Dst, 2815); + } + dasm_put(Dst, 2822, 1+3, 1+1, 8+FRAME_PCALL, DISPATCH_GL(hookmask), HOOK_ACTIVE_SHIFT, 2+1, LJ_TFUNC); + dasm_put(Dst, 2887, LJ_TFUNC, 16+FRAME_PCALL, 1+1, LJ_TTHREAD, Dt1(->cframe), Dt1(->status), LUA_YIELD, Dt1(->top)); + dasm_put(Dst, 2976, Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP); + dasm_put(Dst, 3063, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack), LJ_TTRUE, FRAME_TYPE); + dasm_put(Dst, 3178, LJ_TFALSE, Dt1(->top), Dt1(->top), 1+2, Dt1(->top), Dt1(->base), Dt8(->upvalue[0].gcr), Dt1(->cframe)); + dasm_put(Dst, 3273, Dt1(->status), LUA_YIELD, Dt1(->top), Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top)); + dasm_put(Dst, 3339, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack)); + dasm_put(Dst, 3428, FRAME_TYPE, Dt1(->top), Dt1(->base), Dt1(->cframe), CFRAME_RESUME); + dasm_put(Dst, 3538, Dt1(->base), Dt1(->top), Dt1(->cframe), LUA_YIELD, Dt1(->status)); + if (!LJ_DUALNUM) { + dasm_put(Dst, 3565); + } + if (sse) { + dasm_put(Dst, 3568); + } + dasm_put(Dst, 3583, 1+1); + if (LJ_DUALNUM) { + dasm_put(Dst, 3594, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 3674, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3684, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32)); + } else { + dasm_put(Dst, 3715); + } + dasm_put(Dst, 3732, 1+1, FRAME_TYPE, LJ_TNIL); + if (LJ_DUALNUM) { + dasm_put(Dst, 3829, LJ_TISNUM); + } else { + dasm_put(Dst, 3674, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3851); + if (LJ_DUALNUM) { + dasm_put(Dst, 3860); + } + dasm_put(Dst, 2321); + } else { + dasm_put(Dst, 3894); + if (LJ_DUALNUM) { + } else { + dasm_put(Dst, 2328); + } + } + dasm_put(Dst, 3900); + if (LJ_DUALNUM) { + dasm_put(Dst, 3829, LJ_TISNUM); + } else { + dasm_put(Dst, 3674, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3903); + if (LJ_DUALNUM) { + dasm_put(Dst, 3860); + } + dasm_put(Dst, 2321); + } else { + dasm_put(Dst, 3912); + if (LJ_DUALNUM) { + } else { + dasm_put(Dst, 2328); + } + } + if (sse) { + dasm_put(Dst, 3918, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 3947, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 3976, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 4045, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 4102, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 4165, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1, 
LJ_TISNUM); + dasm_put(Dst, 4255); + if (sse) { + dasm_put(Dst, 4267, 1+1, LJ_TISNUM); + } else { + } + dasm_put(Dst, 4292); + if (sse) { + dasm_put(Dst, 4306, 1+1, LJ_TISNUM); + } else { + } + dasm_put(Dst, 4331); + if (sse) { + dasm_put(Dst, 4345, 1+1, LJ_TISNUM); + } else { + } + dasm_put(Dst, 4370); + if (sse) { + dasm_put(Dst, 4386, 1+1, LJ_TISNUM, Dt8(->upvalue[0])); + } else { + dasm_put(Dst, 4425, 1+1, LJ_TISNUM, Dt8(->upvalue[0])); + } + dasm_put(Dst, 4458, 2+1, LJ_TISNUM, LJ_TISNUM, 2+1, LJ_TISNUM, LJ_TISNUM); + dasm_put(Dst, 4523, 1+1, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4622); + } else { + dasm_put(Dst, 4628); + } + dasm_put(Dst, 4635); + if (sse) { + dasm_put(Dst, 4660); + } else { + dasm_put(Dst, 4666); + } + dasm_put(Dst, 4669, 1+2); + if (sse) { + dasm_put(Dst, 4678); + } else { + dasm_put(Dst, 4686); + } + dasm_put(Dst, 4694); + if (sse) { + dasm_put(Dst, 4697, (unsigned int)(U64x(43500000,00000000)), (unsigned int)((U64x(43500000,00000000))>>32)); + } else { + dasm_put(Dst, 4724); + } + dasm_put(Dst, 4741); + if (sse) { + dasm_put(Dst, 4757, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 4782, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 4804); + if (sse) { + dasm_put(Dst, 4826); + } else { + dasm_put(Dst, 4852); + } + dasm_put(Dst, 4869, 1+2); + if (sse) { + dasm_put(Dst, 4909); + } else { + dasm_put(Dst, 4917); + } + dasm_put(Dst, 4927, 2+1, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4979, 2+1, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 5026, 2+1, LJ_TISNUM, LJ_TISNUM); + } + dasm_put(Dst, 5067, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5080, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4622); + } else { + } + dasm_put(Dst, 5130); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 5141, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5162); + } else { + dasm_put(Dst, 2311); + } + dasm_put(Dst, 5183); + } else { + } + dasm_put(Dst, 5208, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5221, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4622); + } else { + } + dasm_put(Dst, 5130); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 5141, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5162); + } else { + dasm_put(Dst, 2311); + } + dasm_put(Dst, 5271); + } else { + } + if (!sse) { + dasm_put(Dst, 5296); + } + dasm_put(Dst, 5305, 1+1, LJ_TSTR); + if (LJ_DUALNUM) { + dasm_put(Dst, 5327, Dt5(->len)); + } else if (sse) { + dasm_put(Dst, 5335, Dt5(->len)); + } else { + dasm_put(Dst, 5346, Dt5(->len)); + } + dasm_put(Dst, 5354, 1+1, LJ_TSTR, Dt5(->len), Dt5([1])); + if (LJ_DUALNUM) { + dasm_put(Dst, 5330); + } else if (sse) { + dasm_put(Dst, 5392); + } else { + dasm_put(Dst, 5402); + } + dasm_put(Dst, 5413, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5446); + } else if (sse) { + dasm_put(Dst, 5469); + } else { + dasm_put(Dst, 5495); + } + dasm_put(Dst, 5519, Dt1(->base), Dt1(->base), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+2, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5628); + } else if (sse) { + dasm_put(Dst, 5640); + } else { + dasm_put(Dst, 5655); + } + dasm_put(Dst, 5667, LJ_TSTR, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2572); + } else { + dasm_put(Dst, 2311); + } + dasm_put(Dst, 5684, Dt5(->len)); + if (LJ_DUALNUM) { + dasm_put(Dst, 5694); + } else if (sse) { + dasm_put(Dst, 5698); + } else { + } + dasm_put(Dst, 5705, sizeof(GCstr)-1); + dasm_put(Dst, 5780, 2+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 5841, LJ_TSTR, 
LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5858); + } else if (sse) { + dasm_put(Dst, 5866); + } else { + dasm_put(Dst, 5877); + } + dasm_put(Dst, 5893, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(tmpbuf.buf), 1+1); + dasm_put(Dst, 5961, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf)); + dasm_put(Dst, 6028, 1+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz)); + dasm_put(Dst, 6101, sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), 1+1); + dasm_put(Dst, 6186, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf)); + dasm_put(Dst, 6260, 1+1, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 6327); + } else if (sse) { + dasm_put(Dst, 6334); + } else { + } + dasm_put(Dst, 6344, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6360); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6377, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 106); + if (LJ_DUALNUM || sse) { + if (!sse) { + } + dasm_put(Dst, 6401); + } else { + } + dasm_put(Dst, 6406, 1+1); + if (sse) { + dasm_put(Dst, 6417, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + dasm_put(Dst, 6427); + } + dasm_put(Dst, 2288, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6452); + } else { + } + dasm_put(Dst, 6467, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6492); + } else { + dasm_put(Dst, 6512); + } + if (sse) { + dasm_put(Dst, 6517); + } else { + } + dasm_put(Dst, 6534, 1+1); + if (sse) { + dasm_put(Dst, 6417, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + dasm_put(Dst, 6427); + } + dasm_put(Dst, 2288, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6452); + } else { + } + dasm_put(Dst, 6467, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6552); + } else { + dasm_put(Dst, 6512); + } + if (sse) { + dasm_put(Dst, 6572); + } else { + } + dasm_put(Dst, 6589, 1+1); + if (sse) { + dasm_put(Dst, 6417, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + dasm_put(Dst, 6427); + } + dasm_put(Dst, 2288, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6452); + } else { + } + dasm_put(Dst, 6467, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6607); + } else { + dasm_put(Dst, 6512); + } + if (sse) { + dasm_put(Dst, 6627); + } else { + } + dasm_put(Dst, 6644, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6377, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6667, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6377, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6691); + if (LJ_DUALNUM) { + dasm_put(Dst, 6401); + } else if (sse) { + dasm_put(Dst, 6697); + } else { + } + dasm_put(Dst, 6709); + if (LJ_DUALNUM) { + dasm_put(Dst, 6720, 1+1, 
LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6377, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6736, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 6751, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6818); + if (LJ_DUALNUM) { + dasm_put(Dst, 6825, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6377, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6736, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 6841, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6908); + if (LJ_DUALNUM) { + dasm_put(Dst, 6916, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6377, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6736, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 6932, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6999); + if (LJ_DUALNUM) { + dasm_put(Dst, 7007, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6377, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6736, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 7023, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 7090); + if (LJ_DUALNUM) { + dasm_put(Dst, 7097, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6435); + } else { + dasm_put(Dst, 2311); + } + if (sse) { + dasm_put(Dst, 6377, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6736, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 7113, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 7180, 1+2, 1+1, Dt1(->base), 8*LUA_MINSTACK, Dt1(->top), Dt1(->maxstack), Dt8(->f), Dt1(->base)); + dasm_put(Dst, 7256, Dt1(->top), Dt7(->pc), FRAME_TYPE, LUA_MINSTACK, Dt1(->base), Dt1(->base)); + dasm_put(Dst, 7383, Dt1(->top), Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 7422, DISPATCH_GL(hookmask), HOOK_VMEVENT, HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount)); +#endif + dasm_put(Dst, 7455, DISPATCH_GL(hookmask), HOOK_ACTIVE, DISPATCH_GL(hookmask), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE); + dasm_put(Dst, 7509, Dt1(->base), Dt1(->base), GG_DISP2STATIC); +#if LJ_HASJIT + dasm_put(Dst, 7576, Dt7(->pc), PC2PROTO(framesize), Dt1(->base), Dt1(->top), GG_DISP2J, DISPATCH_J(L)); +#endif + dasm_put(Dst, 7623); +#if LJ_HASJIT + dasm_put(Dst, 7450); +#endif + dasm_put(Dst, 7630); +#if LJ_HASJIT + dasm_put(Dst, 7633); +#endif + dasm_put(Dst, 7643, Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 7676); +#endif + dasm_put(Dst, 7681, Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + 
dasm_put(Dst, 7712, DISPATCH_GL(vmstate), DISPATCH_GL(vmstate), ~LJ_VMST_EXIT, DISPATCH_J(exitno), DISPATCH_J(parent), 16*8, DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_J(L), DISPATCH_GL(jit_L), Dt1(->base), GG_DISP2J, Dt1(->cframe), CFRAME_RAWMASK, CFRAME_OFS_L, Dt1(->base), CFRAME_OFS_PC); +#endif + dasm_put(Dst, 7951); +#if LJ_HASJIT + dasm_put(Dst, 7954, Dt7(->pc), PC2PROTO(k), DISPATCH_GL(jit_L), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, BC_FUNCF); +#endif + dasm_put(Dst, 8054); + if (!sse) { + dasm_put(Dst, 8057); + } + dasm_put(Dst, 8102, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + if (!sse) { + dasm_put(Dst, 8188); + } + dasm_put(Dst, 8233, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(bff00000,00000000)), (unsigned int)((U64x(bff00000,00000000))>>32)); + if (!sse) { + dasm_put(Dst, 8319); + } + dasm_put(Dst, 8358, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + if (sse) { + dasm_put(Dst, 8447, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + } else { + dasm_put(Dst, 8561); + } + dasm_put(Dst, 8608); + if (!sse) { + } else { + dasm_put(Dst, 8685); + } + dasm_put(Dst, 8688); + dasm_put(Dst, 8773, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + dasm_put(Dst, 8876, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(7ff00000,00000000)), (unsigned int)((U64x(7ff00000,00000000))>>32)); + dasm_put(Dst, 9038); +#if LJ_HASJIT + if (sse) { + dasm_put(Dst, 9079); + dasm_put(Dst, 9149); + dasm_put(Dst, 9221); + } else { + dasm_put(Dst, 9273); + dasm_put(Dst, 9365); + } + dasm_put(Dst, 9411); +#endif + dasm_put(Dst, 9415); + if (sse) { + dasm_put(Dst, 9418, (unsigned int)(U64x(80000000,00000000)), (unsigned int)((U64x(80000000,00000000))>>32)); + dasm_put(Dst, 9503, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32)); + } else { + dasm_put(Dst, 9631); + dasm_put(Dst, 9714); + if (cmov) { + dasm_put(Dst, 9769); + } else { + dasm_put(Dst, 9788); + } + dasm_put(Dst, 9411); + } + dasm_put(Dst, 9829); +#if LJ_HASFFI +#define DtE(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V) + dasm_put(Dst, 9853, DtE(->spadj)); +#if LJ_TARGET_WINDOWS +#endif + dasm_put(Dst, 9869, DtE(->nsp), offsetof(CCallState, stack), CCALL_SPS_EXTRA*8, DtE(->nfpr), DtE(->gpr[0]), DtE(->gpr[1]), DtE(->gpr[2]), DtE(->gpr[3]), DtE(->gpr[4]), DtE(->gpr[5]), DtE(->fpr[0]), DtE(->fpr[1])); + dasm_put(Dst, 9948, DtE(->fpr[2]), DtE(->fpr[3]), 
DtE(->fpr[4]), DtE(->fpr[5]), DtE(->fpr[6]), DtE(->fpr[7]), DtE(->func), DtE(->gpr[0]), DtE(->fpr[0]), DtE(->gpr[1]), DtE(->fpr[1])); +#if LJ_TARGET_WINDOWS +#endif + dasm_put(Dst, 10003); +#endif + dasm_put(Dst, 10011); +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 9413); +#endif + dasm_put(Dst, 9413); +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop, int cmov, int sse) +{ + int vk = 0; + dasm_put(Dst, 10014, defop); + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + if (LJ_DUALNUM) { + dasm_put(Dst, 10016, LJ_TISNUM, LJ_TISNUM); + switch (op) { + case BC_ISLT: + dasm_put(Dst, 10046); + break; + case BC_ISGE: + dasm_put(Dst, 10051); + break; + case BC_ISLE: + dasm_put(Dst, 10056); + break; + case BC_ISGT: + dasm_put(Dst, 10061); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 10066, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 10121); + } else { + dasm_put(Dst, 10132); + } + dasm_put(Dst, 10143); + if (sse) { + dasm_put(Dst, 10150); + switch (op) { + case BC_ISLT: + dasm_put(Dst, 10170); + break; + case BC_ISGE: + dasm_put(Dst, 10175); + break; + case BC_ISLE: + dasm_put(Dst, 10180); + break; + case BC_ISGT: + dasm_put(Dst, 10185); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 10190); + } else { + dasm_put(Dst, 10195); + } + } else { + dasm_put(Dst, 10203, LJ_TISNUM, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 10224); + } else { + dasm_put(Dst, 10245); + if (cmov) { + dasm_put(Dst, 10261); + } else { + dasm_put(Dst, 10267); + } + } + if (LJ_DUALNUM) { + switch (op) { + case BC_ISLT: + dasm_put(Dst, 10170); + break; + case BC_ISGE: + dasm_put(Dst, 10175); + break; + case BC_ISLE: + dasm_put(Dst, 10180); + break; + case BC_ISGT: + dasm_put(Dst, 10185); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 10190); + } else { + switch (op) { + case BC_ISLT: + dasm_put(Dst, 10274); + break; + case BC_ISGE: + dasm_put(Dst, 10279); + break; + case BC_ISLE: + dasm_put(Dst, 10284); + break; + case BC_ISGT: + dasm_put(Dst, 10289); + break; + default: break; /* Shut up GCC. 
*/ + } + dasm_put(Dst, 10294, -BCBIAS_J*4); + } + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + dasm_put(Dst, 10327); + if (LJ_DUALNUM) { + dasm_put(Dst, 10335, LJ_TISNUM, LJ_TISNUM); + if (vk) { + dasm_put(Dst, 10360); + } else { + dasm_put(Dst, 10365); + } + dasm_put(Dst, 10370, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 10423); + } else { + dasm_put(Dst, 10430); + } + dasm_put(Dst, 10434); + if (sse) { + dasm_put(Dst, 10445); + } else { + dasm_put(Dst, 10457); + } + dasm_put(Dst, 10464); + } else { + dasm_put(Dst, 10469, LJ_TISNUM, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 10488); + } else { + dasm_put(Dst, 10506); + if (cmov) { + dasm_put(Dst, 10261); + } else { + dasm_put(Dst, 10267); + } + } + iseqne_fp: + if (vk) { + dasm_put(Dst, 10519); + } else { + dasm_put(Dst, 10528); + } + iseqne_end: + if (vk) { + dasm_put(Dst, 10537, -BCBIAS_J*4); + if (!LJ_HASFFI) { + dasm_put(Dst, 4675); + } + } else { + if (!LJ_HASFFI) { + dasm_put(Dst, 4675); + } + dasm_put(Dst, 10552, -BCBIAS_J*4); + } + if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV || + op == BC_ISEQN || op == BC_ISNEN)) { + dasm_put(Dst, 10567); + } else { + dasm_put(Dst, 10306); + } + if (op == BC_ISEQV || op == BC_ISNEV) { + dasm_put(Dst, 10572); + if (LJ_HASFFI) { + dasm_put(Dst, 10575, LJ_TCDATA, LJ_TCDATA); + } + dasm_put(Dst, 10594, LJ_TISPRI, LJ_TISTABUD, Dt6(->metatable), Dt6(->nomm), 1<<MM_eq); + if (vk) { + dasm_put(Dst, 10650); + } else { + dasm_put(Dst, 10654); + } + dasm_put(Dst, 10660); + } else if (LJ_HASFFI) { + dasm_put(Dst, 10665, LJ_TCDATA); + if (LJ_DUALNUM && vk) { + dasm_put(Dst, 10672); + } else { + dasm_put(Dst, 10645); + } + dasm_put(Dst, 10677); + } + break; + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + dasm_put(Dst, 10682, LJ_TSTR); + iseqne_test: + if (vk) { + dasm_put(Dst, 10523); + } else { + dasm_put(Dst, 814); + } + goto iseqne_end; + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + dasm_put(Dst, 10709); + if (LJ_DUALNUM) { + dasm_put(Dst, 10717, LJ_TISNUM, LJ_TISNUM); + if (vk) { + dasm_put(Dst, 10360); + } else { + dasm_put(Dst, 10365); + } + dasm_put(Dst, 10744, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 10794); + } else { + dasm_put(Dst, 10802); + } + dasm_put(Dst, 10807); + if (sse) { + dasm_put(Dst, 10814); + } else { + dasm_put(Dst, 10827); + } + dasm_put(Dst, 10464); + } else { + dasm_put(Dst, 10835, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 10844); + } else { + dasm_put(Dst, 10863); + if (cmov) { + dasm_put(Dst, 10261); + } else { + dasm_put(Dst, 10267); + } + } + goto iseqne_fp; + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + dasm_put(Dst, 10877); + if (!LJ_HASFFI) goto iseqne_test; + if (vk) { + dasm_put(Dst, 10891, -BCBIAS_J*4, LJ_TCDATA); + } else { + dasm_put(Dst, 10942, LJ_TCDATA, -BCBIAS_J*4); + } + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + dasm_put(Dst, 10987, LJ_TISTRUECOND); + if (op == BC_IST || op == BC_ISTC) { + dasm_put(Dst, 10289); + } else { + dasm_put(Dst, 10284); + } + if (op == BC_ISTC || op == BC_ISFC) { + dasm_put(Dst, 10999); + } + dasm_put(Dst, 10294, -BCBIAS_J*4); + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + dasm_put(Dst, 11010); + break; + case BC_NOT: + dasm_put(Dst, 11039, LJ_TISTRUECOND, LJ_TTRUE); + break; + case BC_UNM: + if (LJ_DUALNUM) { + dasm_put(Dst, 11076, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 11154, 
LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11165, (unsigned int)(U64x(80000000,00000000)), (unsigned int)((U64x(80000000,00000000))>>32)); + } else { + dasm_put(Dst, 11190); + } + if (LJ_DUALNUM) { + dasm_put(Dst, 10567); + } else { + dasm_put(Dst, 10306); + } + break; + case BC_LEN: + dasm_put(Dst, 11199, LJ_TSTR); + if (LJ_DUALNUM) { + dasm_put(Dst, 11213, Dt5(->len), LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 11227, Dt5(->len)); + } else { + dasm_put(Dst, 11245, Dt5(->len)); + } + dasm_put(Dst, 11254, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 11290, Dt6(->metatable)); +#endif + dasm_put(Dst, 11304); + if (LJ_DUALNUM) { + } else if (sse) { + dasm_put(Dst, 11313); + } else { + } + dasm_put(Dst, 11319); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 11332, Dt6(->nomm), 1<<MM_len); +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11356, LJ_TISNUM, LJ_TISNUM); + break; + case 1: + dasm_put(Dst, 11391, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 11426, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 11459, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 11223); + } else { + dasm_put(Dst, 11006); + } + dasm_put(Dst, 10306); + } else { + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11465, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11477, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11489); + } else { + dasm_put(Dst, 11504); + } + break; + case 1: + dasm_put(Dst, 11513, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11525, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11537); + } else { + dasm_put(Dst, 11552); + } + break; + default: + dasm_put(Dst, 11561, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11583); + } else { + dasm_put(Dst, 11597); + } + break; + } + if (sse) { + dasm_put(Dst, 11183); + } else { + dasm_put(Dst, 11195); + } + dasm_put(Dst, 10306); + } + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11605, LJ_TISNUM, LJ_TISNUM); + break; + case 1: + dasm_put(Dst, 11640, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 11675, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 11459, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 11223); + } else { + dasm_put(Dst, 11006); + } + dasm_put(Dst, 10306); + } else { + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11465, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11477, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11708); + } else { + dasm_put(Dst, 11723); + } + break; + case 1: + dasm_put(Dst, 11513, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11525, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11732); + } else { + dasm_put(Dst, 11747); + } + break; + default: + dasm_put(Dst, 11561, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11756); + } else { + dasm_put(Dst, 11770); + } + break; + } + if (sse) { + dasm_put(Dst, 11183); + } else { + dasm_put(Dst, 11195); + } + dasm_put(Dst, 10306); + } + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + 
case 0: + dasm_put(Dst, 11778, LJ_TISNUM, LJ_TISNUM); + break; + case 1: + dasm_put(Dst, 11814, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 11850, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 11459, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 11223); + } else { + dasm_put(Dst, 11006); + } + dasm_put(Dst, 10306); + } else { + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11465, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11477, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11884); + } else { + dasm_put(Dst, 11899); + } + break; + case 1: + dasm_put(Dst, 11513, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11525, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11908); + } else { + dasm_put(Dst, 11923); + } + break; + default: + dasm_put(Dst, 11561, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11932); + } else { + dasm_put(Dst, 11946); + } + break; + } + if (sse) { + dasm_put(Dst, 11183); + } else { + dasm_put(Dst, 11195); + } + dasm_put(Dst, 10306); + } + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11465, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11477, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11954); + } else { + dasm_put(Dst, 11969); + } + break; + case 1: + dasm_put(Dst, 11513, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11525, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11978); + } else { + dasm_put(Dst, 11993); + } + break; + default: + dasm_put(Dst, 11561, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12002); + } else { + dasm_put(Dst, 12016); + } + break; + } + if (sse) { + dasm_put(Dst, 11183); + } else { + dasm_put(Dst, 11195); + } + dasm_put(Dst, 10306); + break; + case BC_MODVN: + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11465, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11477, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12024); + } else { + dasm_put(Dst, 12039); + } + break; + case 1: + dasm_put(Dst, 11513, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11525, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12048); + } else { + dasm_put(Dst, 12063); + } + break; + default: + dasm_put(Dst, 11561, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12072); + } else { + dasm_put(Dst, 12086); + } + break; + } + dasm_put(Dst, 12094); + if (sse) { + dasm_put(Dst, 11183); + } else { + dasm_put(Dst, 11195); + } + dasm_put(Dst, 10306); + break; + case BC_MODNV: case BC_MODVV: + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11465, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11477, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12024); + } else { + dasm_put(Dst, 12039); + } + break; + case 1: + dasm_put(Dst, 11513, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11525, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12048); + } else { + dasm_put(Dst, 12063); + } + break; + default: + dasm_put(Dst, 11561, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12072); + } else { + dasm_put(Dst, 12086); + } + break; + } + dasm_put(Dst, 12100); + break; + case BC_POW: + dasm_put(Dst, 11348); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11465, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11477, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12024); + } else { + 
dasm_put(Dst, 12039); + } + break; + case 1: + dasm_put(Dst, 11513, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11525, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12048); + } else { + dasm_put(Dst, 12063); + } + break; + default: + dasm_put(Dst, 11561, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12072); + } else { + dasm_put(Dst, 12086); + } + break; + } + dasm_put(Dst, 12105); + if (sse) { + dasm_put(Dst, 11183); + } else { + dasm_put(Dst, 11195); + } + dasm_put(Dst, 10306); + break; + + case BC_CAT: + dasm_put(Dst, 12109, Dt1(->base), Dt1(->base)); + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + dasm_put(Dst, 12193, LJ_TSTR); + break; + case BC_KCDATA: +#if LJ_HASFFI + dasm_put(Dst, 12193, LJ_TCDATA); +#endif + break; + case BC_KSHORT: + if (LJ_DUALNUM) { + dasm_put(Dst, 12230, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 12242); + } else { + dasm_put(Dst, 12257); + } + dasm_put(Dst, 10306); + break; + case BC_KNUM: + if (sse) { + dasm_put(Dst, 12265); + } else { + dasm_put(Dst, 12279); + } + dasm_put(Dst, 10306); + break; + case BC_KPRI: + dasm_put(Dst, 12287); + break; + case BC_KNIL: + dasm_put(Dst, 12316, LJ_TNIL); + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + dasm_put(Dst, 12364, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_USETV: +#define TV2MARKOFS \ + ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)) + dasm_put(Dst, 12405, offsetof(GCfuncL, uvptr), DtA(->closed), DtA(->v), TV2MARKOFS, LJ_GC_BLACK, LJ_TISGCV, LJ_TISNUM - LJ_TISGCV, Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G); + dasm_put(Dst, 12501); + break; +#undef TV2MARKOFS + case BC_USETS: + dasm_put(Dst, 12513, offsetof(GCfuncL, uvptr), DtA(->v), LJ_TSTR, DtA(->marked), LJ_GC_BLACK, Dt4(->gch.marked), LJ_GC_WHITES, DtA(->closed), GG_DISP2G); + break; + case BC_USETN: + dasm_put(Dst, 12609); + if (sse) { + dasm_put(Dst, 12614); + } else { + dasm_put(Dst, 10830); + } + dasm_put(Dst, 12622, offsetof(GCfuncL, uvptr), DtA(->v)); + if (sse) { + dasm_put(Dst, 12631); + } else { + dasm_put(Dst, 12637); + } + dasm_put(Dst, 10306); + break; + case BC_USETP: + dasm_put(Dst, 12640, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_UCLO: + dasm_put(Dst, 12680, -BCBIAS_J*4, Dt1(->openupval), Dt1(->base), Dt1(->base)); + break; + + case BC_FNEW: + dasm_put(Dst, 12736, Dt1(->base), Dt1(->base), LJ_TFUNC); + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + dasm_put(Dst, 12803, Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), LJ_TTAB); + break; + case BC_TDUP: + dasm_put(Dst, 12927, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), Dt1(->base), LJ_TTAB); + break; + + case BC_GGET: + dasm_put(Dst, 13026, Dt7(->env)); + break; + case BC_GSET: + dasm_put(Dst, 13046, Dt7(->env)); + break; + + case BC_TGETV: + dasm_put(Dst, 13066, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 13089, LJ_TISNUM); + } else { + dasm_put(Dst, 13103, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 13114); + } else { + } + dasm_put(Dst, 13135); + } + dasm_put(Dst, 13140, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_index, LJ_TNIL); + dasm_put(Dst, 13232, LJ_TSTR); + break; + case BC_TGETS: + dasm_put(Dst, 13250, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL); + dasm_put(Dst, 13335, LJ_TNIL, 
DtB(->next), Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + break; + case BC_TGETB: + dasm_put(Dst, 13407, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + dasm_put(Dst, 13503, LJ_TNIL); + break; + + case BC_TSETV: + dasm_put(Dst, 13520, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 13089, LJ_TISNUM); + } else { + dasm_put(Dst, 13103, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 13114); + } else { + } + dasm_put(Dst, 13543); + } + dasm_put(Dst, 13548, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex); + dasm_put(Dst, 13629, LJ_TSTR, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + case BC_TSETS: + dasm_put(Dst, 13688, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->nomm), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL); + dasm_put(Dst, 13765, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, DtB(->next)); + dasm_put(Dst, 13852, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, LJ_TSTR, Dt1(->base), Dt1(->base), Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + case BC_TSETB: + dasm_put(Dst, 13944, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable)); + dasm_put(Dst, 14039, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + + case BC_TSETM: + dasm_put(Dst, 14087, Dt6(->marked), LJ_GC_BLACK, Dt6(->asize), Dt6(->array), Dt1(->base), Dt1(->base)); + dasm_put(Dst, 14237, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALL: case BC_CALLM: + dasm_put(Dst, 11352); + if (op == BC_CALLM) { + dasm_put(Dst, 14257); + } + dasm_put(Dst, 14262, LJ_TFUNC, Dt7(->pc)); + break; + + case BC_CALLMT: + dasm_put(Dst, 14257); + break; + case BC_CALLT: + dasm_put(Dst, 14305, LJ_TFUNC, FRAME_TYPE, Dt7(->ffid), Dt7(->pc)); + dasm_put(Dst, 14423, FRAME_TYPE, Dt7(->pc), PC2PROTO(k), FRAME_VARG, FRAME_TYPEP, FRAME_VARG); + break; + + case BC_ITERC: + dasm_put(Dst, 14497, LJ_TFUNC, 2+1, Dt7(->pc)); + break; + + case BC_ITERN: +#if LJ_HASJIT +#endif + dasm_put(Dst, 14569, Dt6(->asize), Dt6(->array), LJ_TNIL); + if (LJ_DUALNUM) { + dasm_put(Dst, 11218, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 11313); + } else { + dasm_put(Dst, 14621); + } + dasm_put(Dst, 14627); + if (LJ_DUALNUM) { + } else if (sse) { + dasm_put(Dst, 11183); + } else { + dasm_put(Dst, 11195); + } + dasm_put(Dst, 14640, -BCBIAS_J*4); + if (!LJ_DUALNUM && !sse) { + dasm_put(Dst, 14694); + } + dasm_put(Dst, 14700, Dt6(->hmask), sizeof(Node), Dt6(->node), DtB(->val.it), LJ_TNIL, DtB(->key), DtB(->val)); + break; + + case BC_ISNEXT: + dasm_put(Dst, 14779, LJ_TFUNC, LJ_TTAB, LJ_TNIL, Dt8(->ffid), FF_next_N, -BCBIAS_J*4, BC_JMP, -BCBIAS_J*4, BC_ITERC); + break; + + case BC_VARG: + dasm_put(Dst, 14880, (8+FRAME_VARG), LJ_TNIL, Dt1(->maxstack)); + dasm_put(Dst, 15047, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top)); + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + dasm_put(Dst, 14257); + break; + + case BC_RET: case BC_RET0: case BC_RET1: + if (op != 
BC_RET0) { + dasm_put(Dst, 15117); + } + dasm_put(Dst, 15121, FRAME_TYPE); + switch (op) { + case BC_RET: + dasm_put(Dst, 15140); + break; + case BC_RET1: + dasm_put(Dst, 15194); + /* fallthrough */ + case BC_RET0: + dasm_put(Dst, 15204); + default: + break; + } + dasm_put(Dst, 15215, Dt7(->pc), PC2PROTO(k)); + if (op == BC_RET) { + dasm_put(Dst, 15263, LJ_TNIL); + } else { + dasm_put(Dst, 15274, LJ_TNIL); + } + dasm_put(Dst, 15281, -FRAME_VARG, FRAME_TYPEP); + if (op != BC_RET0) { + dasm_put(Dst, 15305); + } + dasm_put(Dst, 4752); + break; + + /* -- Loops and branches ------------------------------------------------ */ + + + case BC_FORL: +#if LJ_HASJIT + dasm_put(Dst, 15309, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + vk = (op == BC_IFORL || op == BC_JFORL); + dasm_put(Dst, 15330); + if (LJ_DUALNUM) { + dasm_put(Dst, 15334, LJ_TISNUM); + if (!vk) { + dasm_put(Dst, 15344, LJ_TISNUM, LJ_TISNUM); + } else { +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 15373, LJ_TISNUM, LJ_TISNUM); +#endif + dasm_put(Dst, 15392); + } + dasm_put(Dst, 15411, LJ_TISNUM); + if (op == BC_FORI) { + dasm_put(Dst, 15422, -BCBIAS_J*4); + } else if (op == BC_JFORI) { + dasm_put(Dst, 15436, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + dasm_put(Dst, 15454, -BCBIAS_J*4); + } else { + dasm_put(Dst, 15446, BC_JLOOP); + } + dasm_put(Dst, 15468); + if (vk) { + dasm_put(Dst, 15493); + } + dasm_put(Dst, 15411, LJ_TISNUM); + if (op == BC_FORI) { + dasm_put(Dst, 15502); + } else if (op == BC_JFORI) { + dasm_put(Dst, 15507, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + dasm_put(Dst, 15521); + } else { + dasm_put(Dst, 15517, BC_JLOOP); + } + dasm_put(Dst, 15526); + } else if (!vk) { + dasm_put(Dst, 15533, LJ_TISNUM); + } + if (!vk) { + dasm_put(Dst, 15539, LJ_TISNUM); + } else { +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 15553, LJ_TISNUM, LJ_TISNUM); +#endif + } + dasm_put(Dst, 15572); + if (!vk) { + dasm_put(Dst, 15576, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 15585); + if (vk) { + dasm_put(Dst, 15597); + } else { + dasm_put(Dst, 15616); + } + dasm_put(Dst, 15621); + } else { + dasm_put(Dst, 15634); + if (vk) { + dasm_put(Dst, 15640); + } else { + dasm_put(Dst, 15656); + } + dasm_put(Dst, 15664); + if (cmov) { + dasm_put(Dst, 10261); + } else { + dasm_put(Dst, 10267); + } + if (!cmov) { + dasm_put(Dst, 15669); + } + } + if (op == BC_FORI) { + if (LJ_DUALNUM) { + dasm_put(Dst, 15675); + } else { + dasm_put(Dst, 15680, -BCBIAS_J*4); + } + } else if (op == BC_JFORI) { + dasm_put(Dst, 15690, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + if (LJ_DUALNUM) { + dasm_put(Dst, 15704); + } else { + dasm_put(Dst, 15709, -BCBIAS_J*4); + } + } else { + dasm_put(Dst, 15700, BC_JLOOP); + } + if (LJ_DUALNUM) { + dasm_put(Dst, 10190); + } else { + dasm_put(Dst, 10964); + } + if (sse) { + dasm_put(Dst, 15719); + } + break; + + case BC_ITERL: +#if LJ_HASJIT + dasm_put(Dst, 15309, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + dasm_put(Dst, 15730, LJ_TNIL); + if (op == BC_JITERL) { + dasm_put(Dst, 15745, BC_JLOOP); + } else { + dasm_put(Dst, 15759, -BCBIAS_J*4); + } + dasm_put(Dst, 10304); + break; + + case BC_LOOP: +#if LJ_HASJIT + dasm_put(Dst, 15309, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_ILOOP: + dasm_put(Dst, 10306); + break; + + case BC_JLOOP: +#if LJ_HASJIT + dasm_put(Dst, 15775, DISPATCH_J(trace), DtD(->mcode), 
DISPATCH_GL(jit_base), DISPATCH_GL(jit_L)); +#endif + break; + + case BC_JMP: + dasm_put(Dst, 15816, -BCBIAS_J*4); + break; + + /* -- Function headers -------------------------------------------------- */ + + /* + ** Reminder: A function may be called with func/args above L->maxstack, + ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot, + ** too. This means all FUNC* ops (including fast functions) must check + ** for stack overflow _before_ adding more slots! + */ + + case BC_FUNCF: +#if LJ_HASJIT + dasm_put(Dst, 15842, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + dasm_put(Dst, 15863, -4+PC2PROTO(k), Dt1(->maxstack), -4+PC2PROTO(numparams)); + if (op == BC_JFUNCF) { + dasm_put(Dst, 15894, BC_JLOOP); + } else { + dasm_put(Dst, 10306); + } + dasm_put(Dst, 15903, LJ_TNIL); + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + dasm_put(Dst, 9413); + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + dasm_put(Dst, 15925, FRAME_VARG, Dt1(->maxstack), -4+PC2PROTO(numparams), LJ_TNIL); + if (op == BC_JFUNCV) { + dasm_put(Dst, 15894, BC_JLOOP); + } else { + dasm_put(Dst, 16022, -4+PC2PROTO(k)); + } + dasm_put(Dst, 16047, LJ_TNIL); + break; + + case BC_FUNCC: + case BC_FUNCCW: + dasm_put(Dst, 16069, Dt8(->f), Dt1(->base), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->top)); + if (op == BC_FUNCC) { + dasm_put(Dst, 2424); + } else { + dasm_put(Dst, 16099); + } + dasm_put(Dst, 16107, DISPATCH_GL(vmstate), ~LJ_VMST_C); + if (op == BC_FUNCC) { + dasm_put(Dst, 16117); + } else { + dasm_put(Dst, 16122, DISPATCH_GL(wrapf)); + } + dasm_put(Dst, 16128, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), Dt1(->top)); + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + int cmov = 1; + int sse = 0; +#ifdef LUAJIT_CPU_NOCMOV + cmov = 0; +#endif +#if defined(LUAJIT_CPU_SSE2) || defined(LJ_TARGET_X64) + sse = 1; +#endif + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx, cmov, sse); + + dasm_put(Dst, 16154); + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op, cmov, sse); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ +#if LJ_64 +#define SZPTR "8" +#define BSZPTR "3" +#define REG_SP "0x7" +#define REG_RA "0x10" +#else +#define SZPTR "4" +#define BSZPTR "2" +#define REG_SP "0x4" +#define REG_RA "0x8" +#endif + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE0:\n\n", (int)ctx->codesz, CFRAME_SIZE); +#if (defined(__sun__) && defined(__svr4__)) || defined(__solaris_) + fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n"); +#else + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); +#endif + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .LASFDE1-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE1:\n\n", (int)ctx->codesz, CFRAME_SIZE); + break; + case BUILD_coffasm: + fprintf(ctx->fp, "\t.section .eh_frame,\"dr\"\n"); + fprintf(ctx->fp, + "\t.def %slj_err_unwind_dwarf; .scl 2; .type 32; .endef\n", + LJ_32 ? 
"_" : ""); + fprintf(ctx->fp, + "Lframe1:\n" + "\t.long LECIE1-LSCIE1\n" + "LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zP\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 5\n" /* augmentation length */ + "\t.byte 0x00\n" /* absptr */ + "\t.long %slj_err_unwind_dwarf\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + "LECIE1:\n\n", LJ_32 ? "_" : ""); + fprintf(ctx->fp, + "LSFDE1:\n" + "\t.long LEFDE1-LASFDE1\n" + "LASFDE1:\n" + "\t.long LASFDE1-Lframe1\n" + "\t.long %slj_vm_asm_begin\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + "LEFDE1:\n\n", LJ_32 ? "_" : "", (int)ctx->codesz, CFRAME_SIZE); + break; + /* Mental note: never let Apple design an assembler. + ** Or a linker. Or a plastic case. But I digress. + */ + case BUILD_machasm: { + int i; + fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n"); + fprintf(ctx->fp, + "EH_frame1:\n" + "\t.set L$set$x,LECIEX-LSCIEX\n" + "\t.long L$set$x\n" + "LSCIEX:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.ascii \"zPR\\0\"\n" + "\t.byte 0x1\n" + "\t.byte 128-" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 6\n" /* augmentation length */ + "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */ +#if LJ_64 + "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n" +#else + "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. 
*/ +#endif + "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n" + "\t.align " BSZPTR "\n" + "LECIEX:\n\n"); + for (i = 0; i < ctx->nsym; i++) { + const char *name = ctx->sym[i].name; + int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs; + if (size == 0) continue; + fprintf(ctx->fp, + "%s.eh:\n" + "LSFDE%d:\n" + "\t.set L$set$%d,LEFDE%d-LASFDE%d\n" + "\t.long L$set$%d\n" + "LASFDE%d:\n" + "\t.long LASFDE%d-EH_frame1\n" + "\t.long %s-.\n" + "\t.long %d\n" + "\t.byte 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */ +#else + "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/ + "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */ +#endif + "\t.align " BSZPTR "\n" + "LEFDE%d:\n\n", + name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i); + } +#if LJ_64 + fprintf(ctx->fp, "\t.subsections_via_symbols\n"); +#else + fprintf(ctx->fp, + "\t.non_lazy_symbol_pointer\n" + "L_lj_err_unwind_dwarf$non_lazy_ptr:\n" + ".indirect_symbol _lj_err_unwind_dwarf\n" + ".long 0\n"); +#endif + } + break; + default: /* Difficult for other modes. */ + break; + } +} + diff --git a/src/luajit2/src/buildvm_x64win.h b/src/luajit2/src/buildvm_x64win.h new file mode 100644 index 0000000000..bb8132841b --- /dev/null +++ b/src/luajit2/src/buildvm_x64win.h @@ -0,0 +1,3247 @@ +/* +** This file has been pre-processed with DynASM. +** http://luajit.org/dynasm.html +** DynASM version 1.3.0, DynASM x64 version 1.3.0 +** DO NOT EDIT! The original file is in "buildvm_x86.dasc". 
+*/ + +#if DASM_VERSION != 10300 +#error "Version mismatch between DynASM and included encoding engine" +#endif + +#define DASM_SECTION_CODE_OP 0 +#define DASM_SECTION_CODE_SUB 1 +#define DASM_MAXSECTION 2 +static const unsigned char build_actionlist[16011] = { + 254,1,248,10,252,247,198,237,15,132,244,11,131,230,252,248,41,252,242,72, + 141,76,49,252,248,139,114,252,252,199,68,10,4,237,248,12,131,192,1,137,68, + 36,84,252,247,198,237,15,132,244,13,248,14,129,252,246,239,252,247,198,237, + 15,133,244,10,199,131,233,237,131,230,252,248,41,214,252,247,222,131,232, + 1,15,132,244,248,248,1,72,139,44,10,72,137,106,252,248,131,194,8,131,232, + 1,15,133,244,1,248,2,255,139,108,36,96,137,181,233,248,3,139,68,36,84,139, + 76,36,88,248,4,57,193,15,133,244,252,248,5,131,252,234,8,137,149,233,248, + 15,72,139,76,36,104,72,137,141,233,49,192,248,16,72,131,196,40,91,94,95,93, + 195,248,6,15,130,244,253,59,149,233,15,135,244,254,199,66,252,252,237,131, + 194,8,131,192,1,252,233,244,4,248,7,255,133,201,15,132,244,5,41,193,141,20, + 202,252,233,244,5,248,8,137,149,233,137,68,36,84,137,202,137,252,233,232, + 251,1,0,139,149,233,252,233,244,3,248,17,137,208,72,137,204,248,18,139,108, + 36,96,139,173,233,199,133,233,237,252,233,244,16,248,19,248,20,72,129,225, + 239,72,137,204,248,21,255,139,108,36,96,72,199,193,252,248,252,255,252,255, + 252,255,184,237,139,149,233,139,157,233,129,195,239,139,114,252,252,199,66, + 252,252,237,199,131,233,237,252,233,244,12,248,22,186,237,252,233,244,248, + 248,23,131,232,8,252,233,244,247,248,24,141,68,194,252,248,248,1,15,182,142, + 233,131,198,4,137,149,233,255,137,133,233,137,116,36,100,137,202,248,2,137, + 252,233,232,251,1,0,139,149,233,139,133,233,139,106,252,248,41,208,193,232, + 3,131,192,1,139,181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255, + 36,252,235,248,25,85,87,86,83,72,131,252,236,40,137,205,137,76,36,96,137, + 209,190,237,49,192,72,141,188,253,36,233,139,157,233,129,195,239,72,137,189, + 233,137,68,36,100,72,137,68,36,104,137,68,36,88,137,68,36,92,56,133,233,15, + 132,244,249,199,131,233,237,136,133,233,139,149,233,139,133,233,41,200,193, + 232,3,131,192,1,41,209,139,114,252,252,137,68,36,84,252,247,198,237,255,15, + 132,244,13,252,233,244,14,248,26,85,87,86,83,72,131,252,236,40,190,237,68, + 137,76,36,92,252,233,244,247,248,27,85,87,86,83,72,131,252,236,40,190,237, + 248,1,68,137,68,36,88,137,205,137,76,36,96,137,209,72,139,189,233,72,137, + 124,36,104,137,108,36,100,72,137,165,233,248,2,139,157,233,129,195,239,248, + 3,199,131,233,237,139,149,233,255,1,206,41,214,139,133,233,41,200,193,232, + 3,131,192,1,248,28,139,105,252,248,129,121,253,252,252,239,15,133,244,29, + 248,30,137,202,137,114,252,252,139,181,233,139,14,15,182,252,233,15,182,205, + 131,198,4,252,255,36,252,235,248,31,85,87,86,83,72,131,252,236,40,137,205, + 137,76,36,96,137,108,36,100,139,189,233,43,189,233,199,68,36,92,0,0,0,0,137, + 124,36,88,72,139,189,233,72,137,124,36,104,72,137,165,233,65,252,255,209, + 133,192,15,132,244,15,137,193,190,237,252,233,244,2,248,11,1,209,131,230, + 252,248,137,213,41,252,242,199,68,193,252,252,237,137,200,139,117,252,244, + 72,99,77,252,240,133,201,15,132,244,247,255,72,141,61,245,72,1,252,249,139, + 122,252,248,139,191,233,139,191,233,252,255,225,248,1,41,213,193,252,237, + 3,141,69,252,255,252,233,244,32,248,33,15,182,78,252,255,131,252,237,16,141, + 12,202,41,252,233,15,132,244,34,252,247,217,193,252,233,3,65,137,200,139, + 76,36,96,137,145,233,72,139,0,72,137,69,0,137,252,234,252,233,244,35,248, + 
36,137,68,36,80,199,68,36,84,237,72,141,68,36,80,128,126,252,252,235,15,133, + 244,247,141,139,233,137,41,199,65,4,237,255,137,205,252,233,244,248,248,37, + 15,182,70,252,254,255,199,68,36,84,237,137,68,36,80,255,252,242,15,42,192, + 252,242,15,17,68,36,80,255,72,141,68,36,80,252,233,244,247,248,38,15,182, + 70,252,254,141,4,194,248,1,15,182,110,252,255,141,44,252,234,248,2,139,76, + 36,96,137,145,233,137,252,234,73,137,192,137,205,137,116,36,100,232,251,1, + 1,139,149,233,133,192,15,132,244,249,248,34,15,182,78,252,253,72,139,40,72, + 137,44,202,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252, + 235,248,3,139,141,233,137,113,252,244,141,177,233,41,214,139,105,252,248, + 184,237,252,233,244,30,248,39,137,68,36,80,199,68,36,84,237,72,141,68,36, + 80,128,126,252,252,235,15,133,244,247,255,141,139,233,137,41,199,65,4,237, + 137,205,252,233,244,248,248,40,15,182,70,252,254,255,72,141,68,36,80,252, + 233,244,247,248,41,15,182,70,252,254,141,4,194,248,1,15,182,110,252,255,141, + 44,252,234,248,2,139,76,36,96,137,145,233,137,252,234,73,137,192,137,205, + 137,116,36,100,232,251,1,2,139,149,233,133,192,15,132,244,249,15,182,78,252, + 253,72,139,44,202,72,137,40,248,42,139,6,15,182,204,15,182,232,131,198,4, + 193,232,16,252,255,36,252,235,248,3,139,141,233,137,113,252,244,15,182,70, + 252,253,72,139,44,194,72,137,105,16,141,177,233,41,214,139,105,252,248,184, + 237,252,233,244,30,248,43,139,108,36,96,137,149,233,68,141,4,194,141,20,202, + 137,252,233,68,15,182,78,252,252,137,116,36,100,232,251,1,3,248,3,139,149, + 233,255,131,252,248,1,15,135,244,44,248,4,141,118,4,15,130,244,252,248,5, + 15,183,70,252,254,141,180,253,134,233,248,6,139,6,15,182,204,15,182,232,131, + 198,4,193,232,16,252,255,36,252,235,248,45,131,198,4,129,120,253,4,239,15, + 130,244,5,252,233,244,6,248,46,129,120,253,4,239,252,233,244,4,248,47,131, + 252,238,4,65,137,192,65,137,252,233,139,108,36,96,137,149,233,255,137,202, + 137,252,233,137,116,36,100,232,251,1,4,252,233,244,3,248,48,255,131,252,238, + 4,139,108,36,96,137,149,233,137,252,233,139,86,252,252,137,116,36,100,232, + 251,1,5,252,233,244,3,255,248,49,255,15,182,110,252,255,255,248,50,141,4, + 199,252,233,244,247,248,51,255,248,52,141,4,199,141,44,252,234,149,252,233, + 244,248,248,53,141,4,194,137,197,252,233,244,248,248,54,255,248,55,141,4, + 194,248,1,141,44,252,234,248,2,141,12,202,65,137,232,65,137,193,15,182,70, + 252,252,137,68,36,32,139,108,36,96,137,149,233,137,202,137,252,233,137,116, + 36,100,232,251,1,6,139,149,233,133,192,15,132,244,42,248,44,137,193,41,208, + 137,113,252,244,141,176,233,184,237,252,233,244,28,248,56,139,108,36,96,137, + 149,233,141,20,194,137,252,233,137,116,36,100,232,251,1,7,139,149,233,255, + 133,192,15,133,244,44,15,183,70,252,254,139,12,194,252,233,244,57,255,252, + 233,244,44,255,248,58,141,76,202,8,248,29,137,76,36,84,137,68,36,80,131,252, + 233,8,139,108,36,96,137,149,233,137,202,68,141,4,193,137,252,233,137,116, + 36,100,232,251,1,8,139,149,233,139,76,36,84,139,68,36,80,139,105,252,248, + 131,192,1,57,215,15,132,244,59,137,202,137,114,252,252,139,181,233,139,14, + 15,182,252,233,15,182,205,131,198,4,252,255,36,252,235,248,60,139,108,36, + 96,137,149,233,137,202,137,252,233,137,116,36,100,232,251,1,9,139,149,233, + 139,70,252,252,15,182,204,15,182,232,193,232,16,252,255,164,253,252,235,233, + 248,61,129,252,248,239,15,130,244,62,139,106,4,129,252,253,239,15,131,244, + 62,139,114,252,252,137,68,36,84,137,106,252,252,139,42,137,106,252,248,131, + 232,2,15,132,244,248,255,137,209,248,1,131,193,8,72,139,41,72,137,105,252, 
+ 248,131,232,1,15,133,244,1,248,2,139,68,36,84,252,233,244,63,248,64,129,252, + 248,239,15,130,244,62,139,106,4,137,252,233,193,252,249,15,131,252,249,252, + 254,15,132,244,249,184,237,252,247,213,57,232,255,15,71,197,255,15,134,244, + 247,137,232,248,1,255,248,2,139,106,252,248,139,132,253,197,233,139,114,252, + 252,199,66,252,252,237,137,66,252,248,252,233,244,65,248,3,184,237,252,233, + 244,2,248,66,129,252,248,239,15,130,244,62,139,106,4,139,114,252,252,129, + 252,253,239,15,133,244,252,248,1,139,42,139,173,233,248,2,133,252,237,199, + 66,252,252,237,255,15,132,244,65,139,131,233,199,66,252,252,237,137,106,252, + 248,139,141,233,35,136,233,105,201,239,3,141,233,248,3,129,185,233,239,15, + 133,244,250,57,129,233,15,132,244,251,248,4,139,137,233,133,201,15,133,244, + 3,255,252,233,244,65,248,5,139,105,4,129,252,253,239,15,132,244,65,139,1, + 137,106,252,252,137,66,252,248,252,233,244,65,248,6,129,252,253,239,15,132, + 244,1,129,252,253,239,15,135,244,254,129,252,253,239,15,134,244,253,189,237, + 252,233,244,254,248,7,255,189,237,248,8,252,247,213,139,172,253,171,233,252, + 233,244,2,248,67,129,252,248,239,15,130,244,62,129,122,253,4,239,15,133,244, + 62,139,42,131,189,233,0,15,133,244,62,129,122,253,12,239,15,133,244,62,139, + 66,8,137,133,233,139,114,252,252,199,66,252,252,237,255,137,106,252,248,252, + 246,133,233,235,15,132,244,247,128,165,233,235,139,131,233,137,171,233,137, + 133,233,248,1,252,233,244,65,248,68,129,252,248,239,15,130,244,62,129,122, + 253,4,239,15,133,244,62,137,213,68,141,66,8,139,18,139,76,36,96,232,251,1, + 10,137,252,234,72,139,40,139,114,252,252,72,137,106,252,248,252,233,244,65, + 248,69,255,129,252,248,239,15,133,244,62,129,122,253,4,239,255,15,133,244, + 247,139,42,252,233,244,70,248,1,15,135,244,62,255,15,131,244,62,255,252,242, + 15,16,2,252,233,244,71,255,221,2,252,233,244,72,255,248,73,129,252,248,239, + 15,130,244,62,139,114,252,252,129,122,253,4,239,15,133,244,249,139,2,248, + 2,199,66,252,252,237,137,66,252,248,252,233,244,65,248,3,129,122,253,4,239, + 15,135,244,62,131,187,233,0,15,133,244,62,139,171,233,59,171,233,255,15,130, + 244,247,232,244,74,248,1,139,108,36,96,137,149,233,137,116,36,100,137,252, + 233,255,232,251,1,11,255,232,251,1,12,255,139,149,233,252,233,244,2,248,75, + 129,252,248,239,15,130,244,62,15,132,244,248,248,1,129,122,253,4,239,15,133, + 244,62,139,108,36,96,137,149,233,137,149,233,139,114,252,252,68,141,66,8, + 139,18,137,252,233,137,116,36,100,232,251,1,13,139,149,233,133,192,15,132, + 244,249,72,139,106,8,72,139,66,16,72,137,106,252,248,72,137,2,248,76,184, + 237,255,252,233,244,77,248,2,199,66,12,237,252,233,244,1,248,3,199,66,252, + 252,237,252,233,244,65,248,78,129,252,248,239,15,130,244,62,139,42,129,122, + 253,4,239,15,133,244,62,255,131,189,233,0,15,133,244,62,255,139,106,252,248, + 139,133,233,139,114,252,252,199,66,252,252,237,137,66,252,248,199,66,12,237, + 184,237,252,233,244,77,248,79,129,252,248,239,15,130,244,62,129,122,253,4, + 239,15,133,244,62,129,122,253,12,239,255,139,114,252,252,255,139,66,8,131, + 192,1,199,66,252,252,237,137,66,252,248,255,252,242,15,16,66,8,72,189,237, + 237,102,72,15,110,205,252,242,15,88,193,252,242,15,45,192,252,242,15,17,66, + 252,248,255,139,42,59,133,233,15,131,244,248,193,224,3,3,133,233,248,1,129, + 120,253,4,239,15,132,244,80,72,139,40,72,137,42,252,233,244,76,248,2,131, + 189,233,0,15,132,244,80,137,252,233,137,213,137,194,232,251,1,14,137,252, + 234,133,192,15,133,244,1,248,80,184,237,252,233,244,77,248,81,255,139,106, + 
252,248,139,133,233,139,114,252,252,199,66,252,252,237,137,66,252,248,255, + 199,66,12,237,199,66,8,0,0,0,0,255,15,87,192,252,242,15,17,66,8,255,217,252, + 238,221,90,8,255,184,237,252,233,244,77,248,82,129,252,248,239,15,130,244, + 62,141,74,8,131,232,1,190,237,248,1,15,182,171,233,193,252,237,235,131,229, + 1,1,252,238,252,233,244,28,248,83,129,252,248,239,15,130,244,62,129,122,253, + 12,239,15,133,244,62,255,139,106,4,137,106,12,199,66,4,237,139,42,139,114, + 8,137,106,8,137,50,141,74,16,131,232,2,190,237,252,233,244,1,248,84,129,252, + 248,239,15,130,244,62,139,42,139,114,252,252,137,116,36,100,137,108,36,80, + 129,122,253,4,239,15,133,244,62,72,131,189,233,0,15,133,244,62,128,189,233, + 235,15,135,244,62,139,141,233,15,132,244,247,255,59,141,233,15,132,244,62, + 248,1,141,116,193,252,240,59,181,233,15,135,244,62,137,181,233,139,108,36, + 96,137,149,233,131,194,8,137,149,233,141,108,194,232,72,41,252,245,57,206, + 15,132,244,249,248,2,72,139,4,46,72,137,70,252,248,131,252,238,8,57,206,15, + 133,244,2,248,3,137,202,139,76,36,80,232,244,25,199,131,233,237,255,139,108, + 36,96,139,116,36,80,139,149,233,129,252,248,239,15,135,244,254,248,4,139, + 142,233,139,190,233,137,142,233,137,252,254,41,206,15,132,244,252,141,4,50, + 193,252,238,3,59,133,233,15,135,244,255,137,213,72,41,205,248,5,72,139,1, + 72,137,4,41,131,193,8,57,252,249,15,133,244,5,248,6,141,70,2,199,66,252,252, + 237,248,7,139,116,36,100,137,68,36,84,72,199,193,252,248,252,255,252,255, + 252,255,252,247,198,237,255,15,132,244,13,252,233,244,14,248,8,199,66,252, + 252,237,139,142,233,131,252,233,8,137,142,233,72,139,1,72,137,2,184,237,252, + 233,244,7,248,9,139,76,36,80,137,185,233,137,252,242,137,252,233,232,251, + 1,0,139,116,36,80,139,149,233,252,233,244,4,248,85,139,106,252,248,139,173, + 233,139,114,252,252,137,116,36,100,137,108,36,80,72,131,189,233,0,15,133, + 244,62,255,128,189,233,235,15,135,244,62,139,141,233,15,132,244,247,59,141, + 233,15,132,244,62,248,1,141,116,193,252,248,59,181,233,15,135,244,62,137, + 181,233,139,108,36,96,137,149,233,137,149,233,141,108,194,252,240,72,41,252, + 245,57,206,15,132,244,249,248,2,255,72,139,4,46,72,137,70,252,248,131,252, + 238,8,57,206,15,133,244,2,248,3,137,202,139,76,36,80,232,244,25,199,131,233, + 237,139,108,36,96,139,116,36,80,139,149,233,129,252,248,239,15,135,244,254, + 248,4,139,142,233,139,190,233,137,142,233,137,252,254,41,206,15,132,244,252, + 141,4,50,193,252,238,3,59,133,233,15,135,244,255,255,137,213,72,41,205,248, + 5,72,139,1,72,137,4,41,131,193,8,57,252,249,15,133,244,5,248,6,141,70,1,248, + 7,139,116,36,100,137,68,36,84,49,201,252,247,198,237,15,132,244,13,252,233, + 244,14,248,8,137,252,242,137,252,233,232,251,1,15,248,9,139,76,36,80,137, + 185,233,137,252,242,137,252,233,232,251,1,0,139,116,36,80,139,149,233,252, + 233,244,4,248,86,139,108,36,96,72,252,247,133,233,237,15,132,244,62,255,137, + 149,233,141,68,194,252,248,137,133,233,49,192,72,137,133,233,176,235,136, + 133,233,252,233,244,16,255,248,70,255,248,72,139,114,252,252,221,90,252,248, + 252,233,244,65,255,248,87,129,252,248,239,15,130,244,62,255,129,122,253,4, + 239,15,133,244,248,139,42,131,252,253,0,15,137,244,70,252,247,221,15,136, + 244,247,248,88,248,70,139,114,252,252,199,66,252,252,237,137,106,252,248, + 252,233,244,65,248,1,139,114,252,252,199,66,252,252,0,0,224,65,199,66,252, + 248,0,0,0,0,252,233,244,65,248,2,15,135,244,62,255,129,122,253,4,239,15,131, + 244,62,255,252,242,15,16,2,72,184,237,237,102,72,15,110,200,15,84,193,248, + 
71,139,114,252,252,252,242,15,17,66,252,248,255,221,2,217,225,248,71,248, + 72,139,114,252,252,221,90,252,248,255,248,65,184,237,248,77,137,68,36,84, + 248,63,252,247,198,237,15,133,244,253,248,5,56,70,252,255,15,135,244,252, + 15,182,78,252,253,72,252,247,209,141,20,202,139,6,15,182,204,15,182,232,131, + 198,4,193,232,16,252,255,36,252,235,248,6,199,68,194,252,244,237,131,192, + 1,252,233,244,5,248,7,72,199,193,252,248,252,255,252,255,252,255,252,233, + 244,14,248,89,255,129,122,253,4,239,15,133,244,247,139,42,252,233,244,70, + 248,1,15,135,244,62,255,252,242,15,16,2,232,244,90,255,252,242,15,45,232, + 129,252,253,0,0,0,128,15,133,244,70,252,242,15,42,205,102,15,46,193,15,138, + 244,71,15,132,244,70,255,221,2,232,244,90,255,248,91,255,252,242,15,16,2, + 232,244,92,255,221,2,232,244,92,255,248,93,129,252,248,239,15,130,244,62, + 129,122,253,4,239,15,131,244,62,252,242,15,81,2,252,233,244,71,255,248,93, + 129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,221,2,217,252, + 250,252,233,244,72,255,248,94,129,252,248,239,15,130,244,62,129,122,253,4, + 239,15,131,244,62,217,252,237,221,2,217,252,241,252,233,244,72,248,95,129, + 252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,217,252,236,221, + 2,217,252,241,252,233,244,72,248,96,129,252,248,239,255,15,130,244,62,129, + 122,253,4,239,15,131,244,62,221,2,232,244,97,252,233,244,72,248,98,129,252, + 248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,221,2,217,252,254,252, + 233,244,72,248,99,129,252,248,239,255,15,130,244,62,129,122,253,4,239,15, + 131,244,62,221,2,217,252,255,252,233,244,72,248,100,129,252,248,239,15,130, + 244,62,129,122,253,4,239,15,131,244,62,221,2,217,252,242,221,216,252,233, + 244,72,248,101,129,252,248,239,15,130,244,62,255,129,122,253,4,239,15,131, + 244,62,221,2,217,192,216,200,217,232,222,225,217,252,250,217,252,243,252, + 233,244,72,248,102,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,221,2,217,192,216,200,217,232,222,225,217,252,250,217,201,217,252, + 243,252,233,244,72,248,103,129,252,248,239,15,130,244,62,129,122,253,4,239, + 15,131,244,62,255,221,2,217,232,217,252,243,252,233,244,72,255,248,104,129, + 252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,252,242,15,16,2, + 255,137,213,232,251,1,16,137,252,234,252,233,244,71,255,248,105,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,131,244,62,252,242,15,16,2,255,137, + 213,232,251,1,17,137,252,234,252,233,244,71,255,248,106,129,252,248,239,15, + 130,244,62,129,122,253,4,239,15,131,244,62,252,242,15,16,2,255,137,213,232, + 251,1,18,137,252,234,252,233,244,71,248,107,255,248,108,129,252,248,239,15, + 130,244,62,129,122,253,4,239,15,131,244,62,252,242,15,16,2,139,106,252,248, + 252,242,15,89,133,233,252,233,244,71,255,248,108,129,252,248,239,15,130,244, + 62,129,122,253,4,239,15,131,244,62,221,2,139,106,252,248,220,141,233,252, + 233,244,72,255,248,109,129,252,248,239,15,130,244,62,129,122,253,4,239,15, + 131,244,62,129,122,253,12,239,15,131,244,62,221,2,221,66,8,217,252,243,252, + 233,244,72,248,110,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,129,122,253,12,239,255,15,131,244,62,221,66,8,221,2,217,252,253,221, + 217,252,233,244,72,248,111,129,252,248,239,15,130,244,62,139,106,4,129,252, + 253,239,15,131,244,62,139,114,252,252,139,2,137,106,252,252,137,66,252,248, + 209,229,129,252,253,0,0,224,252,255,15,131,244,249,9,232,15,132,244,249,184, + 252,254,3,0,0,129,252,253,0,0,32,0,15,130,244,250,248,1,193,252,237,21,41, + 197,255,252,242,15,42,197,255,137,108,36,80,219,68,36,80,255,139,106,252, + 
252,129,229,252,255,252,255,15,128,129,205,0,0,224,63,137,106,252,252,248, + 2,255,252,242,15,17,2,255,221,26,255,184,237,252,233,244,77,248,3,255,15, + 87,192,252,233,244,2,255,217,252,238,252,233,244,2,255,248,4,255,252,242, + 15,16,2,72,189,237,237,102,72,15,110,205,252,242,15,89,193,252,242,15,17, + 66,252,248,255,221,2,199,68,36,80,0,0,128,90,216,76,36,80,221,90,252,248, + 255,139,106,252,252,184,52,4,0,0,209,229,252,233,244,1,255,248,112,129,252, + 248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,252,242,15,16,2,255, + 248,112,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,221, + 2,255,139,106,4,139,114,252,252,209,229,129,252,253,0,0,224,252,255,15,132, + 244,250,255,15,40,224,232,244,113,252,242,15,92,224,248,1,252,242,15,17,66, + 252,248,252,242,15,17,34,255,217,192,232,244,113,220,252,233,248,1,221,90, + 252,248,221,26,255,139,66,252,252,139,106,4,49,232,15,136,244,249,248,2,184, + 237,252,233,244,77,248,3,129,252,245,0,0,0,128,137,106,4,252,233,244,2,248, + 4,255,15,87,228,252,233,244,1,255,217,252,238,217,201,252,233,244,1,255,248, + 114,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122, + 253,12,239,15,131,244,62,221,66,8,221,2,248,1,217,252,248,223,224,158,15, + 138,244,1,221,217,252,233,244,72,255,248,115,129,252,248,239,15,130,244,62, + 129,122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,252,242, + 15,16,2,252,242,15,16,74,8,232,244,116,252,233,244,71,255,248,115,129,252, + 248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239, + 15,131,244,62,221,2,221,66,8,232,244,116,252,233,244,72,255,248,117,185,2, + 0,0,0,129,122,253,4,239,255,15,133,244,250,139,42,248,1,57,193,15,131,244, + 70,129,124,253,202,252,252,239,15,133,244,249,59,108,202,252,248,15,79,108, + 202,252,248,131,193,1,252,233,244,1,248,3,15,135,244,62,255,252,233,244,252, + 248,4,15,135,244,62,255,252,242,15,16,2,248,5,57,193,15,131,244,71,129,124, + 253,202,252,252,239,255,15,130,244,252,15,135,244,62,252,242,15,42,76,202, + 252,248,252,233,244,253,255,248,6,252,242,15,16,76,202,252,248,248,7,252, + 242,15,93,193,131,193,1,252,233,244,5,255,248,118,185,2,0,0,0,129,122,253, + 4,239,255,15,133,244,250,139,42,248,1,57,193,15,131,244,70,129,124,253,202, + 252,252,239,15,133,244,249,59,108,202,252,248,15,76,108,202,252,248,131,193, + 1,252,233,244,1,248,3,15,135,244,62,255,248,6,252,242,15,16,76,202,252,248, + 248,7,252,242,15,95,193,131,193,1,252,233,244,5,255,248,9,221,216,252,233, + 244,62,255,248,119,129,252,248,239,15,130,244,62,129,122,253,4,239,15,133, + 244,62,139,42,255,139,173,233,252,233,244,70,255,252,242,15,42,133,233,252, + 233,244,71,255,219,133,233,252,233,244,72,255,248,120,129,252,248,239,15, + 133,244,62,129,122,253,4,239,15,133,244,62,139,42,139,114,252,252,131,189, + 233,1,15,130,244,80,15,182,173,233,255,252,242,15,42,197,252,233,244,71,255, + 137,108,36,80,219,68,36,80,252,233,244,72,255,248,121,139,171,233,59,171, + 233,15,130,244,247,232,244,74,248,1,129,252,248,239,15,133,244,62,129,122, + 253,4,239,255,15,133,244,62,139,42,129,252,253,252,255,0,0,0,15,135,244,62, + 137,108,36,84,255,15,131,244,62,252,242,15,44,42,129,252,253,252,255,0,0, + 0,15,135,244,62,137,108,36,84,255,15,131,244,62,221,2,219,92,36,84,129,124, + 36,84,252,255,0,0,0,15,135,244,62,255,199,68,36,32,1,0,0,0,72,141,68,36,84, + 248,122,139,108,36,96,137,149,233,68,139,68,36,32,72,137,194,137,252,233, + 137,116,36,100,232,251,1,19,139,149,233,139,114,252,252,199,66,252,252,237, + 137,66,252,248,252,233,244,65,248,123,139,171,233,59,171,233,15,130,244,247, 
+ 232,244,74,248,1,199,68,36,84,252,255,252,255,252,255,252,255,129,252,248, + 239,15,130,244,62,15,134,244,247,129,122,253,20,239,255,15,133,244,62,139, + 106,16,137,108,36,84,255,15,131,244,62,252,242,15,44,106,16,137,108,36,84, + 255,15,131,244,62,221,66,16,219,92,36,84,255,248,1,129,122,253,4,239,15,133, + 244,62,129,122,253,12,239,255,139,42,137,108,36,32,139,173,233,255,139,74, + 8,255,252,242,15,44,74,8,255,139,68,36,84,57,197,15,130,244,251,248,2,133, + 201,15,142,244,253,248,3,139,108,36,32,41,200,15,140,244,124,141,172,253, + 13,233,131,192,1,248,4,137,68,36,32,137,232,252,233,244,122,248,5,15,140, + 244,252,141,68,40,1,252,233,244,2,248,6,137,232,252,233,244,2,248,7,255,15, + 132,244,254,1,252,233,131,193,1,15,143,244,3,248,8,185,1,0,0,0,252,233,244, + 3,248,124,49,192,252,233,244,4,248,125,129,252,248,239,15,130,244,62,139, + 171,233,59,171,233,15,130,244,247,232,244,74,248,1,255,129,122,253,4,239, + 15,133,244,62,129,122,253,12,239,139,42,255,15,133,244,62,139,66,8,255,15, + 131,244,62,252,242,15,44,66,8,255,15,131,244,62,221,66,8,219,92,36,84,139, + 68,36,84,255,133,192,15,142,244,124,131,189,233,1,15,130,244,124,15,133,244, + 126,57,131,233,15,130,244,126,15,182,141,233,139,171,233,137,68,36,32,248, + 1,136,77,0,131,197,1,131,232,1,15,133,244,1,139,131,233,252,233,244,122,248, + 127,129,252,248,239,255,15,130,244,62,139,171,233,59,171,233,15,130,244,247, + 232,244,74,248,1,129,122,253,4,239,15,133,244,62,139,42,139,133,233,133,192, + 15,132,244,124,57,131,233,15,130,244,128,129,197,239,137,116,36,84,137,68, + 36,32,139,179,233,248,1,255,15,182,77,0,131,197,1,131,232,1,136,12,6,15,133, + 244,1,137,252,240,139,116,36,84,252,233,244,122,248,129,129,252,248,239,15, + 130,244,62,139,171,233,59,171,233,15,130,244,247,232,244,74,248,1,129,122, + 253,4,239,15,133,244,62,139,42,139,133,233,57,131,233,255,15,130,244,128, + 129,197,239,137,116,36,84,137,68,36,32,139,179,233,252,233,244,249,248,1, + 15,182,76,5,0,131,252,249,65,15,130,244,248,131,252,249,90,15,135,244,248, + 131,252,241,32,248,2,136,12,6,248,3,131,232,1,15,137,244,1,137,252,240,139, + 116,36,84,252,233,244,122,248,130,129,252,248,239,15,130,244,62,255,139,171, + 233,59,171,233,15,130,244,247,232,244,74,248,1,129,122,253,4,239,15,133,244, + 62,139,42,139,133,233,57,131,233,15,130,244,128,129,197,239,137,116,36,84, + 137,68,36,32,139,179,233,252,233,244,249,248,1,15,182,76,5,0,131,252,249, + 97,15,130,244,248,255,131,252,249,122,15,135,244,248,131,252,241,32,248,2, + 136,12,6,248,3,131,232,1,15,137,244,1,137,252,240,139,116,36,84,252,233,244, + 122,248,131,129,252,248,239,15,130,244,62,129,122,253,4,239,15,133,244,62, + 137,213,139,10,232,251,1,20,137,252,234,255,137,197,252,233,244,70,255,252, + 242,15,42,192,252,233,244,71,255,248,132,129,252,248,239,15,130,244,62,129, + 122,253,4,239,255,15,133,244,247,139,42,252,233,244,88,248,1,15,135,244,62, + 255,252,242,15,16,2,72,189,237,237,102,72,15,110,205,252,242,15,88,193,102, + 15,126,197,255,252,233,244,88,255,248,133,129,252,248,239,15,130,244,62,255, + 72,189,237,237,102,72,15,110,205,255,199,68,36,80,0,0,192,89,255,15,133,244, + 247,139,42,252,233,244,248,248,1,15,135,244,62,255,252,242,15,16,2,252,242, + 15,88,193,102,15,126,197,255,248,2,137,68,36,84,141,68,194,252,240,248,1, + 57,208,15,134,244,88,129,120,253,4,239,255,15,133,244,248,35,40,131,232,8, + 252,233,244,1,248,2,15,135,244,134,255,15,131,244,134,255,252,242,15,16,0, + 252,242,15,88,193,102,15,126,193,33,205,255,131,232,8,252,233,244,1,248,135, + 
129,252,248,239,15,130,244,62,255,15,133,244,248,11,40,131,232,8,252,233, + 244,1,248,2,15,135,244,134,255,252,242,15,16,0,252,242,15,88,193,102,15,126, + 193,9,205,255,131,232,8,252,233,244,1,248,136,129,252,248,239,15,130,244, + 62,255,15,133,244,248,51,40,131,232,8,252,233,244,1,248,2,15,135,244,134, + 255,252,242,15,16,0,252,242,15,88,193,102,15,126,193,49,205,255,131,232,8, + 252,233,244,1,248,137,129,252,248,239,15,130,244,62,129,122,253,4,239,255, + 248,2,15,205,252,233,244,88,248,138,129,252,248,239,15,130,244,62,129,122, + 253,4,239,255,248,2,252,247,213,255,248,88,252,242,15,42,197,252,233,244, + 71,255,248,134,139,68,36,84,252,233,244,62,255,248,139,129,252,248,239,15, + 130,244,62,129,122,253,4,239,255,248,2,129,122,253,12,239,15,133,244,62,139, + 74,8,255,248,139,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244, + 62,129,122,253,12,239,15,131,244,62,252,242,15,16,2,252,242,15,16,74,8,72, + 189,237,237,102,72,15,110,213,252,242,15,88,194,252,242,15,88,202,102,15, + 126,197,102,15,126,201,255,211,229,252,233,244,88,255,248,140,129,252,248, + 239,15,130,244,62,129,122,253,4,239,255,248,140,129,252,248,239,15,130,244, + 62,129,122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,252,242, + 15,16,2,252,242,15,16,74,8,72,189,237,237,102,72,15,110,213,252,242,15,88, + 194,252,242,15,88,202,102,15,126,197,102,15,126,201,255,211,252,237,252,233, + 244,88,255,248,141,129,252,248,239,15,130,244,62,129,122,253,4,239,255,248, + 141,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122, + 253,12,239,15,131,244,62,252,242,15,16,2,252,242,15,16,74,8,72,189,237,237, + 102,72,15,110,213,252,242,15,88,194,252,242,15,88,202,102,15,126,197,102, + 15,126,201,255,211,252,253,252,233,244,88,255,248,142,129,252,248,239,15, + 130,244,62,129,122,253,4,239,255,248,142,129,252,248,239,15,130,244,62,129, + 122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,252,242,15,16, + 2,252,242,15,16,74,8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252, + 242,15,88,202,102,15,126,197,102,15,126,201,255,211,197,252,233,244,88,255, + 248,143,129,252,248,239,15,130,244,62,129,122,253,4,239,255,248,143,129,252, + 248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239, + 15,131,244,62,252,242,15,16,2,252,242,15,16,74,8,72,189,237,237,102,72,15, + 110,213,252,242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201, + 255,211,205,252,233,244,88,248,126,184,237,252,233,244,62,248,128,184,237, + 248,62,139,108,36,96,139,114,252,252,137,116,36,100,137,149,233,141,68,194, + 252,248,141,136,233,137,133,233,139,66,252,248,59,141,233,15,135,244,251, + 137,252,233,252,255,144,233,139,149,233,133,192,15,143,244,77,248,1,255,139, + 141,233,41,209,193,252,233,3,133,192,141,65,1,139,106,252,248,15,133,244, + 32,139,181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,252, + 235,248,32,137,209,252,247,198,237,15,133,244,249,15,182,110,252,253,72,252, + 247,213,141,20,252,234,252,233,244,28,248,3,137,252,245,131,229,252,248,41, + 252,234,252,233,244,28,248,5,186,237,137,252,233,232,251,1,0,139,149,233, + 49,192,252,233,244,1,248,74,93,72,137,108,36,32,139,108,36,96,137,116,36, + 100,137,149,233,255,141,68,194,252,248,137,252,233,137,133,233,232,251,1, + 21,139,149,233,139,133,233,41,208,193,232,3,131,192,1,72,139,108,36,32,85, + 195,248,144,255,15,182,131,233,168,235,15,133,244,251,168,235,15,133,244, + 247,168,235,15,132,244,247,252,255,139,233,252,233,244,247,255,248,145,15, + 182,131,233,168,235,15,133,244,251,252,233,244,247,248,146,15,182,131,233, + 
168,235,15,133,244,251,168,235,15,132,244,251,252,255,139,233,15,132,244, + 247,168,235,15,132,244,251,248,1,255,139,108,36,96,137,149,233,137,252,242, + 137,252,233,232,251,1,22,248,3,139,149,233,248,4,15,182,78,252,253,248,5, + 15,182,110,252,252,15,183,70,252,254,252,255,164,253,252,235,233,248,147, + 131,198,4,139,77,232,137,76,36,84,252,233,244,4,248,148,255,139,106,252,248, + 139,173,233,15,182,133,233,141,4,194,139,108,36,96,137,149,233,137,133,233, + 137,252,242,141,139,233,72,137,171,233,137,116,36,100,232,251,1,23,252,233, + 244,3,255,248,149,137,116,36,100,255,248,150,255,137,116,36,100,131,206,1, + 248,1,255,141,68,194,252,248,139,108,36,96,137,149,233,137,133,233,137,252, + 242,137,252,233,232,251,1,24,199,68,36,100,0,0,0,0,255,131,230,252,254,255, + 139,149,233,72,137,193,139,133,233,41,208,72,137,205,15,182,78,252,253,193, + 232,3,131,192,1,252,255,229,248,151,255,65,85,65,84,65,83,65,82,65,81,65, + 80,87,86,85,72,141,108,36,88,85,83,82,81,80,15,182,69,252,248,138,101,252, + 240,76,137,125,252,248,76,137,117,252,240,139,93,0,139,139,233,199,131,233, + 237,137,131,233,137,139,233,72,129,252,236,239,72,131,197,128,252,242,68, + 15,17,125,252,248,252,242,68,15,17,117,252,240,252,242,68,15,17,109,232,252, + 242,68,15,17,101,224,252,242,68,15,17,93,216,252,242,68,15,17,85,208,252, + 242,68,15,17,77,200,252,242,68,15,17,69,192,252,242,15,17,125,184,252,242, + 15,17,117,176,252,242,15,17,109,168,252,242,15,17,101,160,252,242,15,17,93, + 152,252,242,15,17,85,144,252,242,15,17,77,136,252,242,15,17,69,128,139,171, + 233,139,147,233,72,137,171,233,199,131,233,0,0,0,0,137,149,233,72,141,148, + 253,36,233,141,139,233,232,251,1,25,72,139,141,233,72,129,225,239,137,169, + 233,139,149,233,139,177,233,252,233,244,247,255,248,152,255,72,141,140,253, + 36,233,248,1,102,68,15,111,185,233,102,68,15,111,177,233,102,68,15,111,169, + 233,102,68,15,111,161,233,102,68,15,111,153,233,102,68,15,111,145,233,102, + 68,15,111,137,233,102,68,15,111,129,233,102,15,111,185,233,72,137,204,102, + 15,111,49,76,139,124,36,16,76,139,116,36,24,76,139,108,36,32,76,139,100,36, + 80,133,192,15,136,244,249,137,68,36,84,139,122,252,248,139,191,233,139,191, + 233,199,131,233,0,0,0,0,199,131,233,237,139,6,15,182,204,15,182,232,131,198, + 4,193,232,16,129,252,253,239,15,130,244,248,255,139,68,36,84,248,2,252,255, + 36,252,235,248,3,252,247,216,137,252,233,137,194,232,251,1,26,255,248,90, + 255,217,124,36,4,137,68,36,8,102,184,0,4,102,11,68,36,4,102,37,252,255,252, + 247,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,139,68,36,8,195, + 255,248,153,72,184,237,237,102,72,15,110,208,72,184,237,237,102,72,15,110, + 216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,15,85,208,252, + 242,15,88,203,252,242,15,92,203,102,15,86,202,72,184,237,237,102,72,15,110, + 208,252,242,15,194,193,1,102,15,84,194,252,242,15,92,200,15,40,193,248,1, + 195,248,92,255,217,124,36,4,137,68,36,8,102,184,0,8,102,11,68,36,4,102,37, + 252,255,252,251,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,139, + 68,36,8,195,255,248,154,72,184,237,237,102,72,15,110,208,72,184,237,237,102, + 72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,15, + 85,208,252,242,15,88,203,252,242,15,92,203,102,15,86,202,72,184,237,237,102, + 72,15,110,208,252,242,15,194,193,6,102,15,84,194,252,242,15,92,200,15,40, + 193,248,1,195,248,113,255,217,124,36,4,137,68,36,8,102,184,0,12,102,11,68, + 36,4,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,139,68,36,8,195, + 
255,248,155,72,184,237,237,102,72,15,110,208,72,184,237,237,102,72,15,110, + 216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,15,85,208,15, + 40,193,252,242,15,88,203,252,242,15,92,203,72,184,237,237,102,72,15,110,216, + 252,242,15,194,193,1,102,15,84,195,252,242,15,92,200,102,15,86,202,15,40, + 193,248,1,195,248,156,255,15,40,232,252,242,15,94,193,72,184,237,237,102, + 72,15,110,208,72,184,237,237,102,72,15,110,216,15,40,224,102,15,84,226,102, + 15,46,220,15,134,244,247,102,15,85,208,252,242,15,88,227,252,242,15,92,227, + 102,15,86,226,72,184,237,237,102,72,15,110,208,252,242,15,194,196,1,102,15, + 84,194,252,242,15,92,224,15,40,197,252,242,15,89,204,252,242,15,92,193,195, + 248,1,252,242,15,89,200,15,40,197,252,242,15,92,193,195,255,217,193,216,252, + 241,217,124,36,4,102,184,0,4,102,11,68,36,4,102,37,252,255,252,247,102,137, + 68,36,6,217,108,36,6,217,252,252,217,108,36,4,222,201,222,252,233,195,255, + 248,97,217,252,234,222,201,248,157,217,84,36,8,129,124,36,8,0,0,128,127,15, + 132,244,247,129,124,36,8,0,0,128,252,255,15,132,244,248,248,158,217,192,217, + 252,252,220,252,233,217,201,217,252,240,217,232,222,193,217,252,253,221,217, + 248,1,195,248,2,221,216,217,252,238,195,255,248,116,255,248,159,252,242,15, + 45,193,252,242,15,42,208,102,15,46,202,15,133,244,254,15,138,244,255,248, + 160,131,252,248,1,15,142,244,252,248,1,169,1,0,0,0,15,133,244,248,252,242, + 15,89,192,209,232,252,233,244,1,248,2,209,232,15,132,244,251,15,40,200,248, + 3,252,242,15,89,192,209,232,15,132,244,250,15,131,244,3,255,252,242,15,89, + 200,252,233,244,3,248,4,252,242,15,89,193,248,5,195,248,6,15,132,244,5,15, + 130,244,253,252,247,216,232,244,1,72,184,237,237,102,72,15,110,200,252,242, + 15,94,200,15,40,193,195,248,7,72,184,237,237,102,72,15,110,192,195,248,8, + 102,72,15,126,200,72,209,224,72,193,192,12,72,61,252,254,15,0,0,15,132,244, + 248,102,72,15,126,192,72,209,224,15,132,244,250,255,72,193,192,12,72,61,252, + 254,15,0,0,15,132,244,251,252,242,15,17,76,36,16,252,242,15,17,68,36,8,221, + 68,36,16,221,68,36,8,217,252,241,217,192,217,252,252,220,252,233,217,201, + 217,252,240,217,232,222,193,217,252,253,221,217,221,92,36,8,252,242,15,16, + 68,36,8,195,248,9,72,184,237,237,102,72,15,110,208,102,15,46,194,15,132,244, + 247,15,40,193,248,1,195,248,2,72,184,237,237,102,72,15,110,208,102,15,84, + 194,72,184,237,237,102,72,15,110,208,102,15,46,194,15,132,244,1,102,15,80, + 193,15,87,192,136,196,15,146,208,48,224,15,133,244,1,248,3,72,184,237,237, + 255,102,72,15,110,192,195,248,4,102,15,80,193,133,192,15,133,244,3,15,87, + 192,195,248,5,102,15,80,193,133,192,15,132,244,3,15,87,192,195,248,161,255, + 131,252,250,1,15,130,244,90,15,132,244,92,131,252,250,3,15,130,244,113,15, + 135,244,248,252,242,15,81,192,195,248,2,252,242,15,17,68,36,8,221,68,36,8, + 131,252,250,5,15,135,244,248,88,15,132,244,247,232,244,97,80,252,233,244, + 253,248,1,232,244,157,255,80,252,233,244,253,248,2,131,252,250,7,15,132,244, + 247,15,135,244,248,217,252,237,217,201,217,252,241,252,233,244,253,248,1, + 217,232,217,201,217,252,241,252,233,244,253,248,2,131,252,250,9,15,132,244, + 247,15,135,244,248,217,252,236,217,201,217,252,241,252,233,244,253,248,1, + 255,217,252,254,252,233,244,253,248,2,131,252,250,11,15,132,244,247,15,135, + 244,255,217,252,255,252,233,244,253,248,1,217,252,242,221,216,248,7,221,92, + 36,8,252,242,15,16,68,36,8,195,255,139,84,36,12,221,68,36,4,131,252,250,1, + 15,130,244,90,15,132,244,92,131,252,250,3,15,130,244,113,15,135,244,248,217, + 
252,250,195,248,2,131,252,250,5,15,130,244,97,15,132,244,157,131,252,250, + 7,15,132,244,247,15,135,244,248,217,252,237,217,201,217,252,241,195,248,1, + 217,232,217,201,217,252,241,195,248,2,131,252,250,9,15,132,244,247,255,15, + 135,244,248,217,252,236,217,201,217,252,241,195,248,1,217,252,254,195,248, + 2,131,252,250,11,15,132,244,247,15,135,244,255,217,252,255,195,248,1,217, + 252,242,221,216,195,255,248,9,204,255,248,162,255,65,131,252,248,1,15,132, + 244,247,15,135,244,248,252,242,15,88,193,195,248,1,252,242,15,92,193,195, + 248,2,65,131,252,248,3,15,132,244,247,15,135,244,248,252,242,15,89,193,195, + 248,1,252,242,15,94,193,195,248,2,65,131,252,248,5,15,130,244,156,15,132, + 244,116,65,131,252,248,7,15,132,244,247,15,135,244,248,72,184,237,237,255, + 102,72,15,110,200,15,87,193,195,248,1,72,184,237,237,102,72,15,110,200,15, + 84,193,195,248,2,65,131,252,248,9,15,135,244,248,252,242,15,17,68,36,8,252, + 242,15,17,76,36,16,221,68,36,8,221,68,36,16,15,132,244,247,217,252,243,248, + 7,221,92,36,8,252,242,15,16,68,36,8,195,248,1,217,201,217,252,253,221,217, + 252,233,244,7,248,2,65,131,252,248,11,15,132,244,247,15,135,244,255,252,242, + 15,93,193,195,248,1,252,242,15,95,193,195,248,9,204,255,139,68,36,20,221, + 68,36,4,221,68,36,12,131,252,248,1,15,132,244,247,15,135,244,248,222,193, + 195,248,1,222,252,233,195,248,2,131,252,248,3,15,132,244,247,15,135,244,248, + 222,201,195,248,1,222,252,249,195,248,2,131,252,248,5,15,130,244,156,15,132, + 244,116,131,252,248,7,15,132,244,247,15,135,244,248,255,221,216,217,224,195, + 248,1,221,216,217,225,195,248,2,131,252,248,9,15,132,244,247,15,135,244,248, + 217,252,243,195,248,1,217,201,217,252,253,221,217,195,248,2,131,252,248,11, + 15,132,244,247,15,135,244,255,255,219,252,233,219,209,221,217,195,248,1,219, + 252,233,218,209,221,217,195,255,221,225,223,224,252,246,196,1,15,132,244, + 248,217,201,248,2,221,216,195,248,1,221,225,223,224,252,246,196,1,15,133, + 244,248,217,201,248,2,221,216,195,255,248,163,137,200,86,72,137,214,83,15, + 162,137,6,137,94,4,137,78,8,137,86,12,91,94,195,248,164,255,85,72,137,229, + 83,72,137,203,139,131,233,72,41,196,255,15,182,139,233,131,252,233,1,15,136, + 244,248,248,1,72,139,132,253,203,233,72,137,132,253,204,233,131,252,233,1, + 15,137,244,1,248,2,15,182,131,233,72,139,139,233,72,139,147,233,76,139,131, + 233,76,139,139,233,133,192,15,132,244,251,15,40,131,233,15,40,139,233,15, + 40,147,233,15,40,155,233,248,5,255,252,255,147,233,72,137,131,233,15,41,131, + 233,255,72,139,93,252,248,201,195,255,248,165,255,249,255,129,124,253,202, + 4,239,15,133,244,253,129,124,253,194,4,239,15,133,244,254,139,44,202,131, + 198,4,59,44,194,255,15,141,244,255,255,15,140,244,255,255,15,143,244,255, + 255,15,142,244,255,255,248,6,15,183,70,252,254,141,180,253,134,233,248,9, + 139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,7, + 15,135,244,43,129,124,253,194,4,239,15,130,244,247,15,133,244,43,255,252, + 242,15,42,4,194,252,233,244,248,255,221,4,202,219,4,194,252,233,244,249,255, + 248,8,15,135,244,43,255,252,242,15,42,12,202,252,242,15,16,4,194,131,198, + 4,102,15,46,193,255,15,134,244,9,255,15,135,244,9,255,15,130,244,9,255,15, + 131,244,9,255,252,233,244,6,255,219,4,202,252,233,244,248,255,129,124,253, + 202,4,239,15,131,244,43,129,124,253,194,4,239,15,131,244,43,255,248,1,252, + 242,15,16,4,194,248,2,131,198,4,102,15,46,4,202,248,3,255,248,1,221,4,202, + 248,2,221,4,194,248,3,131,198,4,255,223,252,233,221,216,255,218,252,233,223, + 224,158,255,15,134,244,247,255,15,135,244,247,255,15,130,244,247,255,15,131, + 
244,247,255,15,183,70,252,254,141,180,253,134,233,248,1,139,6,15,182,204, + 15,182,232,131,198,4,193,232,16,252,255,36,252,235,255,139,108,194,4,131, + 198,4,255,129,252,253,239,15,133,244,253,129,124,253,202,4,239,15,133,244, + 254,139,44,194,59,44,202,255,15,133,244,255,255,15,132,244,255,255,15,183, + 70,252,254,141,180,253,134,233,248,9,139,6,15,182,204,15,182,232,131,198, + 4,193,232,16,252,255,36,252,235,248,7,15,135,244,251,129,124,253,202,4,239, + 15,130,244,247,15,133,244,251,255,252,242,15,42,4,202,255,219,4,202,255,252, + 233,244,248,248,8,15,135,244,251,255,252,242,15,42,4,194,102,15,46,4,202, + 255,219,4,194,221,4,202,255,252,233,244,250,255,129,252,253,239,15,131,244, + 251,129,124,253,202,4,239,15,131,244,251,255,248,1,252,242,15,16,4,202,248, + 2,102,15,46,4,194,248,4,255,248,1,221,4,202,248,2,221,4,194,248,4,255,15, + 138,244,248,15,133,244,248,255,15,138,244,248,15,132,244,247,255,248,1,15, + 183,70,252,254,141,180,253,134,233,248,2,255,248,2,15,183,70,252,254,141, + 180,253,134,233,248,1,255,252,233,244,9,255,129,252,253,239,15,132,244,48, + 129,124,253,202,4,239,15,132,244,48,255,57,108,202,4,15,133,244,2,129,252, + 253,239,15,131,244,1,139,12,202,139,4,194,57,193,15,132,244,1,129,252,253, + 239,15,135,244,2,139,169,233,133,252,237,15,132,244,2,252,246,133,233,235, + 15,133,244,2,255,49,252,237,255,189,1,0,0,0,255,252,233,244,47,255,248,3, + 129,252,253,239,255,15,133,244,9,255,252,233,244,48,255,72,252,247,208,139, + 108,202,4,131,198,4,129,252,253,239,15,133,244,249,139,12,202,59,12,135,255, + 139,108,202,4,131,198,4,255,129,252,253,239,15,133,244,253,129,124,253,199, + 4,239,15,133,244,254,139,44,199,59,44,202,255,15,183,70,252,254,141,180,253, + 134,233,248,9,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36, + 252,235,248,7,15,135,244,249,129,124,253,199,4,239,15,130,244,247,255,252, + 242,15,42,4,199,255,219,4,199,255,252,233,244,248,248,8,255,252,242,15,42, + 4,202,102,15,46,4,199,255,219,4,202,221,4,199,255,129,252,253,239,15,131, + 244,249,255,248,1,252,242,15,16,4,199,248,2,102,15,46,4,202,248,4,255,248, + 1,221,4,199,248,2,221,4,202,248,4,255,72,252,247,208,139,108,202,4,131,198, + 4,57,197,255,15,133,244,249,15,183,70,252,254,141,180,253,134,233,248,2,139, + 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,129, + 252,253,239,15,133,244,2,252,233,244,48,255,15,132,244,248,129,252,253,239, + 15,132,244,48,15,183,70,252,254,141,180,253,134,233,248,2,139,6,15,182,204, + 15,182,232,131,198,4,193,232,16,252,255,36,252,235,255,139,108,194,4,131, + 198,4,129,252,253,239,255,137,108,202,4,139,44,194,137,44,202,255,72,139, + 44,194,72,137,44,202,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252, + 255,36,252,235,255,49,252,237,129,124,253,194,4,239,129,213,239,137,108,202, + 4,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,255, + 129,124,253,194,4,239,15,133,244,251,139,44,194,252,247,221,15,128,244,250, + 199,68,202,4,237,137,44,202,248,9,139,6,15,182,204,15,182,232,131,198,4,193, + 232,16,252,255,36,252,235,248,4,199,68,202,4,0,0,224,65,199,4,202,0,0,0,0, + 252,233,244,9,248,5,15,135,244,53,255,129,124,253,194,4,239,15,131,244,53, + 255,252,242,15,16,4,194,72,184,237,237,102,72,15,110,200,15,87,193,252,242, + 15,17,4,202,255,221,4,194,217,224,221,28,202,255,129,124,253,194,4,239,15, + 133,244,248,139,4,194,255,139,128,233,248,1,199,68,202,4,237,137,4,202,255, + 15,87,192,252,242,15,42,128,233,248,1,252,242,15,17,4,202,255,219,128,233, + 248,1,221,28,202,255,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252, + 
255,36,252,235,248,2,129,124,253,194,4,239,15,133,244,56,139,12,194,255,139, + 169,233,131,252,253,0,15,133,244,255,248,3,255,248,57,137,213,232,251,1,20, + 255,252,242,15,42,192,255,137,252,234,15,182,78,252,253,252,233,244,1,255, + 248,9,252,246,133,233,235,15,133,244,3,252,233,244,56,255,15,182,252,236, + 15,182,192,255,129,124,253,252,234,4,239,15,133,244,50,129,124,253,199,4, + 239,15,133,244,50,139,44,252,234,3,44,199,15,128,244,49,255,129,124,253,252, + 234,4,239,15,133,244,52,129,124,253,199,4,239,15,133,244,52,139,4,199,3,4, + 252,234,15,128,244,51,255,129,124,253,252,234,4,239,15,133,244,55,129,124, + 253,194,4,239,15,133,244,55,139,44,252,234,3,44,194,15,128,244,54,255,199, + 68,202,4,237,255,129,124,253,252,234,4,239,15,131,244,50,255,129,124,253, + 199,4,239,15,131,244,50,255,252,242,15,16,4,252,234,252,242,15,88,4,199,255, + 221,4,252,234,220,4,199,255,129,124,253,252,234,4,239,15,131,244,52,255,129, + 124,253,199,4,239,15,131,244,52,255,252,242,15,16,4,199,252,242,15,88,4,252, + 234,255,221,4,199,220,4,252,234,255,129,124,253,252,234,4,239,15,131,244, + 55,129,124,253,194,4,239,15,131,244,55,255,252,242,15,16,4,252,234,252,242, + 15,88,4,194,255,221,4,252,234,220,4,194,255,129,124,253,252,234,4,239,15, + 133,244,50,129,124,253,199,4,239,15,133,244,50,139,44,252,234,43,44,199,15, + 128,244,49,255,129,124,253,252,234,4,239,15,133,244,52,129,124,253,199,4, + 239,15,133,244,52,139,4,199,43,4,252,234,15,128,244,51,255,129,124,253,252, + 234,4,239,15,133,244,55,129,124,253,194,4,239,15,133,244,55,139,44,252,234, + 43,44,194,15,128,244,54,255,252,242,15,16,4,252,234,252,242,15,92,4,199,255, + 221,4,252,234,220,36,199,255,252,242,15,16,4,199,252,242,15,92,4,252,234, + 255,221,4,199,220,36,252,234,255,252,242,15,16,4,252,234,252,242,15,92,4, + 194,255,221,4,252,234,220,36,194,255,129,124,253,252,234,4,239,15,133,244, + 50,129,124,253,199,4,239,15,133,244,50,139,44,252,234,15,175,44,199,15,128, + 244,49,255,129,124,253,252,234,4,239,15,133,244,52,129,124,253,199,4,239, + 15,133,244,52,139,4,199,15,175,4,252,234,15,128,244,51,255,129,124,253,252, + 234,4,239,15,133,244,55,129,124,253,194,4,239,15,133,244,55,139,44,252,234, + 15,175,44,194,15,128,244,54,255,252,242,15,16,4,252,234,252,242,15,89,4,199, + 255,221,4,252,234,220,12,199,255,252,242,15,16,4,199,252,242,15,89,4,252, + 234,255,221,4,199,220,12,252,234,255,252,242,15,16,4,252,234,252,242,15,89, + 4,194,255,221,4,252,234,220,12,194,255,252,242,15,16,4,252,234,252,242,15, + 94,4,199,255,221,4,252,234,220,52,199,255,252,242,15,16,4,199,252,242,15, + 94,4,252,234,255,221,4,199,220,52,252,234,255,252,242,15,16,4,252,234,252, + 242,15,94,4,194,255,221,4,252,234,220,52,194,255,252,242,15,16,4,252,234, + 252,242,15,16,12,199,255,221,4,252,234,221,4,199,255,252,242,15,16,4,199, + 252,242,15,16,12,252,234,255,221,4,199,221,4,252,234,255,252,242,15,16,4, + 252,234,252,242,15,16,12,194,255,221,4,252,234,221,4,194,255,248,166,232, + 244,156,255,252,233,244,166,255,232,244,116,255,15,182,252,236,15,182,192, + 139,76,36,96,137,145,233,141,20,194,65,137,192,65,41,232,248,35,137,205,137, + 116,36,100,232,251,1,27,139,149,233,133,192,15,133,244,44,15,182,110,252, + 255,15,182,78,252,253,72,139,4,252,234,72,137,4,202,139,6,15,182,204,15,182, + 232,131,198,4,193,232,16,252,255,36,252,235,255,72,252,247,208,139,4,135, + 199,68,202,4,237,137,4,202,139,6,15,182,204,15,182,232,131,198,4,193,232, + 16,252,255,36,252,235,255,15,191,192,199,68,202,4,237,137,4,202,255,15,191, + 192,252,242,15,42,192,252,242,15,17,4,202,255,223,70,252,254,221,28,202,255, + 
252,242,15,16,4,199,252,242,15,17,4,202,255,221,4,199,221,28,202,255,72,252, + 247,208,137,68,202,4,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252, + 255,36,252,235,255,141,76,202,12,141,68,194,4,189,237,137,105,252,248,248, + 1,137,41,131,193,8,57,193,15,134,244,1,139,6,15,182,204,15,182,232,131,198, + 4,193,232,16,252,255,36,252,235,255,139,106,252,248,139,172,253,133,233,139, + 173,233,72,139,69,0,72,137,4,202,139,6,15,182,204,15,182,232,131,198,4,193, + 232,16,252,255,36,252,235,255,139,106,252,248,139,172,253,141,233,128,189, + 233,0,139,173,233,139,12,194,139,68,194,4,137,77,0,137,69,4,15,132,244,247, + 252,246,133,233,235,15,133,244,248,248,1,139,6,15,182,204,15,182,232,131, + 198,4,193,232,16,252,255,36,252,235,248,2,129,232,239,129,252,248,239,15, + 134,244,1,252,246,129,233,235,15,132,244,1,135,213,141,139,233,255,232,251, + 1,28,137,252,234,252,233,244,1,255,72,252,247,208,139,106,252,248,139,172, + 253,141,233,139,12,135,139,133,233,137,8,199,64,4,237,252,246,133,233,235, + 15,133,244,248,248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252, + 255,36,252,235,248,2,252,246,129,233,235,15,132,244,1,128,189,233,0,15,132, + 244,1,137,213,137,194,141,139,233,232,251,1,28,137,252,234,252,233,244,1, + 255,139,106,252,248,255,252,242,15,16,4,199,255,139,172,253,141,233,139,141, + 233,255,252,242,15,17,1,255,221,25,255,72,252,247,208,139,106,252,248,139, + 172,253,141,233,139,141,233,137,65,4,139,6,15,182,204,15,182,232,131,198, + 4,193,232,16,252,255,36,252,235,255,141,180,253,134,233,139,108,36,96,131, + 189,233,0,15,132,244,247,137,149,233,141,20,202,137,252,233,232,251,1,29, + 139,149,233,248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255, + 36,252,235,255,72,252,247,208,139,108,36,96,137,149,233,68,139,66,252,248, + 139,20,135,137,252,233,137,116,36,100,232,251,1,30,139,149,233,15,182,78, + 252,253,137,4,202,199,68,202,4,237,139,6,15,182,204,15,182,232,131,198,4, + 193,232,16,252,255,36,252,235,255,139,108,36,96,137,149,233,139,139,233,59, + 139,233,137,116,36,100,15,131,244,251,248,1,65,137,192,37,252,255,7,0,0,65, + 193,232,11,61,252,255,7,0,0,15,132,244,249,248,2,137,252,233,137,194,232, + 251,1,31,139,149,233,15,182,78,252,253,137,4,202,199,68,202,4,237,139,6,15, + 182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,184,1,8, + 0,0,252,233,244,2,248,5,137,252,233,232,251,1,32,15,183,70,252,254,252,233, + 244,1,255,72,252,247,208,139,108,36,96,139,139,233,137,116,36,100,59,139, + 233,137,149,233,15,131,244,249,248,2,139,20,135,137,252,233,232,251,1,33, + 139,149,233,15,182,78,252,253,137,4,202,199,68,202,4,237,139,6,15,182,204, + 15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,137,252,233,232, + 251,1,32,15,183,70,252,254,72,252,247,208,252,233,244,2,255,72,252,247,208, + 139,106,252,248,139,173,233,139,4,135,252,233,244,167,255,72,252,247,208, + 139,106,252,248,139,173,233,139,4,135,252,233,244,168,255,15,182,252,236, + 15,182,192,129,124,253,252,234,4,239,15,133,244,38,139,44,252,234,255,129, + 124,253,194,4,239,15,133,244,251,139,4,194,255,129,124,253,194,4,239,15,131, + 244,251,255,252,242,15,16,4,194,252,242,15,45,192,252,242,15,42,200,102,15, + 46,193,255,15,133,244,38,255,59,133,233,15,131,244,38,193,224,3,3,133,233, + 129,120,253,4,239,15,132,244,248,72,139,40,72,137,44,202,248,1,139,6,15,182, + 204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,2,131,189,233, + 0,15,132,244,249,139,141,233,252,246,129,233,235,15,132,244,38,15,182,78, + 252,253,248,3,199,68,202,4,237,252,233,244,1,248,5,255,129,124,253,194,4, + 
239,15,133,244,38,139,4,194,252,233,244,167,255,15,182,252,236,15,182,192, + 72,252,247,208,139,4,135,129,124,253,252,234,4,239,15,133,244,36,139,44,252, + 234,248,167,139,141,233,35,136,233,105,201,239,3,141,233,248,1,129,185,233, + 239,15,133,244,250,57,129,233,15,133,244,250,129,121,253,4,239,15,132,244, + 251,15,182,70,252,253,72,139,41,72,137,44,194,248,2,255,139,6,15,182,204, + 15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,15,182,70,252,253, + 199,68,194,4,237,252,233,244,2,248,4,139,137,233,133,201,15,133,244,1,248, + 5,139,141,233,133,201,15,132,244,3,252,246,129,233,235,15,133,244,3,252,233, + 244,36,255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244, + 37,139,44,252,234,59,133,233,15,131,244,37,193,224,3,3,133,233,129,120,253, + 4,239,15,132,244,248,72,139,40,72,137,44,202,248,1,139,6,15,182,204,15,182, + 232,131,198,4,193,232,16,252,255,36,252,235,248,2,131,189,233,0,15,132,244, + 249,139,141,233,252,246,129,233,235,15,132,244,37,255,15,182,78,252,253,248, + 3,199,68,202,4,237,252,233,244,1,255,15,182,252,236,15,182,192,129,124,253, + 252,234,4,239,15,133,244,41,139,44,252,234,255,15,133,244,41,255,59,133,233, + 15,131,244,41,193,224,3,3,133,233,129,120,253,4,239,15,132,244,249,248,1, + 252,246,133,233,235,15,133,244,253,248,2,72,139,44,202,72,137,40,139,6,15, + 182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,131,189, + 233,0,15,132,244,1,139,141,233,252,246,129,233,235,255,15,132,244,41,15,182, + 78,252,253,252,233,244,1,248,5,129,124,253,194,4,239,15,133,244,41,139,4, + 194,252,233,244,168,248,7,128,165,233,235,139,139,233,137,171,233,137,141, + 233,15,182,78,252,253,252,233,244,2,255,15,182,252,236,15,182,192,72,252, + 247,208,139,4,135,129,124,253,252,234,4,239,15,133,244,39,139,44,252,234, + 248,168,139,141,233,35,136,233,105,201,239,198,133,233,0,3,141,233,248,1, + 129,185,233,239,15,133,244,251,57,129,233,15,133,244,251,129,121,253,4,239, + 15,132,244,250,248,2,255,252,246,133,233,235,15,133,244,253,248,3,15,182, + 70,252,253,72,139,44,194,72,137,41,139,6,15,182,204,15,182,232,131,198,4, + 193,232,16,252,255,36,252,235,248,4,131,189,233,0,15,132,244,2,137,76,36, + 80,139,141,233,252,246,129,233,235,15,132,244,39,139,76,36,80,252,233,244, + 2,248,5,139,137,233,133,201,15,133,244,1,255,139,141,233,133,201,15,132,244, + 252,252,246,129,233,235,15,132,244,39,248,6,137,68,36,80,199,68,36,84,237, + 137,108,36,32,139,76,36,96,137,145,233,76,141,68,36,80,137,252,234,137,205, + 137,116,36,100,232,251,1,34,139,149,233,139,108,36,32,137,193,252,233,244, + 2,248,7,128,165,233,235,139,131,233,137,171,233,137,133,233,252,233,244,3, + 255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,40,139, + 44,252,234,59,133,233,15,131,244,40,193,224,3,3,133,233,129,120,253,4,239, + 15,132,244,249,248,1,252,246,133,233,235,15,133,244,253,248,2,72,139,12,202, + 72,137,8,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252, + 235,248,3,131,189,233,0,15,132,244,1,255,139,141,233,252,246,129,233,235, + 15,132,244,40,15,182,78,252,253,252,233,244,1,248,7,128,165,233,235,139,139, + 233,137,171,233,137,141,233,15,182,78,252,253,252,233,244,2,255,137,124,36, + 80,139,60,199,248,1,141,12,202,139,105,252,248,252,246,133,233,235,15,133, + 244,253,248,2,139,68,36,84,131,232,1,15,132,244,250,1,252,248,59,133,233, + 15,135,244,251,41,252,248,193,231,3,3,189,233,248,3,72,139,41,131,193,8,72, + 137,47,131,199,8,131,232,1,15,133,244,3,248,4,139,124,36,80,139,6,15,182, + 204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,5,139,76,36,96, + 
137,145,233,137,252,234,65,137,192,137,205,137,116,36,100,232,251,1,35,139, + 149,233,15,182,78,252,253,252,233,244,1,248,7,255,128,165,233,235,139,131, + 233,137,171,233,137,133,233,252,233,244,2,255,3,68,36,84,255,129,124,253, + 202,4,239,139,44,202,15,133,244,58,141,84,202,8,137,114,252,252,139,181,233, + 139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,252,235,255,141,76, + 202,8,137,215,139,105,252,248,129,121,253,252,252,239,15,133,244,29,248,59, + 139,114,252,252,252,247,198,237,15,133,244,253,248,1,137,106,252,248,137, + 68,36,84,131,232,1,15,132,244,249,248,2,72,139,41,131,193,8,72,137,47,131, + 199,8,131,232,1,15,133,244,2,139,106,252,248,248,3,139,68,36,84,128,189,233, + 1,15,135,244,251,248,4,139,181,233,139,14,15,182,252,233,15,182,205,131,198, + 4,252,255,36,252,235,248,5,255,252,247,198,237,15,133,244,4,15,182,78,252, + 253,72,252,247,209,141,12,202,139,121,252,248,139,191,233,139,191,233,252, + 233,244,4,248,7,129,252,238,239,252,247,198,237,15,133,244,254,41,252,242, + 137,215,139,114,252,252,252,233,244,1,248,8,129,198,239,252,233,244,1,255, + 141,76,202,8,72,139,105,232,72,139,65,252,240,72,137,41,72,137,65,8,139,105, + 224,139,65,228,137,105,252,248,137,65,252,252,129,252,248,239,184,237,15, + 133,244,29,137,202,137,114,252,252,139,181,233,139,14,15,182,252,233,15,182, + 205,131,198,4,252,255,36,252,235,255,137,124,36,80,137,92,36,84,139,108,202, + 252,240,139,68,202,252,248,139,157,233,131,198,4,139,189,233,248,1,57,216, + 15,131,244,251,129,124,253,199,4,239,15,132,244,250,255,219,68,202,252,248, + 255,72,139,44,199,72,137,108,202,8,131,192,1,255,137,68,202,252,248,248,2, + 15,183,70,252,254,141,180,253,134,233,248,3,139,92,36,84,139,124,36,80,139, + 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,4,131, + 192,1,255,137,68,202,252,248,255,252,233,244,1,248,5,41,216,248,6,59,133, + 233,15,135,244,3,105,252,248,239,3,189,233,129,191,233,239,15,132,244,253, + 141,92,24,1,72,139,175,233,72,139,135,233,72,137,44,202,72,137,68,202,8,137, + 92,202,252,248,252,233,244,2,248,7,131,192,1,252,233,244,6,255,129,124,253, + 202,252,236,239,15,133,244,251,139,108,202,232,129,124,253,202,252,244,239, + 15,133,244,251,129,124,253,202,252,252,239,15,133,244,251,128,189,233,235, + 15,133,244,251,141,180,253,134,233,199,68,202,252,248,0,0,0,0,248,1,139,6, + 15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,5,198,70, + 252,252,235,141,180,253,134,233,198,6,235,252,233,244,1,255,15,182,252,236, + 15,182,192,137,124,36,80,141,188,253,194,233,141,12,202,43,122,252,252,133, + 252,237,15,132,244,251,141,108,252,233,252,248,57,215,15,131,244,248,248, + 1,72,139,71,252,248,131,199,8,72,137,1,131,193,8,57,252,233,15,131,244,249, + 57,215,15,130,244,1,248,2,199,65,4,237,131,193,8,57,252,233,15,130,244,2, + 248,3,139,124,36,80,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252, + 255,36,252,235,248,5,199,68,36,84,1,0,0,0,137,208,41,252,248,15,134,244,3, + 137,197,193,252,237,3,131,197,1,137,108,36,84,139,108,36,96,1,200,59,133, + 233,15,135,244,253,248,6,255,72,139,71,252,248,131,199,8,72,137,1,131,193, + 8,57,215,15,130,244,6,252,233,244,3,248,7,137,149,233,137,141,233,137,116, + 36,100,41,215,139,84,36,84,131,252,234,1,137,252,233,232,251,1,0,139,149, + 233,139,141,233,1,215,252,233,244,6,255,193,225,3,255,248,1,139,114,252,252, + 137,68,36,84,252,247,198,237,15,133,244,253,255,248,13,137,215,131,232,1, + 15,132,244,249,248,2,72,139,44,15,72,137,111,252,248,131,199,8,131,232,1, + 15,133,244,2,248,3,139,68,36,84,15,182,110,252,255,248,5,57,197,15,135,244, 
+ 252,255,72,139,44,10,72,137,106,252,248,255,248,5,56,70,252,255,15,135,244, + 252,255,15,182,78,252,253,72,252,247,209,141,20,202,139,122,252,248,139,191, + 233,139,191,233,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255, + 36,252,235,248,6,255,199,71,252,252,237,131,199,8,255,199,68,194,252,244, + 237,255,131,192,1,252,233,244,5,248,7,141,174,233,252,247,197,237,15,133, + 244,14,41,252,234,255,1,252,233,255,137,252,245,209,252,237,129,229,239,102, + 131,172,253,43,233,1,15,132,244,148,255,141,12,202,255,129,121,253,4,239, + 15,133,244,255,255,129,121,253,12,239,15,133,244,60,129,121,253,20,239,15, + 133,244,60,139,41,131,121,16,0,15,140,244,251,255,129,121,253,12,239,15,133, + 244,165,129,121,253,20,239,15,133,244,165,255,139,105,16,133,252,237,15,136, + 244,251,3,41,15,128,244,247,137,41,255,59,105,8,199,65,28,237,137,105,24, + 255,15,142,244,253,248,1,248,6,141,180,253,134,233,255,141,180,253,134,233, + 15,183,70,252,254,15,142,245,248,1,248,6,255,15,143,244,253,248,6,141,180, + 253,134,233,248,1,255,248,7,139,6,15,182,204,15,182,232,131,198,4,193,232, + 16,252,255,36,252,235,248,5,255,3,41,15,128,244,1,137,41,255,15,141,244,7, + 255,141,180,253,134,233,15,183,70,252,254,15,141,245,255,15,140,244,7,255, + 252,233,244,6,248,9,255,129,121,253,4,239,255,15,131,244,60,129,121,253,12, + 239,15,131,244,60,255,129,121,253,12,239,15,131,244,165,129,121,253,20,239, + 15,131,244,165,255,139,105,20,255,129,252,253,239,15,131,244,60,255,252,242, + 15,16,1,252,242,15,16,73,8,255,252,242,15,88,65,16,252,242,15,17,1,133,252, + 237,15,136,244,249,255,15,140,244,249,255,102,15,46,200,248,1,252,242,15, + 17,65,24,255,221,65,8,221,1,255,220,65,16,221,17,221,81,24,133,252,237,15, + 136,244,247,255,221,81,24,15,140,244,247,255,217,201,248,1,255,15,183,70, + 252,254,255,15,131,244,7,255,15,131,244,248,141,180,253,134,233,255,141,180, + 253,134,233,15,183,70,252,254,15,131,245,255,15,130,244,7,255,15,130,244, + 248,141,180,253,134,233,255,248,3,102,15,46,193,252,233,244,1,255,141,12, + 202,139,105,4,129,252,253,239,15,132,244,247,255,137,105,252,252,139,41,137, + 105,252,248,252,233,245,255,141,180,253,134,233,139,1,137,105,252,252,137, + 65,252,248,255,139,139,233,139,4,129,72,139,128,233,139,108,36,96,137,147, + 233,137,171,233,76,137,100,36,80,76,137,108,36,32,76,137,116,36,24,76,137, + 124,36,16,72,137,225,72,129,252,236,239,102,15,127,49,102,15,127,185,233, + 102,68,15,127,129,233,102,68,15,127,137,233,102,68,15,127,145,233,102,68, + 15,127,153,233,102,68,15,127,161,233,102,68,15,127,169,233,102,68,15,127, + 177,233,102,68,15,127,185,233,252,255,224,255,141,180,253,134,233,139,6,15, + 182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,255,137,252,245, + 209,252,237,129,229,239,102,131,172,253,43,233,1,15,132,244,150,255,139,190, + 233,139,108,36,96,141,12,202,59,141,233,15,135,244,24,15,182,142,233,57,200, + 15,134,244,249,248,2,255,15,183,70,252,254,252,233,245,255,248,3,199,68,194, + 252,252,237,131,192,1,57,200,15,134,244,3,252,233,244,2,255,141,44,197,237, + 141,4,194,139,122,252,248,137,104,252,252,137,120,252,248,139,108,36,96,141, + 12,200,59,141,233,15,135,244,23,137,209,137,194,15,182,174,233,133,252,237, + 15,132,244,248,248,1,131,193,8,57,209,15,131,244,249,139,121,252,248,137, + 56,139,121,252,252,137,120,4,131,192,8,199,65,252,252,237,131,252,237,1,15, + 133,244,1,248,2,255,139,190,233,139,6,15,182,204,15,182,232,131,198,4,193, + 232,16,252,255,36,252,235,255,248,3,199,64,4,237,131,192,8,131,252,237,1, + 
15,133,244,3,252,233,244,2,255,139,106,252,248,72,139,189,233,139,108,36, + 96,141,68,194,252,248,137,149,233,141,136,233,59,141,233,137,133,233,255, + 72,137,252,250,137,252,233,255,15,135,244,22,199,131,233,237,255,252,255, + 215,255,252,255,147,233,255,199,131,233,237,139,149,233,141,12,194,252,247, + 217,3,141,233,139,114,252,252,252,233,244,12,255,254,0 +}; + +enum { + GLOB_vm_returnp, + GLOB_cont_dispatch, + GLOB_vm_returnc, + GLOB_BC_RET_Z, + GLOB_vm_return, + GLOB_vm_leave_cp, + GLOB_vm_leave_unw, + GLOB_vm_unwind_c, + GLOB_vm_unwind_c_eh, + GLOB_vm_unwind_rethrow, + GLOB_vm_unwind_ff, + GLOB_vm_unwind_ff_eh, + GLOB_vm_growstack_c, + GLOB_vm_growstack_v, + GLOB_vm_growstack_f, + GLOB_vm_resume, + GLOB_vm_pcall, + GLOB_vm_call, + GLOB_vm_call_dispatch, + GLOB_vmeta_call, + GLOB_vm_call_dispatch_f, + GLOB_vm_cpcall, + GLOB_vm_call_tail, + GLOB_cont_cat, + GLOB_cont_ra, + GLOB_BC_CAT_Z, + GLOB_vmeta_tgets, + GLOB_vmeta_tgetb, + GLOB_vmeta_tgetv, + GLOB_vmeta_tsets, + GLOB_vmeta_tsetb, + GLOB_vmeta_tsetv, + GLOB_cont_nop, + GLOB_vmeta_comp, + GLOB_vmeta_binop, + GLOB_cont_condt, + GLOB_cont_condf, + GLOB_vmeta_equal, + GLOB_vmeta_equal_cd, + GLOB_vmeta_arith_vno, + GLOB_vmeta_arith_vn, + GLOB_vmeta_arith_nvo, + GLOB_vmeta_arith_nv, + GLOB_vmeta_unm, + GLOB_vmeta_arith_vvo, + GLOB_vmeta_arith_vv, + GLOB_vmeta_len, + GLOB_BC_LEN_Z, + GLOB_vmeta_call_ra, + GLOB_BC_CALLT_Z, + GLOB_vmeta_for, + GLOB_ff_assert, + GLOB_fff_fallback, + GLOB_fff_res_, + GLOB_ff_type, + GLOB_fff_res1, + GLOB_ff_getmetatable, + GLOB_ff_setmetatable, + GLOB_ff_rawget, + GLOB_ff_tonumber, + GLOB_fff_resi, + GLOB_fff_resxmm0, + GLOB_fff_resn, + GLOB_ff_tostring, + GLOB_fff_gcstep, + GLOB_ff_next, + GLOB_fff_res2, + GLOB_fff_res, + GLOB_ff_pairs, + GLOB_ff_ipairs_aux, + GLOB_fff_res0, + GLOB_ff_ipairs, + GLOB_ff_pcall, + GLOB_ff_xpcall, + GLOB_ff_coroutine_resume, + GLOB_ff_coroutine_wrap_aux, + GLOB_ff_coroutine_yield, + GLOB_ff_math_abs, + GLOB_fff_resbit, + GLOB_ff_math_floor, + GLOB_vm_floor, + GLOB_ff_math_ceil, + GLOB_vm_ceil, + GLOB_ff_math_sqrt, + GLOB_ff_math_log, + GLOB_ff_math_log10, + GLOB_ff_math_exp, + GLOB_vm_exp_x87, + GLOB_ff_math_sin, + GLOB_ff_math_cos, + GLOB_ff_math_tan, + GLOB_ff_math_asin, + GLOB_ff_math_acos, + GLOB_ff_math_atan, + GLOB_ff_math_sinh, + GLOB_ff_math_cosh, + GLOB_ff_math_tanh, + GLOB_ff_math_deg, + GLOB_ff_math_rad, + GLOB_ff_math_atan2, + GLOB_ff_math_ldexp, + GLOB_ff_math_frexp, + GLOB_ff_math_modf, + GLOB_vm_trunc, + GLOB_ff_math_fmod, + GLOB_ff_math_pow, + GLOB_vm_pow, + GLOB_ff_math_min, + GLOB_ff_math_max, + GLOB_ff_string_len, + GLOB_ff_string_byte, + GLOB_ff_string_char, + GLOB_fff_newstr, + GLOB_ff_string_sub, + GLOB_fff_emptystr, + GLOB_ff_string_rep, + GLOB_fff_fallback_2, + GLOB_ff_string_reverse, + GLOB_fff_fallback_1, + GLOB_ff_string_lower, + GLOB_ff_string_upper, + GLOB_ff_table_getn, + GLOB_ff_bit_tobit, + GLOB_ff_bit_band, + GLOB_fff_fallback_bit_op, + GLOB_ff_bit_bor, + GLOB_ff_bit_bxor, + GLOB_ff_bit_bswap, + GLOB_ff_bit_bnot, + GLOB_ff_bit_lshift, + GLOB_ff_bit_rshift, + GLOB_ff_bit_arshift, + GLOB_ff_bit_rol, + GLOB_ff_bit_ror, + GLOB_vm_record, + GLOB_vm_rethook, + GLOB_vm_inshook, + GLOB_cont_hook, + GLOB_vm_hotloop, + GLOB_vm_callhook, + GLOB_vm_hotcall, + GLOB_vm_exit_handler, + GLOB_vm_exit_interp, + GLOB_vm_floor_sse, + GLOB_vm_ceil_sse, + GLOB_vm_trunc_sse, + GLOB_vm_mod, + GLOB_vm_exp2_x87, + GLOB_vm_exp2raw, + GLOB_vm_pow_sse, + GLOB_vm_powi_sse, + GLOB_vm_foldfpm, + GLOB_vm_foldarith, + GLOB_vm_cpuid, + GLOB_vm_ffi_call, + 
GLOB_assert_bad_for_arg_type, + GLOB_BC_MODVN_Z, + GLOB_BC_TGETS_Z, + GLOB_BC_TSETS_Z, + GLOB__MAX +}; +static const char *const globnames[] = { + "vm_returnp", + "cont_dispatch", + "vm_returnc", + "BC_RET_Z", + "vm_return", + "vm_leave_cp", + "vm_leave_unw", + "vm_unwind_c@8", + "vm_unwind_c_eh", + "vm_unwind_rethrow", + "vm_unwind_ff@4", + "vm_unwind_ff_eh", + "vm_growstack_c", + "vm_growstack_v", + "vm_growstack_f", + "vm_resume", + "vm_pcall", + "vm_call", + "vm_call_dispatch", + "vmeta_call", + "vm_call_dispatch_f", + "vm_cpcall", + "vm_call_tail", + "cont_cat", + "cont_ra", + "BC_CAT_Z", + "vmeta_tgets", + "vmeta_tgetb", + "vmeta_tgetv", + "vmeta_tsets", + "vmeta_tsetb", + "vmeta_tsetv", + "cont_nop", + "vmeta_comp", + "vmeta_binop", + "cont_condt", + "cont_condf", + "vmeta_equal", + "vmeta_equal_cd", + "vmeta_arith_vno", + "vmeta_arith_vn", + "vmeta_arith_nvo", + "vmeta_arith_nv", + "vmeta_unm", + "vmeta_arith_vvo", + "vmeta_arith_vv", + "vmeta_len", + "BC_LEN_Z", + "vmeta_call_ra", + "BC_CALLT_Z", + "vmeta_for", + "ff_assert", + "fff_fallback", + "fff_res_", + "ff_type", + "fff_res1", + "ff_getmetatable", + "ff_setmetatable", + "ff_rawget", + "ff_tonumber", + "fff_resi", + "fff_resxmm0", + "fff_resn", + "ff_tostring", + "fff_gcstep", + "ff_next", + "fff_res2", + "fff_res", + "ff_pairs", + "ff_ipairs_aux", + "fff_res0", + "ff_ipairs", + "ff_pcall", + "ff_xpcall", + "ff_coroutine_resume", + "ff_coroutine_wrap_aux", + "ff_coroutine_yield", + "ff_math_abs", + "fff_resbit", + "ff_math_floor", + "vm_floor", + "ff_math_ceil", + "vm_ceil", + "ff_math_sqrt", + "ff_math_log", + "ff_math_log10", + "ff_math_exp", + "vm_exp_x87", + "ff_math_sin", + "ff_math_cos", + "ff_math_tan", + "ff_math_asin", + "ff_math_acos", + "ff_math_atan", + "ff_math_sinh", + "ff_math_cosh", + "ff_math_tanh", + "ff_math_deg", + "ff_math_rad", + "ff_math_atan2", + "ff_math_ldexp", + "ff_math_frexp", + "ff_math_modf", + "vm_trunc", + "ff_math_fmod", + "ff_math_pow", + "vm_pow", + "ff_math_min", + "ff_math_max", + "ff_string_len", + "ff_string_byte", + "ff_string_char", + "fff_newstr", + "ff_string_sub", + "fff_emptystr", + "ff_string_rep", + "fff_fallback_2", + "ff_string_reverse", + "fff_fallback_1", + "ff_string_lower", + "ff_string_upper", + "ff_table_getn", + "ff_bit_tobit", + "ff_bit_band", + "fff_fallback_bit_op", + "ff_bit_bor", + "ff_bit_bxor", + "ff_bit_bswap", + "ff_bit_bnot", + "ff_bit_lshift", + "ff_bit_rshift", + "ff_bit_arshift", + "ff_bit_rol", + "ff_bit_ror", + "vm_record", + "vm_rethook", + "vm_inshook", + "cont_hook", + "vm_hotloop", + "vm_callhook", + "vm_hotcall", + "vm_exit_handler", + "vm_exit_interp", + "vm_floor_sse", + "vm_ceil_sse", + "vm_trunc_sse", + "vm_mod", + "vm_exp2_x87", + "vm_exp2raw", + "vm_pow_sse", + "vm_powi_sse", + "vm_foldfpm", + "vm_foldarith", + "vm_cpuid", + "vm_ffi_call@4", + "assert_bad_for_arg_type", + "BC_MODVN_Z", + "BC_TGETS_Z", + "BC_TSETS_Z", + (const char *)0 +}; +static const char *const extnames[] = { + "lj_state_growstack@8", + "lj_meta_tget", + "lj_meta_tset", + "lj_meta_comp", + "lj_meta_equal", + "lj_meta_equal_cd@8", + "lj_meta_arith", + "lj_meta_len@8", + "lj_meta_call", + "lj_meta_for@8", + "lj_tab_get", + "lj_str_fromnumber@8", + "lj_str_fromnum@8", + "lj_tab_next", + "lj_tab_getinth@8", + "lj_ffh_coroutine_wrap_err@8", + "lj_vm_sinh", + "lj_vm_cosh", + "lj_vm_tanh", + "lj_str_new", + "lj_tab_len@4", + "lj_gc_step@4", + "lj_dispatch_ins@8", + "lj_trace_hot@8", + "lj_dispatch_call@8", + "lj_trace_exit@8", + "lj_err_throw@8", + "lj_meta_cat", + 
"lj_gc_barrieruv@8", + "lj_func_closeuv@8", + "lj_func_newL_gc", + "lj_tab_new", + "lj_gc_step_fixtop@4", + "lj_tab_dup@8", + "lj_tab_newkey", + "lj_tab_reasize", + (const char *)0 +}; +#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V) +#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V) +#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V) +#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V) +#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V) +#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V) +#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V) +#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V) +#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V) +#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V) +#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V) +#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V) +#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V) +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx, int cmov, int sse) +{ + dasm_put(Dst, 0); + dasm_put(Dst, 2, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, FRAME_TYPE, DISPATCH_GL(vmstate), ~LJ_VMST_C); + dasm_put(Dst, 109, Dt1(->base), Dt1(->top), Dt1(->cframe), Dt1(->maxstack), LJ_TNIL); + dasm_put(Dst, 198, Dt1(->top), Dt1(->top), Dt1(->glref), Dt2(->vmstate), ~LJ_VMST_C, CFRAME_RAWMASK); + dasm_put(Dst, 276, 1+1, Dt1(->base), Dt1(->glref), GG_G2DISP, LJ_TFALSE, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, LUA_MINSTACK, -4+PC2PROTO(framesize), Dt1(->base)); + dasm_put(Dst, 356, Dt1(->top), Dt1(->base), Dt1(->top), Dt7(->pc), FRAME_CP, CFRAME_RESUME, Dt1(->glref), GG_G2DISP, Dt1(->cframe), Dt1(->status), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->status), Dt1(->base), Dt1(->top), FRAME_TYPE); + dasm_put(Dst, 511, FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base)); + dasm_put(Dst, 604, Dt1(->top), LJ_TFUNC, Dt7(->pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), FRAME_CP, LJ_TNIL); + dasm_put(Dst, 770, 0, Dt7(->pc), PC2PROTO(k), Dt1(->base), LJ_TSTR, BC_GGET, DISPATCH_GL(tmptv), LJ_TTAB); + dasm_put(Dst, 894); + if (LJ_DUALNUM) { + dasm_put(Dst, 908, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 918); + } else { + } + dasm_put(Dst, 931, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 2+1, LJ_TSTR, BC_GSET); + dasm_put(Dst, 1078, DISPATCH_GL(tmptv), LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 908, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 918); + } else { + } + dasm_put(Dst, 1101, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 3+1, Dt1(->base), Dt1(->base)); + dasm_put(Dst, 1274, -BCBIAS_J*4, LJ_TISTRUECOND, LJ_TISTRUECOND, Dt1(->base)); + dasm_put(Dst, 1374); +#if LJ_HASFFI + dasm_put(Dst, 1394, Dt1(->base)); +#endif + dasm_put(Dst, 1425); +#if LJ_DUALNUM + dasm_put(Dst, 1428); +#endif + dasm_put(Dst, 1434); +#if LJ_DUALNUM + dasm_put(Dst, 902); +#endif + dasm_put(Dst, 1446); +#if LJ_DUALNUM + dasm_put(Dst, 1428); +#endif + dasm_put(Dst, 1474, Dt1(->base), Dt1(->base), FRAME_CONT, 2+1, Dt1(->base), Dt1(->base)); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 1580); +#else + dasm_put(Dst, 1599); +#endif + dasm_put(Dst, 1604, Dt1(->base), Dt1(->base), Dt7(->pc), Dt1(->base), 
Dt1(->base), GG_DISP2STATIC, 1+1, LJ_TISTRUECOND); + dasm_put(Dst, 1790, 1+1, ~LJ_TNUMX); + if (cmov) { + dasm_put(Dst, 1859); + } else { + dasm_put(Dst, 1863); + } + dasm_put(Dst, 1872, ((char *)(&((GCfuncC *)0)->upvalue)), LJ_TSTR, ~LJ_TLIGHTUD, 1+1, LJ_TTAB, Dt6(->metatable), LJ_TNIL); + dasm_put(Dst, 1951, DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable), LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), DtB(->next)); + dasm_put(Dst, 2008, LJ_TNIL, LJ_TUDATA, LJ_TNUMX, LJ_TISNUM, LJ_TLIGHTUD); + dasm_put(Dst, 2074, LJ_TNUMX, DISPATCH_GL(gcroot[GCROOT_BASEMT]), 2+1, LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->metatable), LJ_TTAB); + dasm_put(Dst, 2144, Dt6(->marked), LJ_GC_BLACK, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist), 2+1, LJ_TTAB); + dasm_put(Dst, 2233, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2247); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 2269); + } else { + dasm_put(Dst, 2279); + } + dasm_put(Dst, 2286, 1+1, LJ_TSTR, LJ_TSTR, LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 2352, Dt1(->base)); + if (LJ_DUALNUM) { + dasm_put(Dst, 2376); + } else { + dasm_put(Dst, 2381); + } + dasm_put(Dst, 2386, Dt1(->base), 1+1, LJ_TTAB, Dt1(->base), Dt1(->top), Dt1(->base), 1+2); + dasm_put(Dst, 2479, LJ_TNIL, LJ_TNIL, 1+1, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 2526, Dt6(->metatable)); +#endif + dasm_put(Dst, 2535, Dt8(->upvalue[0]), LJ_TFUNC, LJ_TNIL, 1+3, 1+1, LJ_TTAB, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2521); + } else { + dasm_put(Dst, 2264); + } + dasm_put(Dst, 2590); + if (LJ_DUALNUM) { + dasm_put(Dst, 2595, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 2611, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + } else { + } + dasm_put(Dst, 2644, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->hmask), 1+0); + dasm_put(Dst, 2506, 1+1, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 2526, Dt6(->metatable)); +#endif + dasm_put(Dst, 2721, Dt8(->upvalue[0]), LJ_TFUNC); + if (LJ_DUALNUM) { + dasm_put(Dst, 2742, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 2754); + } else { + dasm_put(Dst, 2764); + } + dasm_put(Dst, 2771, 1+3, 1+1, 8+FRAME_PCALL, DISPATCH_GL(hookmask), HOOK_ACTIVE_SHIFT, 2+1, LJ_TFUNC); + dasm_put(Dst, 2835, LJ_TFUNC, 16+FRAME_PCALL, 1+1, LJ_TTHREAD, Dt1(->cframe), Dt1(->status), LUA_YIELD, Dt1(->top)); + dasm_put(Dst, 2925, Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP); + dasm_put(Dst, 3013, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack), LJ_TTRUE, FRAME_TYPE); + dasm_put(Dst, 3126, LJ_TFALSE, Dt1(->top), Dt1(->top), 1+2, Dt1(->top), Dt1(->base), Dt8(->upvalue[0].gcr), Dt1(->cframe)); + dasm_put(Dst, 3224, Dt1(->status), LUA_YIELD, Dt1(->top), Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top)); + dasm_put(Dst, 3291, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack)); + dasm_put(Dst, 3379, FRAME_TYPE, Dt1(->top), Dt1(->base), Dt1(->cframe), CFRAME_RESUME); + dasm_put(Dst, 3491, Dt1(->base), Dt1(->top), Dt1(->cframe), LUA_YIELD, Dt1(->status)); + if (!LJ_DUALNUM) { + dasm_put(Dst, 3518); + } + if (sse) { + dasm_put(Dst, 3521); + } + dasm_put(Dst, 3536, 1+1); + if (LJ_DUALNUM) { + dasm_put(Dst, 3547, LJ_TISNUM, 
LJ_TISNUM); + } else { + dasm_put(Dst, 3627, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3637, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32)); + } else { + dasm_put(Dst, 3668); + } + dasm_put(Dst, 3685, 1+1, FRAME_TYPE, LJ_TNIL); + if (LJ_DUALNUM) { + dasm_put(Dst, 3781, LJ_TISNUM); + } else { + dasm_put(Dst, 3627, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3803); + if (LJ_DUALNUM) { + dasm_put(Dst, 3812); + } + dasm_put(Dst, 2274); + } else { + dasm_put(Dst, 3846); + if (LJ_DUALNUM) { + } else { + dasm_put(Dst, 2281); + } + } + dasm_put(Dst, 3852); + if (LJ_DUALNUM) { + dasm_put(Dst, 3781, LJ_TISNUM); + } else { + dasm_put(Dst, 3627, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3855); + if (LJ_DUALNUM) { + dasm_put(Dst, 3812); + } + dasm_put(Dst, 2274); + } else { + dasm_put(Dst, 3864); + if (LJ_DUALNUM) { + } else { + dasm_put(Dst, 2281); + } + } + if (sse) { + dasm_put(Dst, 3870, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 3899, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 3928, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 3997, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 4054, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 4117, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM); + dasm_put(Dst, 4207); + if (sse) { + dasm_put(Dst, 4219, 1+1, LJ_TISNUM); + } else { + } + dasm_put(Dst, 4244); + if (sse) { + dasm_put(Dst, 4258, 1+1, LJ_TISNUM); + } else { + } + dasm_put(Dst, 4283); + if (sse) { + dasm_put(Dst, 4297, 1+1, LJ_TISNUM); + } else { + } + dasm_put(Dst, 4322); + if (sse) { + dasm_put(Dst, 4338, 1+1, LJ_TISNUM, Dt8(->upvalue[0])); + } else { + dasm_put(Dst, 4377, 1+1, LJ_TISNUM, Dt8(->upvalue[0])); + } + dasm_put(Dst, 4410, 2+1, LJ_TISNUM, LJ_TISNUM, 2+1, LJ_TISNUM, LJ_TISNUM); + dasm_put(Dst, 4475, 1+1, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4574); + } else { + dasm_put(Dst, 4580); + } + dasm_put(Dst, 4589); + if (sse) { + dasm_put(Dst, 4614); + } else { + dasm_put(Dst, 4620); + } + dasm_put(Dst, 4623, 1+2); + if (sse) { + dasm_put(Dst, 4632); + } else { + dasm_put(Dst, 4640); + } + dasm_put(Dst, 4648); + if (sse) { + dasm_put(Dst, 4651, (unsigned int)(U64x(43500000,00000000)), (unsigned int)((U64x(43500000,00000000))>>32)); + } else { + dasm_put(Dst, 4678); + } + dasm_put(Dst, 4697); + if (sse) { + dasm_put(Dst, 4713, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 4738, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 4760); + if (sse) { + dasm_put(Dst, 4782); + } else { + dasm_put(Dst, 4808); + } + dasm_put(Dst, 4825, 1+2); + if (sse) { + dasm_put(Dst, 4865); + } else { + dasm_put(Dst, 4873); + } + dasm_put(Dst, 4883, 2+1, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4935, 2+1, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 4982, 2+1, LJ_TISNUM, LJ_TISNUM); + } + dasm_put(Dst, 5023, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5036, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4574); + } else { + } + dasm_put(Dst, 5086); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 5097, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5118); + } else { + dasm_put(Dst, 2264); + } + dasm_put(Dst, 5139); + } else { + } + dasm_put(Dst, 5164, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5177, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4574); + } else { + } + dasm_put(Dst, 5086); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 5097, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5118); + } else { + dasm_put(Dst, 2264); + } + dasm_put(Dst, 5227); + } else { + } + if (!sse) { + dasm_put(Dst, 5252); + } + dasm_put(Dst, 
5261, 1+1, LJ_TSTR); + if (LJ_DUALNUM) { + dasm_put(Dst, 5283, Dt5(->len)); + } else if (sse) { + dasm_put(Dst, 5291, Dt5(->len)); + } else { + dasm_put(Dst, 5302, Dt5(->len)); + } + dasm_put(Dst, 5310, 1+1, LJ_TSTR, Dt5(->len), Dt5([1])); + if (LJ_DUALNUM) { + dasm_put(Dst, 5286); + } else if (sse) { + dasm_put(Dst, 5348); + } else { + dasm_put(Dst, 5358); + } + dasm_put(Dst, 5371, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5402); + } else if (sse) { + dasm_put(Dst, 5425); + } else { + dasm_put(Dst, 5451); + } + dasm_put(Dst, 5475, Dt1(->base), Dt1(->base), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+2, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5583); + } else if (sse) { + dasm_put(Dst, 5595); + } else { + dasm_put(Dst, 5610); + } + dasm_put(Dst, 5622, LJ_TSTR, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2521); + } else { + dasm_put(Dst, 2264); + } + dasm_put(Dst, 5639, Dt5(->len)); + if (LJ_DUALNUM) { + dasm_put(Dst, 5649); + } else if (sse) { + dasm_put(Dst, 5653); + } else { + } + dasm_put(Dst, 5660, sizeof(GCstr)-1); + dasm_put(Dst, 5735, 2+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 5794, LJ_TSTR, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5811); + } else if (sse) { + dasm_put(Dst, 5819); + } else { + dasm_put(Dst, 5830); + } + dasm_put(Dst, 5846, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(tmpbuf.buf), 1+1); + dasm_put(Dst, 5911, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf)); + dasm_put(Dst, 5974, 1+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz)); + dasm_put(Dst, 6045, sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), 1+1); + dasm_put(Dst, 6130, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf)); + dasm_put(Dst, 6200, 1+1, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 6268); + } else if (sse) { + dasm_put(Dst, 6275); + } else { + } + dasm_put(Dst, 6285, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6301); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6318, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 106); + if (LJ_DUALNUM || sse) { + if (!sse) { + } + dasm_put(Dst, 6342); + } else { + } + dasm_put(Dst, 6347, 1+1); + if (sse) { + dasm_put(Dst, 6358, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + dasm_put(Dst, 6368); + } + dasm_put(Dst, 2241, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6394); + } else { + } + dasm_put(Dst, 6409, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6434); + } else { + dasm_put(Dst, 6454); + } + if (sse) { + dasm_put(Dst, 6459); + } else { + } + dasm_put(Dst, 6476, 1+1); + if (sse) { + dasm_put(Dst, 6358, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + dasm_put(Dst, 6368); + } + dasm_put(Dst, 2241, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6394); + } else { + } + dasm_put(Dst, 6409, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6494); + } else { + dasm_put(Dst, 6454); + } + if (sse) { + dasm_put(Dst, 6514); + } else { + } + 
dasm_put(Dst, 6531, 1+1); + if (sse) { + dasm_put(Dst, 6358, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + dasm_put(Dst, 6368); + } + dasm_put(Dst, 2241, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6394); + } else { + } + dasm_put(Dst, 6409, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6549); + } else { + dasm_put(Dst, 6454); + } + if (sse) { + dasm_put(Dst, 6569); + } else { + } + dasm_put(Dst, 6586, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6318, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6609, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6318, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6633); + if (LJ_DUALNUM) { + dasm_put(Dst, 6342); + } else if (sse) { + dasm_put(Dst, 6639); + } else { + } + dasm_put(Dst, 6651); + if (LJ_DUALNUM) { + dasm_put(Dst, 6662, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6318, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6678, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 6693, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6760); + if (LJ_DUALNUM) { + dasm_put(Dst, 6767, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6318, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6678, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 6783, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6850); + if (LJ_DUALNUM) { + dasm_put(Dst, 6858, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6318, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6678, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 6874, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6941); + if (LJ_DUALNUM) { + dasm_put(Dst, 6949, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6318, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6678, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 6965, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 7032); + if (LJ_DUALNUM) { + dasm_put(Dst, 7039, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6377); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6318, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 6678, LJ_TISNUM); + } else if (sse) { 
+ dasm_put(Dst, 7055, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32)); + } else { + } + dasm_put(Dst, 7122, 1+2, 1+1, Dt1(->base), 8*LUA_MINSTACK, Dt1(->top), Dt1(->maxstack), Dt8(->f), Dt1(->base)); + dasm_put(Dst, 7198, Dt1(->top), Dt7(->pc), FRAME_TYPE, LUA_MINSTACK, Dt1(->base), Dt1(->base)); + dasm_put(Dst, 7325, Dt1(->top), Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 7364, DISPATCH_GL(hookmask), HOOK_VMEVENT, HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount)); +#endif + dasm_put(Dst, 7395, DISPATCH_GL(hookmask), HOOK_ACTIVE, DISPATCH_GL(hookmask), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE); + dasm_put(Dst, 7446, Dt1(->base), Dt1(->base), GG_DISP2STATIC); +#if LJ_HASJIT + dasm_put(Dst, 7513, Dt7(->pc), PC2PROTO(framesize), Dt1(->base), Dt1(->top), GG_DISP2J, DISPATCH_J(L)); +#endif + dasm_put(Dst, 7560); +#if LJ_HASJIT + dasm_put(Dst, 7390); +#endif + dasm_put(Dst, 7567); +#if LJ_HASJIT + dasm_put(Dst, 7570); +#endif + dasm_put(Dst, 7580, Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 7614); +#endif + dasm_put(Dst, 7619, Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 7650, DISPATCH_GL(vmstate), DISPATCH_GL(vmstate), ~LJ_VMST_EXIT, DISPATCH_J(exitno), DISPATCH_J(parent), 16*8+4*8, DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_J(L), DISPATCH_GL(jit_L), Dt1(->base), 4*8, GG_DISP2J, Dt1(->cframe), CFRAME_RAWMASK, CFRAME_OFS_L, Dt1(->base), CFRAME_OFS_PC); +#endif + dasm_put(Dst, 7880); +#if LJ_HASJIT + dasm_put(Dst, 7883, 9*16+4*8, -9*16, -8*16, -7*16, -6*16, -5*16, -4*16, -3*16, -2*16, -1*16, Dt7(->pc), PC2PROTO(k), DISPATCH_GL(jit_L), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, BC_FUNCF); + dasm_put(Dst, 8025); +#endif + dasm_put(Dst, 8051); + if (!sse) { + dasm_put(Dst, 8054); + } + dasm_put(Dst, 8099, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + if (!sse) { + dasm_put(Dst, 8185); + } + dasm_put(Dst, 8230, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(bff00000,00000000)), (unsigned int)((U64x(bff00000,00000000))>>32)); + if (!sse) { + dasm_put(Dst, 8316); + } + dasm_put(Dst, 8355, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + if (sse) { + dasm_put(Dst, 8444, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + } else { + dasm_put(Dst, 8558); + } + dasm_put(Dst, 8605); + if (!sse) { + } else { + dasm_put(Dst, 8679); + } + dasm_put(Dst, 8682); + dasm_put(Dst, 8767, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32)); + dasm_put(Dst, 8870, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), 
(unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(7ff00000,00000000)), (unsigned int)((U64x(7ff00000,00000000))>>32)); + dasm_put(Dst, 9026); +#if LJ_HASJIT + if (sse) { + dasm_put(Dst, 9067); + dasm_put(Dst, 9137); + dasm_put(Dst, 9210); + } else { + dasm_put(Dst, 9260); + dasm_put(Dst, 9352); + } + dasm_put(Dst, 9398); +#endif + dasm_put(Dst, 9402); + if (sse) { + dasm_put(Dst, 9405, (unsigned int)(U64x(80000000,00000000)), (unsigned int)((U64x(80000000,00000000))>>32)); + dasm_put(Dst, 9494, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32)); + } else { + dasm_put(Dst, 9618); + dasm_put(Dst, 9701); + if (cmov) { + dasm_put(Dst, 9756); + } else { + dasm_put(Dst, 9775); + } + dasm_put(Dst, 9398); + } + dasm_put(Dst, 9816); +#if LJ_HASFFI +#define DtE(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V) + dasm_put(Dst, 9844, DtE(->spadj)); +#if LJ_TARGET_WINDOWS +#endif + dasm_put(Dst, 9859, DtE(->nsp), offsetof(CCallState, stack), CCALL_SPS_EXTRA*8, DtE(->nfpr), DtE(->gpr[0]), DtE(->gpr[1]), DtE(->gpr[2]), DtE(->gpr[3]), DtE(->fpr[0]), DtE(->fpr[1]), DtE(->fpr[2]), DtE(->fpr[3])); + dasm_put(Dst, 9940, DtE(->func), DtE(->gpr[0]), DtE(->fpr[0])); +#if LJ_TARGET_WINDOWS +#endif + dasm_put(Dst, 9953); +#endif + dasm_put(Dst, 9961); +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 9400); +#endif + dasm_put(Dst, 9400); +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop, int cmov, int sse) +{ + int vk = 0; + dasm_put(Dst, 9964, defop); + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + if (LJ_DUALNUM) { + dasm_put(Dst, 9966, LJ_TISNUM, LJ_TISNUM); + switch (op) { + case BC_ISLT: + dasm_put(Dst, 9996); + break; + case BC_ISGE: + dasm_put(Dst, 10001); + break; + case BC_ISLE: + dasm_put(Dst, 10006); + break; + case BC_ISGT: + dasm_put(Dst, 10011); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 10016, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 10070); + } else { + dasm_put(Dst, 10081); + } + dasm_put(Dst, 10092); + if (sse) { + dasm_put(Dst, 10099); + switch (op) { + case BC_ISLT: + dasm_put(Dst, 10119); + break; + case BC_ISGE: + dasm_put(Dst, 10124); + break; + case BC_ISLE: + dasm_put(Dst, 10129); + break; + case BC_ISGT: + dasm_put(Dst, 10134); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 10139); + } else { + dasm_put(Dst, 10144); + } + } else { + dasm_put(Dst, 10152, LJ_TISNUM, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 10173); + } else { + dasm_put(Dst, 10194); + if (cmov) { + dasm_put(Dst, 10210); + } else { + dasm_put(Dst, 10216); + } + } + if (LJ_DUALNUM) { + switch (op) { + case BC_ISLT: + dasm_put(Dst, 10119); + break; + case BC_ISGE: + dasm_put(Dst, 10124); + break; + case BC_ISLE: + dasm_put(Dst, 10129); + break; + case BC_ISGT: + dasm_put(Dst, 10134); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 10139); + } else { + switch (op) { + case BC_ISLT: + dasm_put(Dst, 10223); + break; + case BC_ISGE: + dasm_put(Dst, 10228); + break; + case BC_ISLE: + dasm_put(Dst, 10233); + break; + case BC_ISGT: + dasm_put(Dst, 10238); + break; + default: break; /* Shut up GCC. 
*/ + } + dasm_put(Dst, 10243, -BCBIAS_J*4); + } + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + dasm_put(Dst, 10275); + if (LJ_DUALNUM) { + dasm_put(Dst, 10283, LJ_TISNUM, LJ_TISNUM); + if (vk) { + dasm_put(Dst, 10308); + } else { + dasm_put(Dst, 10313); + } + dasm_put(Dst, 10318, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 10370); + } else { + dasm_put(Dst, 10377); + } + dasm_put(Dst, 10381); + if (sse) { + dasm_put(Dst, 10392); + } else { + dasm_put(Dst, 10404); + } + dasm_put(Dst, 10411); + } else { + dasm_put(Dst, 10416, LJ_TISNUM, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 10435); + } else { + dasm_put(Dst, 10453); + if (cmov) { + dasm_put(Dst, 10210); + } else { + dasm_put(Dst, 10216); + } + } + iseqne_fp: + if (vk) { + dasm_put(Dst, 10466); + } else { + dasm_put(Dst, 10475); + } + iseqne_end: + if (vk) { + dasm_put(Dst, 10484, -BCBIAS_J*4); + if (!LJ_HASFFI) { + dasm_put(Dst, 4629); + } + } else { + if (!LJ_HASFFI) { + dasm_put(Dst, 4629); + } + dasm_put(Dst, 10499, -BCBIAS_J*4); + } + if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV || + op == BC_ISEQN || op == BC_ISNEN)) { + dasm_put(Dst, 10514); + } else { + dasm_put(Dst, 10255); + } + if (op == BC_ISEQV || op == BC_ISNEV) { + dasm_put(Dst, 9937); + if (LJ_HASFFI) { + dasm_put(Dst, 10519, LJ_TCDATA, LJ_TCDATA); + } + dasm_put(Dst, 10538, LJ_TISPRI, LJ_TISTABUD, Dt6(->metatable), Dt6(->nomm), 1<<MM_eq); + if (vk) { + dasm_put(Dst, 10594); + } else { + dasm_put(Dst, 10598); + } + dasm_put(Dst, 10604); + } else if (LJ_HASFFI) { + dasm_put(Dst, 10609, LJ_TCDATA); + if (LJ_DUALNUM && vk) { + dasm_put(Dst, 10616); + } else { + dasm_put(Dst, 10589); + } + dasm_put(Dst, 10621); + } + break; + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + dasm_put(Dst, 10626, LJ_TSTR); + iseqne_test: + if (vk) { + dasm_put(Dst, 10470); + } else { + dasm_put(Dst, 765); + } + goto iseqne_end; + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + dasm_put(Dst, 10652); + if (LJ_DUALNUM) { + dasm_put(Dst, 10660, LJ_TISNUM, LJ_TISNUM); + if (vk) { + dasm_put(Dst, 10308); + } else { + dasm_put(Dst, 10313); + } + dasm_put(Dst, 10685, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 10733); + } else { + dasm_put(Dst, 10740); + } + dasm_put(Dst, 10744); + if (sse) { + dasm_put(Dst, 10751); + } else { + dasm_put(Dst, 10763); + } + dasm_put(Dst, 10411); + } else { + dasm_put(Dst, 10770, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 10779); + } else { + dasm_put(Dst, 10797); + if (cmov) { + dasm_put(Dst, 10210); + } else { + dasm_put(Dst, 10216); + } + } + goto iseqne_fp; + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + dasm_put(Dst, 10810); + if (!LJ_HASFFI) goto iseqne_test; + if (vk) { + dasm_put(Dst, 10824, -BCBIAS_J*4, LJ_TCDATA); + } else { + dasm_put(Dst, 10874, LJ_TCDATA, -BCBIAS_J*4); + } + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + dasm_put(Dst, 10918, LJ_TISTRUECOND); + if (op == BC_IST || op == BC_ISTC) { + dasm_put(Dst, 10238); + } else { + dasm_put(Dst, 10233); + } + if (op == BC_ISTC || op == BC_ISFC) { + dasm_put(Dst, 10930); + } + dasm_put(Dst, 10243, -BCBIAS_J*4); + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + dasm_put(Dst, 10941); + break; + case BC_NOT: + dasm_put(Dst, 10969, LJ_TISTRUECOND, LJ_TTRUE); + break; + case BC_UNM: + if (LJ_DUALNUM) { + dasm_put(Dst, 11005, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 11082, 
LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11093, (unsigned int)(U64x(80000000,00000000)), (unsigned int)((U64x(80000000,00000000))>>32)); + } else { + dasm_put(Dst, 11118); + } + if (LJ_DUALNUM) { + dasm_put(Dst, 10514); + } else { + dasm_put(Dst, 10255); + } + break; + case BC_LEN: + dasm_put(Dst, 11127, LJ_TSTR); + if (LJ_DUALNUM) { + dasm_put(Dst, 11141, Dt5(->len), LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 11155, Dt5(->len)); + } else { + dasm_put(Dst, 11173, Dt5(->len)); + } + dasm_put(Dst, 11182, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 11217, Dt6(->metatable)); +#endif + dasm_put(Dst, 11231); + if (LJ_DUALNUM) { + } else if (sse) { + dasm_put(Dst, 11240); + } else { + } + dasm_put(Dst, 11246); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 11259, Dt6(->nomm), 1<<MM_len); +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11283, LJ_TISNUM, LJ_TISNUM); + break; + case 1: + dasm_put(Dst, 11316, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 11349, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 11382, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 11151); + } else { + dasm_put(Dst, 10937); + } + dasm_put(Dst, 10255); + } else { + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11388, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11400, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11411); + } else { + dasm_put(Dst, 11425); + } + break; + case 1: + dasm_put(Dst, 11433, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11445, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11456); + } else { + dasm_put(Dst, 11470); + } + break; + default: + dasm_put(Dst, 11478, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11500); + } else { + dasm_put(Dst, 11514); + } + break; + } + if (sse) { + dasm_put(Dst, 11111); + } else { + dasm_put(Dst, 11123); + } + dasm_put(Dst, 10255); + } + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11522, LJ_TISNUM, LJ_TISNUM); + break; + case 1: + dasm_put(Dst, 11555, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 11588, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 11382, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 11151); + } else { + dasm_put(Dst, 10937); + } + dasm_put(Dst, 10255); + } else { + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11388, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11400, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11621); + } else { + dasm_put(Dst, 11635); + } + break; + case 1: + dasm_put(Dst, 11433, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11445, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11643); + } else { + dasm_put(Dst, 11657); + } + break; + default: + dasm_put(Dst, 11478, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11665); + } else { + dasm_put(Dst, 11679); + } + break; + } + if (sse) { + dasm_put(Dst, 11111); + } else { + dasm_put(Dst, 11123); + } + dasm_put(Dst, 10255); + } + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + 
case 0: + dasm_put(Dst, 11687, LJ_TISNUM, LJ_TISNUM); + break; + case 1: + dasm_put(Dst, 11721, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 11755, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 11382, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 11151); + } else { + dasm_put(Dst, 10937); + } + dasm_put(Dst, 10255); + } else { + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11388, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11400, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11789); + } else { + dasm_put(Dst, 11803); + } + break; + case 1: + dasm_put(Dst, 11433, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11445, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11811); + } else { + dasm_put(Dst, 11825); + } + break; + default: + dasm_put(Dst, 11478, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11833); + } else { + dasm_put(Dst, 11847); + } + break; + } + if (sse) { + dasm_put(Dst, 11111); + } else { + dasm_put(Dst, 11123); + } + dasm_put(Dst, 10255); + } + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11388, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11400, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11855); + } else { + dasm_put(Dst, 11869); + } + break; + case 1: + dasm_put(Dst, 11433, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11445, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11877); + } else { + dasm_put(Dst, 11891); + } + break; + default: + dasm_put(Dst, 11478, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11899); + } else { + dasm_put(Dst, 11913); + } + break; + } + if (sse) { + dasm_put(Dst, 11111); + } else { + dasm_put(Dst, 11123); + } + dasm_put(Dst, 10255); + break; + case BC_MODVN: + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11388, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11400, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11921); + } else { + dasm_put(Dst, 11935); + } + break; + case 1: + dasm_put(Dst, 11433, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11445, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11943); + } else { + dasm_put(Dst, 11957); + } + break; + default: + dasm_put(Dst, 11478, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11965); + } else { + dasm_put(Dst, 11979); + } + break; + } + dasm_put(Dst, 11987); + if (sse) { + dasm_put(Dst, 11111); + } else { + dasm_put(Dst, 11123); + } + dasm_put(Dst, 10255); + break; + case BC_MODNV: case BC_MODVV: + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11388, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11400, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11921); + } else { + dasm_put(Dst, 11935); + } + break; + case 1: + dasm_put(Dst, 11433, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11445, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11943); + } else { + dasm_put(Dst, 11957); + } + break; + default: + dasm_put(Dst, 11478, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11965); + } else { + dasm_put(Dst, 11979); + } + break; + } + dasm_put(Dst, 11993); + break; + case BC_POW: + dasm_put(Dst, 11275); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 11388, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11400, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11921); + } else { + 
dasm_put(Dst, 11935); + } + break; + case 1: + dasm_put(Dst, 11433, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 11445, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11943); + } else { + dasm_put(Dst, 11957); + } + break; + default: + dasm_put(Dst, 11478, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11965); + } else { + dasm_put(Dst, 11979); + } + break; + } + dasm_put(Dst, 11998); + if (sse) { + dasm_put(Dst, 11111); + } else { + dasm_put(Dst, 11123); + } + dasm_put(Dst, 10255); + break; + + case BC_CAT: + dasm_put(Dst, 12002, Dt1(->base), Dt1(->base)); + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + dasm_put(Dst, 12085, LJ_TSTR); + break; + case BC_KCDATA: +#if LJ_HASFFI + dasm_put(Dst, 12085, LJ_TCDATA); +#endif + break; + case BC_KSHORT: + if (LJ_DUALNUM) { + dasm_put(Dst, 12120, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 12132); + } else { + dasm_put(Dst, 12147); + } + dasm_put(Dst, 10255); + break; + case BC_KNUM: + if (sse) { + dasm_put(Dst, 12155); + } else { + dasm_put(Dst, 12168); + } + dasm_put(Dst, 10255); + break; + case BC_KPRI: + dasm_put(Dst, 12175); + break; + case BC_KNIL: + dasm_put(Dst, 12203, LJ_TNIL); + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + dasm_put(Dst, 12250, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_USETV: +#define TV2MARKOFS \ + ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)) + dasm_put(Dst, 12290, offsetof(GCfuncL, uvptr), DtA(->closed), DtA(->v), TV2MARKOFS, LJ_GC_BLACK, LJ_TISGCV, LJ_TISNUM - LJ_TISGCV, Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G); + dasm_put(Dst, 12381); + break; +#undef TV2MARKOFS + case BC_USETS: + dasm_put(Dst, 12393, offsetof(GCfuncL, uvptr), DtA(->v), LJ_TSTR, DtA(->marked), LJ_GC_BLACK, Dt4(->gch.marked), LJ_GC_WHITES, DtA(->closed), GG_DISP2G); + break; + case BC_USETN: + dasm_put(Dst, 12486); + if (sse) { + dasm_put(Dst, 12491); + } else { + dasm_put(Dst, 10766); + } + dasm_put(Dst, 12498, offsetof(GCfuncL, uvptr), DtA(->v)); + if (sse) { + dasm_put(Dst, 12507); + } else { + dasm_put(Dst, 12513); + } + dasm_put(Dst, 10255); + break; + case BC_USETP: + dasm_put(Dst, 12516, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_UCLO: + dasm_put(Dst, 12555, -BCBIAS_J*4, Dt1(->openupval), Dt1(->base), Dt1(->base)); + break; + + case BC_FNEW: + dasm_put(Dst, 12610, Dt1(->base), Dt1(->base), LJ_TFUNC); + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + dasm_put(Dst, 12676, Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), LJ_TTAB); + break; + case BC_TDUP: + dasm_put(Dst, 12798, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), Dt1(->base), LJ_TTAB); + break; + + case BC_GGET: + dasm_put(Dst, 12893, Dt7(->env)); + break; + case BC_GSET: + dasm_put(Dst, 12912, Dt7(->env)); + break; + + case BC_TGETV: + dasm_put(Dst, 12931, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 12954, LJ_TISNUM); + } else { + dasm_put(Dst, 12968, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12979); + } else { + } + dasm_put(Dst, 13000); + } + dasm_put(Dst, 13005, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_index, LJ_TNIL); + dasm_put(Dst, 13096, LJ_TSTR); + break; + case BC_TGETS: + dasm_put(Dst, 13114, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL); + dasm_put(Dst, 13198, LJ_TNIL, 
DtB(->next), Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + break; + case BC_TGETB: + dasm_put(Dst, 13269, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + dasm_put(Dst, 13364, LJ_TNIL); + break; + + case BC_TSETV: + dasm_put(Dst, 13381, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 12954, LJ_TISNUM); + } else { + dasm_put(Dst, 12968, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12979); + } else { + } + dasm_put(Dst, 13404); + } + dasm_put(Dst, 13409, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex); + dasm_put(Dst, 13489, LJ_TSTR, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + case BC_TSETS: + dasm_put(Dst, 13546, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->nomm), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL); + dasm_put(Dst, 13622, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, DtB(->next)); + dasm_put(Dst, 13710, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, LJ_TSTR, Dt1(->base), Dt1(->base), Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + case BC_TSETB: + dasm_put(Dst, 13801, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable)); + dasm_put(Dst, 13895, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + + case BC_TSETM: + dasm_put(Dst, 13941, Dt6(->marked), LJ_GC_BLACK, Dt6(->asize), Dt6(->array), Dt1(->base), Dt1(->base)); + dasm_put(Dst, 14084, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALL: case BC_CALLM: + dasm_put(Dst, 11279); + if (op == BC_CALLM) { + dasm_put(Dst, 14102); + } + dasm_put(Dst, 14107, LJ_TFUNC, Dt7(->pc)); + break; + + case BC_CALLMT: + dasm_put(Dst, 14102); + break; + case BC_CALLT: + dasm_put(Dst, 14149, LJ_TFUNC, FRAME_TYPE, Dt7(->ffid), Dt7(->pc)); + dasm_put(Dst, 14264, FRAME_TYPE, Dt7(->pc), PC2PROTO(k), FRAME_VARG, FRAME_TYPEP, FRAME_VARG); + break; + + case BC_ITERC: + dasm_put(Dst, 14335, LJ_TFUNC, 2+1, Dt7(->pc)); + break; + + case BC_ITERN: +#if LJ_HASJIT +#endif + dasm_put(Dst, 14406, Dt6(->asize), Dt6(->array), LJ_TNIL); + if (LJ_DUALNUM) { + dasm_put(Dst, 11146, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 11240); + } else { + dasm_put(Dst, 14452); + } + dasm_put(Dst, 14458); + if (LJ_DUALNUM) { + } else if (sse) { + dasm_put(Dst, 11111); + } else { + dasm_put(Dst, 11123); + } + dasm_put(Dst, 14471, -BCBIAS_J*4); + if (!LJ_DUALNUM && !sse) { + dasm_put(Dst, 14523); + } + dasm_put(Dst, 14529, Dt6(->hmask), sizeof(Node), Dt6(->node), DtB(->val.it), LJ_TNIL, DtB(->key), DtB(->val)); + break; + + case BC_ISNEXT: + dasm_put(Dst, 14601, LJ_TFUNC, LJ_TTAB, LJ_TNIL, Dt8(->ffid), FF_next_N, -BCBIAS_J*4, BC_JMP, -BCBIAS_J*4, BC_ITERC); + break; + + case BC_VARG: + dasm_put(Dst, 14701, (8+FRAME_VARG), LJ_TNIL, Dt1(->maxstack)); + dasm_put(Dst, 14861, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top)); + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + dasm_put(Dst, 14102); + break; + + case BC_RET: case BC_RET0: case BC_RET1: + if (op != 
BC_RET0) { + dasm_put(Dst, 14927); + } + dasm_put(Dst, 14931, FRAME_TYPE); + switch (op) { + case BC_RET: + dasm_put(Dst, 14950); + break; + case BC_RET1: + dasm_put(Dst, 15002); + /* fallthrough */ + case BC_RET0: + dasm_put(Dst, 15012); + default: + break; + } + dasm_put(Dst, 15023, Dt7(->pc), PC2PROTO(k)); + if (op == BC_RET) { + dasm_put(Dst, 15067, LJ_TNIL); + } else { + dasm_put(Dst, 15076, LJ_TNIL); + } + dasm_put(Dst, 15083, -FRAME_VARG, FRAME_TYPEP); + if (op != BC_RET0) { + dasm_put(Dst, 15107); + } + dasm_put(Dst, 4708); + break; + + /* -- Loops and branches ------------------------------------------------ */ + + + case BC_FORL: +#if LJ_HASJIT + dasm_put(Dst, 15111, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + vk = (op == BC_IFORL || op == BC_JFORL); + dasm_put(Dst, 15132); + if (LJ_DUALNUM) { + dasm_put(Dst, 15136, LJ_TISNUM); + if (!vk) { + dasm_put(Dst, 15146, LJ_TISNUM, LJ_TISNUM); + } else { +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 15175, LJ_TISNUM, LJ_TISNUM); +#endif + dasm_put(Dst, 15194); + } + dasm_put(Dst, 15213, LJ_TISNUM); + if (op == BC_FORI) { + dasm_put(Dst, 15224, -BCBIAS_J*4); + } else if (op == BC_JFORI) { + dasm_put(Dst, 15238, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + dasm_put(Dst, 15256, -BCBIAS_J*4); + } else { + dasm_put(Dst, 15248, BC_JLOOP); + } + dasm_put(Dst, 15270); + if (vk) { + dasm_put(Dst, 15294); + } + dasm_put(Dst, 15213, LJ_TISNUM); + if (op == BC_FORI) { + dasm_put(Dst, 15303); + } else if (op == BC_JFORI) { + dasm_put(Dst, 15308, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + dasm_put(Dst, 15322); + } else { + dasm_put(Dst, 15318, BC_JLOOP); + } + dasm_put(Dst, 15327); + } else if (!vk) { + dasm_put(Dst, 15334, LJ_TISNUM); + } + if (!vk) { + dasm_put(Dst, 15340, LJ_TISNUM); + } else { +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 15354, LJ_TISNUM, LJ_TISNUM); +#endif + } + dasm_put(Dst, 15373); + if (!vk) { + dasm_put(Dst, 15377, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 15386); + if (vk) { + dasm_put(Dst, 15398); + } else { + dasm_put(Dst, 15417); + } + dasm_put(Dst, 15422); + } else { + dasm_put(Dst, 15435); + if (vk) { + dasm_put(Dst, 15441); + } else { + dasm_put(Dst, 15457); + } + dasm_put(Dst, 15465); + if (cmov) { + dasm_put(Dst, 10210); + } else { + dasm_put(Dst, 10216); + } + if (!cmov) { + dasm_put(Dst, 15470); + } + } + if (op == BC_FORI) { + if (LJ_DUALNUM) { + dasm_put(Dst, 15476); + } else { + dasm_put(Dst, 15481, -BCBIAS_J*4); + } + } else if (op == BC_JFORI) { + dasm_put(Dst, 15491, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + if (LJ_DUALNUM) { + dasm_put(Dst, 15505); + } else { + dasm_put(Dst, 15510, -BCBIAS_J*4); + } + } else { + dasm_put(Dst, 15501, BC_JLOOP); + } + if (LJ_DUALNUM) { + dasm_put(Dst, 10139); + } else { + dasm_put(Dst, 10896); + } + if (sse) { + dasm_put(Dst, 15520); + } + break; + + case BC_ITERL: +#if LJ_HASJIT + dasm_put(Dst, 15111, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + dasm_put(Dst, 15531, LJ_TNIL); + if (op == BC_JITERL) { + dasm_put(Dst, 15546, BC_JLOOP); + } else { + dasm_put(Dst, 15560, -BCBIAS_J*4); + } + dasm_put(Dst, 10253); + break; + + case BC_LOOP: +#if LJ_HASJIT + dasm_put(Dst, 15111, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_ILOOP: + dasm_put(Dst, 10255); + break; + + case BC_JLOOP: +#if LJ_HASJIT + dasm_put(Dst, 15576, DISPATCH_J(trace), DtD(->mcode), 
DISPATCH_GL(jit_base), DISPATCH_GL(jit_L), 9*16+4*8, -1*16, -2*16, -3*16, -4*16, -5*16, -6*16, -7*16, -8*16, -9*16); +#endif + break; + + case BC_JMP: + dasm_put(Dst, 15685, -BCBIAS_J*4); + break; + + /* -- Function headers -------------------------------------------------- */ + + /* + ** Reminder: A function may be called with func/args above L->maxstack, + ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot, + ** too. This means all FUNC* ops (including fast functions) must check + ** for stack overflow _before_ adding more slots! + */ + + case BC_FUNCF: +#if LJ_HASJIT + dasm_put(Dst, 15710, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + dasm_put(Dst, 15731, -4+PC2PROTO(k), Dt1(->maxstack), -4+PC2PROTO(numparams)); + if (op == BC_JFUNCF) { + dasm_put(Dst, 15761, BC_JLOOP); + } else { + dasm_put(Dst, 10255); + } + dasm_put(Dst, 15770, LJ_TNIL); + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + dasm_put(Dst, 9400); + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + dasm_put(Dst, 15792, FRAME_VARG, Dt1(->maxstack), -4+PC2PROTO(numparams), LJ_TNIL); + if (op == BC_JFUNCV) { + dasm_put(Dst, 15761, BC_JLOOP); + } else { + dasm_put(Dst, 15883, -4+PC2PROTO(k)); + } + dasm_put(Dst, 15906, LJ_TNIL); + break; + + case BC_FUNCC: + case BC_FUNCCW: + dasm_put(Dst, 15928, Dt8(->f), Dt1(->base), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->top)); + if (op == BC_FUNCC) { + dasm_put(Dst, 2372); + } else { + dasm_put(Dst, 15958); + } + dasm_put(Dst, 15966, DISPATCH_GL(vmstate), ~LJ_VMST_C); + if (op == BC_FUNCC) { + dasm_put(Dst, 15975); + } else { + dasm_put(Dst, 15979, DISPATCH_GL(wrapf)); + } + dasm_put(Dst, 15984, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), Dt1(->top)); + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + int cmov = 1; + int sse = 0; +#ifdef LUAJIT_CPU_NOCMOV + cmov = 0; +#endif +#if defined(LUAJIT_CPU_SSE2) || defined(LJ_TARGET_X64) + sse = 1; +#endif + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx, cmov, sse); + + dasm_put(Dst, 16009); + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op, cmov, sse); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ +#if LJ_64 +#define SZPTR "8" +#define BSZPTR "3" +#define REG_SP "0x7" +#define REG_RA "0x10" +#else +#define SZPTR "4" +#define BSZPTR "2" +#define REG_SP "0x4" +#define REG_RA "0x8" +#endif + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE0:\n\n", (int)ctx->codesz, CFRAME_SIZE); +#if (defined(__sun__) && defined(__svr4__)) || defined(__solaris_) + fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n"); +#else + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); +#endif + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .LASFDE1-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE1:\n\n", (int)ctx->codesz, CFRAME_SIZE); + break; + case BUILD_coffasm: + fprintf(ctx->fp, "\t.section .eh_frame,\"dr\"\n"); + fprintf(ctx->fp, + "\t.def %slj_err_unwind_dwarf; .scl 2; .type 32; .endef\n", + LJ_32 ? 
"_" : ""); + fprintf(ctx->fp, + "Lframe1:\n" + "\t.long LECIE1-LSCIE1\n" + "LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zP\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 5\n" /* augmentation length */ + "\t.byte 0x00\n" /* absptr */ + "\t.long %slj_err_unwind_dwarf\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + "LECIE1:\n\n", LJ_32 ? "_" : ""); + fprintf(ctx->fp, + "LSFDE1:\n" + "\t.long LEFDE1-LASFDE1\n" + "LASFDE1:\n" + "\t.long LASFDE1-Lframe1\n" + "\t.long %slj_vm_asm_begin\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + "LEFDE1:\n\n", LJ_32 ? "_" : "", (int)ctx->codesz, CFRAME_SIZE); + break; + /* Mental note: never let Apple design an assembler. + ** Or a linker. Or a plastic case. But I digress. + */ + case BUILD_machasm: { + int i; + fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n"); + fprintf(ctx->fp, + "EH_frame1:\n" + "\t.set L$set$x,LECIEX-LSCIEX\n" + "\t.long L$set$x\n" + "LSCIEX:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.ascii \"zPR\\0\"\n" + "\t.byte 0x1\n" + "\t.byte 128-" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 6\n" /* augmentation length */ + "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */ +#if LJ_64 + "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n" +#else + "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. 
*/ +#endif + "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n" + "\t.align " BSZPTR "\n" + "LECIEX:\n\n"); + for (i = 0; i < ctx->nsym; i++) { + const char *name = ctx->sym[i].name; + int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs; + if (size == 0) continue; + fprintf(ctx->fp, + "%s.eh:\n" + "LSFDE%d:\n" + "\t.set L$set$%d,LEFDE%d-LASFDE%d\n" + "\t.long L$set$%d\n" + "LASFDE%d:\n" + "\t.long LASFDE%d-EH_frame1\n" + "\t.long %s-.\n" + "\t.long %d\n" + "\t.byte 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */ +#else + "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/ + "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */ +#endif + "\t.align " BSZPTR "\n" + "LEFDE%d:\n\n", + name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i); + } +#if LJ_64 + fprintf(ctx->fp, "\t.subsections_via_symbols\n"); +#else + fprintf(ctx->fp, + "\t.non_lazy_symbol_pointer\n" + "L_lj_err_unwind_dwarf$non_lazy_ptr:\n" + ".indirect_symbol _lj_err_unwind_dwarf\n" + ".long 0\n"); +#endif + } + break; + default: /* Difficult for other modes. */ + break; + } +} + diff --git a/src/luajit2/src/buildvm_x86.dasc b/src/luajit2/src/buildvm_x86.dasc new file mode 100644 index 0000000000..effd073791 --- /dev/null +++ b/src/luajit2/src/buildvm_x86.dasc @@ -0,0 +1,6216 @@ +|// Low-level VM code for x86 CPUs. +|// Bytecode interpreter, fast functions and helper functions. +|// Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +| +|.if X64 +|.arch x64 +|.else +|.arch x86 +|.endif +|.section code_op, code_sub +| +|.actionlist build_actionlist +|.globals GLOB_ +|.globalnames globnames +|.externnames extnames +| +|//----------------------------------------------------------------------- +| +|// Fixed register assignments for the interpreter. +|// This is very fragile and has many dependencies. Caveat emptor. +|.define BASE, edx // Not C callee-save, refetched anyway. +|.if not X64 +|.define KBASE, edi // Must be C callee-save. +|.define KBASEa, KBASE +|.define PC, esi // Must be C callee-save. +|.define PCa, PC +|.define DISPATCH, ebx // Must be C callee-save. +|.elif X64WIN +|.define KBASE, edi // Must be C callee-save. +|.define KBASEa, rdi +|.define PC, esi // Must be C callee-save. +|.define PCa, rsi +|.define DISPATCH, ebx // Must be C callee-save. +|.else +|.define KBASE, r15d // Must be C callee-save. +|.define KBASEa, r15 +|.define PC, ebx // Must be C callee-save. +|.define PCa, rbx +|.define DISPATCH, r14d // Must be C callee-save. +|.endif +| +|.define RA, ecx +|.define RAH, ch +|.define RAL, cl +|.define RB, ebp // Must be ebp (C callee-save). +|.define RC, eax // Must be eax (fcomparepp and others). +|.define RCW, ax +|.define RCH, ah +|.define RCL, al +|.define OP, RB +|.define RD, RC +|.define RDW, RCW +|.define RDL, RCL +|.if X64 +|.define RAa, rcx +|.define RBa, rbp +|.define RCa, rax +|.define RDa, rax +|.else +|.define RAa, RA +|.define RBa, RB +|.define RCa, RC +|.define RDa, RD +|.endif +| +|.if not X64 +|.define FCARG1, ecx // x86 fastcall arguments. +|.define FCARG2, edx +|.elif X64WIN +|.define CARG1, rcx // x64/WIN64 C call arguments. 
+|.define CARG2, rdx +|.define CARG3, r8 +|.define CARG4, r9 +|.define CARG1d, ecx +|.define CARG2d, edx +|.define CARG3d, r8d +|.define CARG4d, r9d +|.define FCARG1, CARG1d // Upwards compatible to x86 fastcall. +|.define FCARG2, CARG2d +|.else +|.define CARG1, rdi // x64/POSIX C call arguments. +|.define CARG2, rsi +|.define CARG3, rdx +|.define CARG4, rcx +|.define CARG5, r8 +|.define CARG6, r9 +|.define CARG1d, edi +|.define CARG2d, esi +|.define CARG3d, edx +|.define CARG4d, ecx +|.define CARG5d, r8d +|.define CARG6d, r9d +|.define FCARG1, CARG1d // Simulate x86 fastcall. +|.define FCARG2, CARG2d +|.endif +| +|// Type definitions. Some of these are only used for documentation. +|.type L, lua_State +|.type GL, global_State +|.type TVALUE, TValue +|.type GCOBJ, GCobj +|.type STR, GCstr +|.type TAB, GCtab +|.type LFUNC, GCfuncL +|.type CFUNC, GCfuncC +|.type PROTO, GCproto +|.type UPVAL, GCupval +|.type NODE, Node +|.type NARGS, int +|.type TRACE, GCtrace +| +|// Stack layout while in interpreter. Must match with lj_frame.h. +|//----------------------------------------------------------------------- +|.if not X64 // x86 stack layout. +| +|.define CFRAME_SPACE, aword*7 // Delta for esp (see <--). +|.macro saveregs +| push ebp; push edi; push esi; push ebx +| sub esp, CFRAME_SPACE +|.endmacro +|.macro restoreregs +| add esp, CFRAME_SPACE +| pop ebx; pop esi; pop edi; pop ebp +|.endmacro +| +|.define SAVE_ERRF, aword [esp+aword*15] // vm_pcall/vm_cpcall only. +|.define SAVE_NRES, aword [esp+aword*14] +|.define SAVE_CFRAME, aword [esp+aword*13] +|.define SAVE_L, aword [esp+aword*12] +|//----- 16 byte aligned, ^^^ arguments from C caller +|.define SAVE_RET, aword [esp+aword*11] //<-- esp entering interpreter. +|.define SAVE_R4, aword [esp+aword*10] +|.define SAVE_R3, aword [esp+aword*9] +|.define SAVE_R2, aword [esp+aword*8] +|//----- 16 byte aligned +|.define SAVE_R1, aword [esp+aword*7] //<-- esp after register saves. +|.define SAVE_PC, aword [esp+aword*6] +|.define TMP2, aword [esp+aword*5] +|.define TMP1, aword [esp+aword*4] +|//----- 16 byte aligned +|.define ARG4, aword [esp+aword*3] +|.define ARG3, aword [esp+aword*2] +|.define ARG2, aword [esp+aword*1] +|.define ARG1, aword [esp] //<-- esp while in interpreter. +|//----- 16 byte aligned, ^^^ arguments for C callee +| +|// FPARGx overlaps ARGx and ARG(x+1) on x86. +|.define FPARG3, qword [esp+qword*1] +|.define FPARG1, qword [esp] +|// TMPQ overlaps TMP1/TMP2. ARG5/MULTRES overlap TMP1/TMP2 (and TMPQ). +|.define TMPQ, qword [esp+aword*4] +|.define TMP3, ARG4 +|.define ARG5, TMP1 +|.define TMPa, TMP1 +|.define MULTRES, TMP2 +| +|// Arguments for vm_call and vm_pcall. +|.define INARG_BASE, SAVE_CFRAME // Overwritten by SAVE_CFRAME! +| +|// Arguments for vm_cpcall. +|.define INARG_CP_CALL, SAVE_ERRF +|.define INARG_CP_UD, SAVE_NRES +|.define INARG_CP_FUNC, SAVE_CFRAME +| +|//----------------------------------------------------------------------- +|.elif X64WIN // x64/Windows stack layout +| +|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). 
+|.macro saveregs +| push rbp; push rdi; push rsi; push rbx +| sub rsp, CFRAME_SPACE +|.endmacro +|.macro restoreregs +| add rsp, CFRAME_SPACE +| pop rbx; pop rsi; pop rdi; pop rbp +|.endmacro +| +|.define SAVE_CFRAME, aword [rsp+aword*13] +|.define SAVE_PC, dword [rsp+dword*25] +|.define SAVE_L, dword [rsp+dword*24] +|.define SAVE_ERRF, dword [rsp+dword*23] +|.define SAVE_NRES, dword [rsp+dword*22] +|.define TMP2, dword [rsp+dword*21] +|.define TMP1, dword [rsp+dword*20] +|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter +|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. +|.define SAVE_R4, aword [rsp+aword*8] +|.define SAVE_R3, aword [rsp+aword*7] +|.define SAVE_R2, aword [rsp+aword*6] +|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. +|.define ARG5, aword [rsp+aword*4] +|.define CSAVE_4, aword [rsp+aword*3] +|.define CSAVE_3, aword [rsp+aword*2] +|.define CSAVE_2, aword [rsp+aword*1] +|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter. +|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee +| +|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ). +|.define TMPQ, qword [rsp+aword*10] +|.define MULTRES, TMP2 +|.define TMPa, ARG5 +|.define ARG5d, dword [rsp+aword*4] +|.define TMP3, ARG5d +| +|//----------------------------------------------------------------------- +|.else // x64/POSIX stack layout +| +|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--). +|.macro saveregs +| push rbp; push rbx; push r15; push r14 +| sub rsp, CFRAME_SPACE +|.endmacro +|.macro restoreregs +| add rsp, CFRAME_SPACE +| pop r14; pop r15; pop rbx; pop rbp +|.endmacro +| +|//----- 16 byte aligned, +|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter. +|.define SAVE_R4, aword [rsp+aword*8] +|.define SAVE_R3, aword [rsp+aword*7] +|.define SAVE_R2, aword [rsp+aword*6] +|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves. +|.define SAVE_CFRAME, aword [rsp+aword*4] +|.define SAVE_PC, dword [rsp+dword*7] +|.define SAVE_L, dword [rsp+dword*6] +|.define SAVE_ERRF, dword [rsp+dword*5] +|.define SAVE_NRES, dword [rsp+dword*4] +|.define TMPa, aword [rsp+aword*1] +|.define TMP2, dword [rsp+dword*1] +|.define TMP1, dword [rsp] //<-- rsp while in interpreter. +|//----- 16 byte aligned +| +|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ). +|.define TMPQ, qword [rsp] +|.define TMP3, dword [rsp+aword*1] +|.define MULTRES, TMP2 +| +|.endif +| +|//----------------------------------------------------------------------- +| +|// Instruction headers. +|.macro ins_A; .endmacro +|.macro ins_AD; .endmacro +|.macro ins_AJ; .endmacro +|.macro ins_ABC; movzx RB, RCH; movzx RC, RCL; .endmacro +|.macro ins_AB_; movzx RB, RCH; .endmacro +|.macro ins_A_C; movzx RC, RCL; .endmacro +|.macro ins_AND; not RDa; .endmacro +| +|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster). +|.macro ins_NEXT +| mov RC, [PC] +| movzx RA, RCH +| movzx OP, RCL +| add PC, 4 +| shr RC, 16 +|.if X64 +| jmp aword [DISPATCH+OP*8] +|.else +| jmp aword [DISPATCH+OP*4] +|.endif +|.endmacro +| +|// Instruction footer. +|.if 1 +| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use. +| .define ins_next, ins_NEXT +| .define ins_next_, ins_NEXT +|.else +| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch. +| // Affects only certain kinds of benchmarks (and only with -j off). +| // Around 10%-30% slower on Core2, a lot more slower on P4. 
+| .macro ins_next +| jmp ->ins_next +| .endmacro +| .macro ins_next_ +| ->ins_next: +| ins_NEXT +| .endmacro +|.endif +| +|// Call decode and dispatch. +|.macro ins_callt +| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-4] = PC +| mov PC, LFUNC:RB->pc +| mov RA, [PC] +| movzx OP, RAL +| movzx RA, RAH +| add PC, 4 +|.if X64 +| jmp aword [DISPATCH+OP*8] +|.else +| jmp aword [DISPATCH+OP*4] +|.endif +|.endmacro +| +|.macro ins_call +| // BASE = new base, RB = LFUNC, RD = nargs+1 +| mov [BASE-4], PC +| ins_callt +|.endmacro +| +|//----------------------------------------------------------------------- +| +|// Macros to test operand types. +|.macro checktp, reg, tp; cmp dword [BASE+reg*8+4], tp; .endmacro +|.macro checknum, reg, target; checktp reg, LJ_TISNUM; jae target; .endmacro +|.macro checkint, reg, target; checktp reg, LJ_TISNUM; jne target; .endmacro +|.macro checkstr, reg, target; checktp reg, LJ_TSTR; jne target; .endmacro +|.macro checktab, reg, target; checktp reg, LJ_TTAB; jne target; .endmacro +| +|// These operands must be used with movzx. +|.define PC_OP, byte [PC-4] +|.define PC_RA, byte [PC-3] +|.define PC_RB, byte [PC-1] +|.define PC_RC, byte [PC-2] +|.define PC_RD, word [PC-2] +| +|.macro branchPC, reg +| lea PC, [PC+reg*4-BCBIAS_J*4] +|.endmacro +| +|// Assumes DISPATCH is relative to GL. +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +| +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) +| +|// Decrement hashed hotcount and trigger trace recorder if zero. +|.macro hotloop, reg +| mov reg, PC +| shr reg, 1 +| and reg, HOTCOUNT_PCMASK +| sub word [DISPATCH+reg+GG_DISP2HOT], 1 +| jz ->vm_hotloop +|.endmacro +| +|.macro hotcall, reg +| mov reg, PC +| shr reg, 1 +| and reg, HOTCOUNT_PCMASK +| sub word [DISPATCH+reg+GG_DISP2HOT], 1 +| jz ->vm_hotcall +|.endmacro +| +|// Set current VM state. +|.macro set_vmstate, st +| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st +|.endmacro +| +|// Annoying x87 stuff: support for two compare variants. +|.macro fcomparepp // Compare and pop st0 >< st1. +||if (cmov) { +| fucomip st1 +| fpop +||} else { +| fucompp +| fnstsw ax // eax modified! +| sahf +||} +|.endmacro +| +|.macro fdup; fld st0; .endmacro +|.macro fpop1; fstp st1; .endmacro +| +|// Synthesize SSE FP constants. +|.macro sseconst_abs, reg, tmp // Synthesize abs mask. +|.if X64 +| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp +|.else +| pxor reg, reg; pcmpeqd reg, reg; psrlq reg, 1 +|.endif +|.endmacro +| +|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const. +|.if X64 +| mov64 tmp, U64x(val,00000000); movd reg, tmp +|.else +| mov tmp, 0x .. val; movd reg, tmp; pshufd reg, reg, 0x51 +|.endif +|.endmacro +| +|.macro sseconst_sign, reg, tmp // Synthesize sign mask. +| sseconst_hi reg, tmp, 80000000 +|.endmacro +|.macro sseconst_1, reg, tmp // Synthesize 1.0. +| sseconst_hi reg, tmp, 3ff00000 +|.endmacro +|.macro sseconst_m1, reg, tmp // Synthesize -1.0. +| sseconst_hi reg, tmp, bff00000 +|.endmacro +|.macro sseconst_2p52, reg, tmp // Synthesize 2^52. +| sseconst_hi reg, tmp, 43300000 +|.endmacro +|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51. +| sseconst_hi reg, tmp, 43380000 +|.endmacro +| +|// Move table write barrier back. Overwrites reg. 
+|.macro barrierback, tab, reg +| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab) +| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)] +| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab +| mov tab->gclist, reg +|.endmacro +| +|//----------------------------------------------------------------------- + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx, int cmov, int sse) +{ + |.code_sub + | + |//----------------------------------------------------------------------- + |//-- Return handling ---------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_returnp: + | test PC, FRAME_P + | jz ->cont_dispatch + | + | // Return from pcall or xpcall fast func. + | and PC, -8 + | sub BASE, PC // Restore caller base. + | lea RAa, [RA+PC-8] // Rebase RA and prepend one result. + | mov PC, [BASE-4] // Fetch PC of previous frame. + | // Prepending may overwrite the pcall frame, so do it at the end. + | mov dword [BASE+RA+4], LJ_TTRUE // Prepend true to results. + | + |->vm_returnc: + | add RD, 1 // RD = nresults+1 + | mov MULTRES, RD + | test PC, FRAME_TYPE + | jz ->BC_RET_Z // Handle regular return to Lua. + | + |->vm_return: + | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return + | xor PC, FRAME_C + | test PC, FRAME_TYPE + | jnz ->vm_returnp + | + | // Return to C. + | set_vmstate C + | and PC, -8 + | sub PC, BASE + | neg PC // Previous base = BASE - delta. + | + | sub RD, 1 + | jz >2 + |1: // Move results down. + |.if X64 + | mov RBa, [BASE+RA] + | mov [BASE-8], RBa + |.else + | mov RB, [BASE+RA] + | mov [BASE-8], RB + | mov RB, [BASE+RA+4] + | mov [BASE-4], RB + |.endif + | add BASE, 8 + | sub RD, 1 + | jnz <1 + |2: + | mov L:RB, SAVE_L + | mov L:RB->base, PC + |3: + | mov RD, MULTRES + | mov RA, SAVE_NRES // RA = wanted nresults+1 + |4: + | cmp RA, RD + | jne >6 // More/less results wanted? + |5: + | sub BASE, 8 + | mov L:RB->top, BASE + | + |->vm_leave_cp: + | mov RAa, SAVE_CFRAME // Restore previous C frame. + | mov L:RB->cframe, RAa + | xor eax, eax // Ok return status for vm_pcall. + | + |->vm_leave_unw: + | restoreregs + | ret + | + |6: + | jb >7 // Less results wanted? + | // More results wanted. Check stack size and fill up results with nil. + | cmp BASE, L:RB->maxstack + | ja >8 + | mov dword [BASE-4], LJ_TNIL + | add BASE, 8 + | add RD, 1 + | jmp <4 + | + |7: // Less results wanted. + | test RA, RA + | jz <5 // But check for LUA_MULTRET+1. + | sub RA, RD // Negative result! + | lea BASE, [BASE+RA*8] // Correct top. + | jmp <5 + | + |8: // Corner case: need to grow stack for filling up results. + | // This can happen if: + | // - A C function grows the stack (a lot). + | // - The GC shrinks the stack in between. + | // - A return back from a lua_call() with (high) nresults adjustment. + | mov L:RB->top, BASE // Save current top held in BASE (yes). + | mov MULTRES, RD // Need to fill only remainder with nil. + | mov FCARG2, RA + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + | mov BASE, L:RB->top // Need the (realloced) L->top in BASE. + | jmp <3 + | + |->vm_unwind_c@8: // Unwind C stack, return from vm_pcall. + | // (void *cframe, int errcode) + |.if X64 + | mov eax, CARG2d // Error return status for vm_pcall. + | mov rsp, CARG1 + |.else + | mov eax, FCARG2 // Error return status for vm_pcall. 
+ | mov esp, FCARG1 + |.endif + |->vm_unwind_c_eh: // Landing pad for external unwinder. + | mov L:RB, SAVE_L + | mov GL:RB, L:RB->glref + | mov dword GL:RB->vmstate, ~LJ_VMST_C + | jmp ->vm_leave_unw + | + |->vm_unwind_rethrow: + |.if X64 and not X64WIN + | mov FCARG1, SAVE_L + | mov FCARG2, eax + | restoreregs + | jmp extern lj_err_throw@8 // (lua_State *L, int errcode) + |.endif + | + |->vm_unwind_ff@4: // Unwind C stack, return from ff pcall. + | // (void *cframe) + |.if X64 + | and CARG1, CFRAME_RAWMASK + | mov rsp, CARG1 + |.else + | and FCARG1, CFRAME_RAWMASK + | mov esp, FCARG1 + |.endif + |->vm_unwind_ff_eh: // Landing pad for external unwinder. + | mov L:RB, SAVE_L + | mov RAa, -8 // Results start at BASE+RA = BASE-8. + | mov RD, 1+1 // Really 1+2 results, incr. later. + | mov BASE, L:RB->base + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | add DISPATCH, GG_G2DISP + | mov PC, [BASE-4] // Fetch PC of previous frame. + | mov dword [BASE-4], LJ_TFALSE // Prepend false to error message. + | set_vmstate INTERP + | jmp ->vm_returnc // Increments RD/MULTRES and returns. + | + |//----------------------------------------------------------------------- + |//-- Grow stack for calls ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_growstack_c: // Grow stack for C function. + | mov FCARG2, LUA_MINSTACK + | jmp >2 + | + |->vm_growstack_v: // Grow stack for vararg Lua function. + | sub RD, 8 + | jmp >1 + | + |->vm_growstack_f: // Grow stack for fixarg Lua function. + | // BASE = new base, RD = nargs+1, RB = L, PC = first PC + | lea RD, [BASE+NARGS:RD*8-8] + |1: + | movzx RA, byte [PC-4+PC2PROTO(framesize)] + | add PC, 4 // Must point after first instruction. + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov SAVE_PC, PC + | mov FCARG2, RA + |2: + | // RB = L, L->base = new base, L->top = top + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + | mov BASE, L:RB->base + | mov RD, L:RB->top + | mov LFUNC:RB, [BASE-8] + | sub RD, BASE + | shr RD, 3 + | add NARGS:RD, 1 + | // BASE = new base, RB = LFUNC, RD = nargs+1 + | ins_callt // Just retry the call. + | + |//----------------------------------------------------------------------- + |//-- Entry points into the assembler VM --------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_resume: // Setup C frame and resume thread. + | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0) + | saveregs + |.if X64 + | mov L:RB, CARG1d // Caveat: CARG1d may be RA. + | mov SAVE_L, CARG1d + | mov RA, CARG2d + |.else + | mov L:RB, SAVE_L + | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME! + |.endif + | mov PC, FRAME_CP + | xor RD, RD + | lea KBASEa, [esp+CFRAME_RESUME] + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | add DISPATCH, GG_G2DISP + | mov L:RB->cframe, KBASEa + | mov SAVE_PC, RD // Any value outside of bytecode is ok. + | mov SAVE_CFRAME, RDa + |.if X64 + | mov SAVE_NRES, RD + | mov SAVE_ERRF, RD + |.endif + | cmp byte L:RB->status, RDL + | je >3 // Initial resume (like a call). + | + | // Resume after yield (like a return). 
+ | set_vmstate INTERP + | mov byte L:RB->status, RDL + | mov BASE, L:RB->base + | mov RD, L:RB->top + | sub RD, RA + | shr RD, 3 + | add RD, 1 // RD = nresults+1 + | sub RA, BASE // RA = resultofs + | mov PC, [BASE-4] + | mov MULTRES, RD + | test PC, FRAME_TYPE + | jz ->BC_RET_Z + | jmp ->vm_return + | + |->vm_pcall: // Setup protected C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef) + | saveregs + | mov PC, FRAME_CP + |.if X64 + | mov SAVE_ERRF, CARG4d + |.endif + | jmp >1 + | + |->vm_call: // Setup C frame and enter VM. + | // (lua_State *L, TValue *base, int nres1) + | saveregs + | mov PC, FRAME_C + | + |1: // Entry point for vm_pcall above (PC = ftype). + |.if X64 + | mov SAVE_NRES, CARG3d + | mov L:RB, CARG1d // Caveat: CARG1d may be RA. + | mov SAVE_L, CARG1d + | mov RA, CARG2d + |.else + | mov L:RB, SAVE_L + | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME! + |.endif + | + | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain. + | mov SAVE_CFRAME, KBASEa + | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. + |.if X64 + | mov L:RB->cframe, rsp + |.else + | mov L:RB->cframe, esp + |.endif + | + |2: // Entry point for vm_cpcall below (RA = base, RB = L, PC = ftype). + | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. + | add DISPATCH, GG_G2DISP + | + |3: // Entry point for vm_resume above (RA = base, RB = L, PC = ftype). + | set_vmstate INTERP + | mov BASE, L:RB->base // BASE = old base (used in vmeta_call). + | add PC, RA + | sub PC, BASE // PC = frame delta + frame type + | + | mov RD, L:RB->top + | sub RD, RA + | shr NARGS:RD, 3 + | add NARGS:RD, 1 // RD = nargs+1 + | + |->vm_call_dispatch: + | mov LFUNC:RB, [RA-8] + | cmp dword [RA-4], LJ_TFUNC + | jne ->vmeta_call // Ensure KBASE defined and != BASE. + | + |->vm_call_dispatch_f: + | mov BASE, RA + | ins_call + | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC + | + |->vm_cpcall: // Setup protected C frame, call C. + | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp) + | saveregs + |.if X64 + | mov L:RB, CARG1d // Caveat: CARG1d may be RA. + | mov SAVE_L, CARG1d + |.else + | mov L:RB, SAVE_L + | // Caveat: INARG_CP_* and SAVE_CFRAME/SAVE_NRES/SAVE_ERRF overlap! + | mov RC, INARG_CP_UD // Get args before they are overwritten. + | mov RA, INARG_CP_FUNC + | mov BASE, INARG_CP_CALL + |.endif + | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. + | + | mov KBASE, L:RB->stack // Compute -savestack(L, L->top). + | sub KBASE, L:RB->top + | mov SAVE_ERRF, 0 // No error function. + | mov SAVE_NRES, KBASE // Neg. delta means cframe w/o frame. + | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe). + | + |.if X64 + | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain. + | mov SAVE_CFRAME, KBASEa + | mov L:RB->cframe, rsp + | + | call CARG4 // (lua_State *L, lua_CFunction func, void *ud) + |.else + | mov ARG3, RC // Have to copy args downwards. + | mov ARG2, RA + | mov ARG1, L:RB + | + | mov KBASE, L:RB->cframe // Add our C frame to cframe chain. + | mov SAVE_CFRAME, KBASE + | mov L:RB->cframe, esp + | + | call BASE // (lua_State *L, lua_CFunction func, void *ud) + |.endif + | // TValue * (new base) or NULL returned in eax (RC). + | test RC, RC + | jz ->vm_leave_cp // No base? Just remove C frame. + | mov RA, RC + | mov PC, FRAME_CP + | jmp <2 // Else continue with the call. 
+ | + |//----------------------------------------------------------------------- + |//-- Metamethod handling ------------------------------------------------ + |//----------------------------------------------------------------------- + | + |//-- Continuation dispatch ---------------------------------------------- + | + |->cont_dispatch: + | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES) + | add RA, BASE + | and PC, -8 + | mov RB, BASE + | sub BASE, PC // Restore caller BASE. + | mov dword [RA+RD*8-4], LJ_TNIL // Ensure one valid arg. + | mov RC, RA // ... in [RC] + | mov PC, [RB-12] // Restore PC from [cont|PC]. + |.if X64 + | movsxd RAa, dword [RB-16] // May be negative on WIN64 with debug. + | test RA, RA + | jz >1 + | lea KBASEa, qword [=>0] + | add RAa, KBASEa + |.else + | mov RA, dword [RB-16] + | test RA, RA + | jz >1 + |.endif + | mov LFUNC:KBASE, [BASE-8] + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | // BASE = base, RC = result, RB = meta base + | jmp RAa // Jump to continuation. + | + |1: // Tail call from C function. + | sub RB, BASE + | shr RB, 3 + | lea RD, [RB-1] + | jmp ->vm_call_tail + | + |->cont_cat: // BASE = base, RC = result, RB = mbase + | movzx RA, PC_RB + | sub RB, 16 + | lea RA, [BASE+RA*8] + | sub RA, RB + | je ->cont_ra + | neg RA + | shr RA, 3 + |.if X64WIN + | mov CARG3d, RA + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE + | mov RCa, [RC] + | mov [RB], RCa + | mov CARG2d, RB + |.elif X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE + | mov CARG3d, RA + | mov RAa, [RC] + | mov [RB], RAa + | mov CARG2d, RB + |.else + | mov ARG3, RA + | mov RA, [RC+4] + | mov RC, [RC] + | mov [RB+4], RA + | mov [RB], RC + | mov ARG2, RB + |.endif + | jmp ->BC_CAT_Z + | + |//-- Table indexing metamethods ----------------------------------------- + | + |->vmeta_tgets: + | mov TMP1, RC // RC = GCstr * + | mov TMP2, LJ_TSTR + | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2. + | cmp PC_OP, BC_GGET + | jne >1 + | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. + | mov [RA], TAB:RB // RB = GCtab * + | mov dword [RA+4], LJ_TTAB + | mov RB, RA + | jmp >2 + | + |->vmeta_tgetb: + | movzx RC, PC_RC + if (LJ_DUALNUM) { + | mov TMP2, LJ_TISNUM + | mov TMP1, RC + } else if (sse) { + | cvtsi2sd xmm0, RC + | movsd TMPQ, xmm0 + } else { + |.if not X64 + | mov ARG4, RC + | fild ARG4 + | fstp TMPQ + |.endif + } + | lea RCa, TMPQ // Store temp. TValue in TMPQ. + | jmp >1 + | + |->vmeta_tgetv: + | movzx RC, PC_RC // Reload TValue *k from RC. + | lea RC, [BASE+RC*8] + |1: + | movzx RB, PC_RB // Reload TValue *t from RB. + | lea RB, [BASE+RB*8] + |2: + |.if X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG2d, RB + | mov CARG3, RCa // May be 64 bit ptr to stack. + | mov L:RB, L:CARG1d + |.else + | mov ARG2, RB + | mov L:RB, SAVE_L + | mov ARG3, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k) + | // TValue * (finished) or NULL (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz >3 + |->cont_ra: // BASE = base, RC = result + | movzx RA, PC_RA + |.if X64 + | mov RBa, [RC] + | mov [BASE+RA*8], RBa + |.else + | mov RB, [RC+4] + | mov RC, [RC] + | mov [BASE+RA*8+4], RB + | mov [BASE+RA*8], RC + |.endif + | ins_next + | + |3: // Call __index metamethod. 
+ | // BASE = base, L->top = new base, stack = cont/func/t/k + | mov RA, L:RB->top + | mov [RA-12], PC // [cont|PC] + | lea PC, [RA+FRAME_CONT] + | sub PC, BASE + | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here. + | mov NARGS:RD, 2+1 // 2 args for func(t, k). + | jmp ->vm_call_dispatch_f + | + |//----------------------------------------------------------------------- + | + |->vmeta_tsets: + | mov TMP1, RC // RC = GCstr * + | mov TMP2, LJ_TSTR + | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2. + | cmp PC_OP, BC_GSET + | jne >1 + | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv. + | mov [RA], TAB:RB // RB = GCtab * + | mov dword [RA+4], LJ_TTAB + | mov RB, RA + | jmp >2 + | + |->vmeta_tsetb: + | movzx RC, PC_RC + if (LJ_DUALNUM) { + | mov TMP2, LJ_TISNUM + | mov TMP1, RC + } else if (sse) { + | cvtsi2sd xmm0, RC + | movsd TMPQ, xmm0 + } else { + |.if not X64 + | mov ARG4, RC + | fild ARG4 + | fstp TMPQ + |.endif + } + | lea RCa, TMPQ // Store temp. TValue in TMPQ. + | jmp >1 + | + |->vmeta_tsetv: + | movzx RC, PC_RC // Reload TValue *k from RC. + | lea RC, [BASE+RC*8] + |1: + | movzx RB, PC_RB // Reload TValue *t from RB. + | lea RB, [BASE+RB*8] + |2: + |.if X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG2d, RB + | mov CARG3, RCa // May be 64 bit ptr to stack. + | mov L:RB, L:CARG1d + |.else + | mov ARG2, RB + | mov L:RB, SAVE_L + | mov ARG3, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) + | // TValue * (finished) or NULL (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz >3 + | // NOBARRIER: lj_meta_tset ensures the table is not black. + | movzx RA, PC_RA + |.if X64 + | mov RBa, [BASE+RA*8] + | mov [RC], RBa + |.else + | mov RB, [BASE+RA*8+4] + | mov RA, [BASE+RA*8] + | mov [RC+4], RB + | mov [RC], RA + |.endif + |->cont_nop: // BASE = base, (RC = result) + | ins_next + | + |3: // Call __newindex metamethod. + | // BASE = base, L->top = new base, stack = cont/func/t/k/(v) + | mov RA, L:RB->top + | mov [RA-12], PC // [cont|PC] + | movzx RC, PC_RA + | // Copy value to third argument. + |.if X64 + | mov RBa, [BASE+RC*8] + | mov [RA+16], RBa + |.else + | mov RB, [BASE+RC*8+4] + | mov RC, [BASE+RC*8] + | mov [RA+20], RB + | mov [RA+16], RC + |.endif + | lea PC, [RA+FRAME_CONT] + | sub PC, BASE + | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here. + | mov NARGS:RD, 3+1 // 3 args for func(t, k, v). + | jmp ->vm_call_dispatch_f + | + |//-- Comparison metamethods --------------------------------------------- + | + |->vmeta_comp: + |.if X64 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d == BASE. + |.if X64WIN + | lea CARG3d, [BASE+RD*8] + | lea CARG2d, [BASE+RA*8] + |.else + | lea CARG2d, [BASE+RA*8] + | lea CARG3d, [BASE+RD*8] + |.endif + | mov CARG1d, L:RB // Caveat: CARG1d/CARG4d == RA. + | movzx CARG4d, PC_OP + |.else + | movzx RB, PC_OP + | lea RD, [BASE+RD*8] + | lea RA, [BASE+RA*8] + | mov ARG4, RB + | mov L:RB, SAVE_L + | mov ARG3, RD + | mov ARG2, RA + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op) + | // 0/1 or TValue * (metamethod) returned in eax (RC). 
+ |3: + | mov BASE, L:RB->base + | cmp RC, 1 + | ja ->vmeta_binop + |4: + | lea PC, [PC+4] + | jb >6 + |5: + | movzx RD, PC_RD + | branchPC RD + |6: + | ins_next + | + |->cont_condt: // BASE = base, RC = result + | add PC, 4 + | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is true. + | jb <5 + | jmp <6 + | + |->cont_condf: // BASE = base, RC = result + | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is false. + | jmp <4 + | + |->vmeta_equal: + | sub PC, 4 + |.if X64WIN + | mov CARG3d, RD + | mov CARG4d, RB + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d == BASE. + | mov CARG2d, RA + | mov CARG1d, L:RB // Caveat: CARG1d == RA. + |.elif X64 + | mov CARG2d, RA + | mov CARG4d, RB // Caveat: CARG4d == RA. + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG3d == BASE. + | mov CARG3d, RD + | mov CARG1d, L:RB + |.else + | mov ARG4, RB + | mov L:RB, SAVE_L + | mov ARG3, RD + | mov ARG2, RA + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne) + | // 0/1 or TValue * (metamethod) returned in eax (RC). + | jmp <3 + | + |->vmeta_equal_cd: +#if LJ_HASFFI + | sub PC, 4 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov FCARG1, L:RB + | mov FCARG2, dword [PC-4] + | mov SAVE_PC, PC + | call extern lj_meta_equal_cd@8 // (lua_State *L, BCIns ins) + | // 0/1 or TValue * (metamethod) returned in eax (RC). + | jmp <3 +#endif + | + |//-- Arithmetic metamethods --------------------------------------------- + | + |->vmeta_arith_vno: +#if LJ_DUALNUM + | movzx RB, PC_RB +#endif + |->vmeta_arith_vn: + | lea RC, [KBASE+RC*8] + | jmp >1 + | + |->vmeta_arith_nvo: +#if LJ_DUALNUM + | movzx RC, PC_RC +#endif + |->vmeta_arith_nv: + | lea RC, [KBASE+RC*8] + | lea RB, [BASE+RB*8] + | xchg RB, RC + | jmp >2 + | + |->vmeta_unm: + | lea RC, [BASE+RD*8] + | mov RB, RC + | jmp >2 + | + |->vmeta_arith_vvo: +#if LJ_DUALNUM + | movzx RB, PC_RB +#endif + |->vmeta_arith_vv: + | lea RC, [BASE+RC*8] + |1: + | lea RB, [BASE+RB*8] + |2: + | lea RA, [BASE+RA*8] + |.if X64WIN + | mov CARG3d, RB + | mov CARG4d, RC + | movzx RC, PC_OP + | mov ARG5d, RC + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d == BASE. + | mov CARG2d, RA + | mov CARG1d, L:RB // Caveat: CARG1d == RA. + |.elif X64 + | movzx CARG5d, PC_OP + | mov CARG2d, RA + | mov CARG4d, RC // Caveat: CARG4d == RA. + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE // Caveat: CARG3d == BASE. + | mov CARG3d, RB + | mov L:RB, L:CARG1d + |.else + | mov ARG3, RB + | mov L:RB, SAVE_L + | mov ARG4, RC + | movzx RC, PC_OP + | mov ARG2, RA + | mov ARG5, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) + | // NULL (finished) or TValue * (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jz ->cont_nop + | + | // Call metamethod for binary op. + |->vmeta_binop: + | // BASE = base, RC = new base, stack = cont/func/o1/o2 + | mov RA, RC + | sub RC, BASE + | mov [RA-12], PC // [cont|PC] + | lea PC, [RC+FRAME_CONT] + | mov NARGS:RD, 2+1 // 2 args for func(o1, o2). + | jmp ->vm_call_dispatch + | + |->vmeta_len: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | lea FCARG2, [BASE+RD*8] // Caveat: FCARG2 == BASE + | mov L:FCARG1, L:RB + | mov SAVE_PC, PC + | call extern lj_meta_len@8 // (lua_State *L, TValue *o) + | // NULL (retry) or TValue * (metamethod) returned in eax (RC). 
+ | mov BASE, L:RB->base +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | test RC, RC + | jne ->vmeta_binop // Binop call for compatibility. + | movzx RD, PC_RD + | mov TAB:FCARG1, [BASE+RD*8] + | jmp ->BC_LEN_Z +#else + | jmp ->vmeta_binop // Binop call for compatibility. +#endif + | + |//-- Call metamethod ---------------------------------------------------- + | + |->vmeta_call_ra: + | lea RA, [BASE+RA*8+8] + |->vmeta_call: // Resolve and call __call metamethod. + | // BASE = old base, RA = new base, RC = nargs+1, PC = return + | mov TMP2, RA // Save RA, RC for us. + | mov TMP1, NARGS:RD + | sub RA, 8 + |.if X64 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG2d, RA + | lea CARG3d, [RA+NARGS:RD*8] + | mov CARG1d, L:RB // Caveat: CARG1d may be RA. + |.else + | lea RC, [RA+NARGS:RD*8] + | mov L:RB, SAVE_L + | mov ARG2, RA + | mov ARG3, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE // This is the callers base! + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top) + | mov BASE, L:RB->base + | mov RA, TMP2 + | mov NARGS:RD, TMP1 + | mov LFUNC:RB, [RA-8] + | add NARGS:RD, 1 + | // This is fragile. L->base must not move, KBASE must always be defined. + | cmp KBASE, BASE // Continue with CALLT if flag set. + | je ->BC_CALLT_Z + | mov BASE, RA + | ins_call // Otherwise call resolved metamethod. + | + |//-- Argument coercion for 'for' statement ------------------------------ + | + |->vmeta_for: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov FCARG2, RA // Caveat: FCARG2 == BASE + | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA + | mov SAVE_PC, PC + | call extern lj_meta_for@8 // (lua_State *L, TValue *base) + | mov BASE, L:RB->base + | mov RC, [PC-4] + | movzx RA, RCH + | movzx OP, RCL + | shr RC, 16 + |.if X64 + | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI. + |.else + | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Retry FORI or JFORI. + |.endif + | + |//----------------------------------------------------------------------- + |//-- Fast functions ----------------------------------------------------- + |//----------------------------------------------------------------------- + | + |.macro .ffunc, name + |->ff_ .. name: + |.endmacro + | + |.macro .ffunc_1, name + |->ff_ .. name: + | cmp NARGS:RD, 1+1; jb ->fff_fallback + |.endmacro + | + |.macro .ffunc_2, name + |->ff_ .. 
name: + | cmp NARGS:RD, 2+1; jb ->fff_fallback + |.endmacro + | + |.macro .ffunc_n, name + | .ffunc_1 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | fld qword [BASE] + |.endmacro + | + |.macro .ffunc_n, name, op + | .ffunc_1 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | op + | fld qword [BASE] + |.endmacro + | + |.macro .ffunc_nsse, name, op + | .ffunc_1 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | op xmm0, qword [BASE] + |.endmacro + | + |.macro .ffunc_nsse, name + | .ffunc_nsse name, movsd + |.endmacro + | + |.macro .ffunc_nn, name + | .ffunc_2 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback + | fld qword [BASE] + | fld qword [BASE+8] + |.endmacro + | + |.macro .ffunc_nnsse, name + | .ffunc_2 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback + | movsd xmm0, qword [BASE] + | movsd xmm1, qword [BASE+8] + |.endmacro + | + |.macro .ffunc_nnr, name + | .ffunc_2 name + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback + | fld qword [BASE+8] + | fld qword [BASE] + |.endmacro + | + |// Inlined GC threshold check. Caveat: uses label 1. + |.macro ffgccheck + | mov RB, [DISPATCH+DISPATCH_GL(gc.total)] + | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)] + | jb >1 + | call ->fff_gcstep + |1: + |.endmacro + | + |//-- Base library: checks ----------------------------------------------- + | + |.ffunc_1 assert + | mov RB, [BASE+4] + | cmp RB, LJ_TISTRUECOND; jae ->fff_fallback + | mov PC, [BASE-4] + | mov MULTRES, RD + | mov [BASE-4], RB + | mov RB, [BASE] + | mov [BASE-8], RB + | sub RD, 2 + | jz >2 + | mov RA, BASE + |1: + | add RA, 8 + |.if X64 + | mov RBa, [RA] + | mov [RA-8], RBa + |.else + | mov RB, [RA+4] + | mov [RA-4], RB + | mov RB, [RA] + | mov [RA-8], RB + |.endif + | sub RD, 1 + | jnz <1 + |2: + | mov RD, MULTRES + | jmp ->fff_res_ + | + |.ffunc_1 type + | mov RB, [BASE+4] + |.if X64 + | mov RA, RB + | sar RA, 15 + | cmp RA, -2 + | je >3 + |.endif + | mov RC, ~LJ_TNUMX + | not RB + | cmp RC, RB + ||if (cmov) { + | cmova RC, RB + ||} else { + | jbe >1; mov RC, RB; 1: + ||} + |2: + | mov CFUNC:RB, [BASE-8] + | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))] + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TSTR + | mov [BASE-8], STR:RC + | jmp ->fff_res1 + |.if X64 + |3: + | mov RC, ~LJ_TLIGHTUD + | jmp <2 + |.endif + | + |//-- Base library: getters and setters --------------------------------- + | + |.ffunc_1 getmetatable + | mov RB, [BASE+4] + | mov PC, [BASE-4] + | cmp RB, LJ_TTAB; jne >6 + |1: // Field metatable must be at same offset for GCtab and GCudata! + | mov TAB:RB, [BASE] + | mov TAB:RB, TAB:RB->metatable + |2: + | test TAB:RB, TAB:RB + | mov dword [BASE-4], LJ_TNIL + | jz ->fff_res1 + | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable)] + | mov dword [BASE-4], LJ_TTAB // Store metatable as default result. + | mov [BASE-8], TAB:RB + | mov RA, TAB:RB->hmask + | and RA, STR:RC->hash + | imul RA, #NODE + | add NODE:RA, TAB:RB->node + |3: // Rearranged logic, because we expect _not_ to find the key. + | cmp dword NODE:RA->key.it, LJ_TSTR + | jne >4 + | cmp dword NODE:RA->key.gcr, STR:RC + | je >5 + |4: + | mov NODE:RA, NODE:RA->next + | test NODE:RA, NODE:RA + | jnz <3 + | jmp ->fff_res1 // Not found, keep default result. + |5: + | mov RB, [RA+4] + | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value. 
+ | mov RC, [RA] + | mov [BASE-4], RB // Return value of mt.__metatable. + | mov [BASE-8], RC + | jmp ->fff_res1 + | + |6: + | cmp RB, LJ_TUDATA; je <1 + |.if X64 + | cmp RB, LJ_TNUMX; ja >8 + | cmp RB, LJ_TISNUM; jbe >7 + | mov RB, LJ_TLIGHTUD + | jmp >8 + |7: + |.else + | cmp RB, LJ_TISNUM; ja >8 + |.endif + | mov RB, LJ_TNUMX + |8: + | not RB + | mov TAB:RB, [DISPATCH+RB*4+DISPATCH_GL(gcroot[GCROOT_BASEMT])] + | jmp <2 + | + |.ffunc_2 setmetatable + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + | // Fast path: no mt for table yet and not clearing the mt. + | mov TAB:RB, [BASE] + | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback + | cmp dword [BASE+12], LJ_TTAB; jne ->fff_fallback + | mov TAB:RC, [BASE+8] + | mov TAB:RB->metatable, TAB:RC + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TTAB // Return original table. + | mov [BASE-8], TAB:RB + | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) + | jz >1 + | // Possible write barrier. Table is black, but skip iswhite(mt) check. + | barrierback TAB:RB, RC + |1: + | jmp ->fff_res1 + | + |.ffunc_2 rawget + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + |.if X64WIN + | mov RB, BASE // Save BASE. + | lea CARG3d, [BASE+8] + | mov CARG2d, [BASE] // Caveat: CARG2d == BASE. + | mov CARG1d, SAVE_L + |.elif X64 + | mov RB, BASE // Save BASE. + | mov CARG2d, [BASE] + | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE. + | mov CARG1d, SAVE_L + |.else + | mov TAB:RD, [BASE] + | mov L:RB, SAVE_L + | mov ARG2, TAB:RD + | mov ARG1, L:RB + | mov RB, BASE // Save BASE. + | add BASE, 8 + | mov ARG3, BASE + |.endif + | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) + | // cTValue * returned in eax (RD). + | mov BASE, RB // Restore BASE. + | // Copy table slot. + |.if X64 + | mov RBa, [RD] + | mov PC, [BASE-4] + | mov [BASE-8], RBa + |.else + | mov RB, [RD] + | mov RD, [RD+4] + | mov PC, [BASE-4] + | mov [BASE-8], RB + | mov [BASE-4], RD + |.endif + | jmp ->fff_res1 + | + |//-- Base library: conversions ------------------------------------------ + | + |.ffunc tonumber + | // Only handles the number case inline (without a base argument). + | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument. + | cmp dword [BASE+4], LJ_TISNUM + if (LJ_DUALNUM) { + | jne >1 + | mov RB, dword [BASE]; jmp ->fff_resi + |1: + | ja ->fff_fallback + } else { + | jae ->fff_fallback + } + if (sse) { + | movsd xmm0, qword [BASE]; jmp ->fff_resxmm0 + } else { + | fld qword [BASE]; jmp ->fff_resn + } + | + |.ffunc_1 tostring + | // Only handles the string or number case inline. + | mov PC, [BASE-4] + | cmp dword [BASE+4], LJ_TSTR; jne >3 + | // A __tostring method in the string base metatable is ignored. + | mov STR:RD, [BASE] + |2: + | mov dword [BASE-4], LJ_TSTR + | mov [BASE-8], STR:RD + | jmp ->fff_res1 + |3: // Handle numbers inline, unless a number base metatable is present. + | cmp dword [BASE+4], LJ_TISNUM; ja ->fff_fallback + | cmp dword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0 + | jne ->fff_fallback + | ffgccheck // Caveat: uses label 1. + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Add frame since C call can throw. + | mov SAVE_PC, PC // Redundant (but a defined value). + |.if X64 and not X64WIN + | mov FCARG2, BASE // Otherwise: FCARG2 == BASE + |.endif + | mov L:FCARG1, L:RB + if (LJ_DUALNUM) { + | call extern lj_str_fromnumber@8 // (lua_State *L, cTValue *o) + } else { + | call extern lj_str_fromnum@8 // (lua_State *L, lua_Number *np) + } + | // GCstr returned in eax (RD). 
+ | mov BASE, L:RB->base + | jmp <2 + | + |//-- Base library: iterators ------------------------------------------- + | + |.ffunc_1 next + | je >2 // Missing 2nd arg? + |1: + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Add frame since C call can throw. + | mov L:RB->top, BASE // Dummy frame length is ok. + | mov PC, [BASE-4] + |.if X64WIN + | lea CARG3d, [BASE+8] + | mov CARG2d, [BASE] // Caveat: CARG2d == BASE. + | mov CARG1d, L:RB + |.elif X64 + | mov CARG2d, [BASE] + | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE. + | mov CARG1d, L:RB + |.else + | mov TAB:RD, [BASE] + | mov ARG2, TAB:RD + | mov ARG1, L:RB + | add BASE, 8 + | mov ARG3, BASE + |.endif + | mov SAVE_PC, PC // Redundant (but a defined value). + | call extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key) + | // Flag returned in eax (RD). + | mov BASE, L:RB->base + | test RD, RD; jz >3 // End of traversal? + | // Copy key and value to results. + |.if X64 + | mov RBa, [BASE+8] + | mov RDa, [BASE+16] + | mov [BASE-8], RBa + | mov [BASE], RDa + |.else + | mov RB, [BASE+8] + | mov RD, [BASE+12] + | mov [BASE-8], RB + | mov [BASE-4], RD + | mov RB, [BASE+16] + | mov RD, [BASE+20] + | mov [BASE], RB + | mov [BASE+4], RD + |.endif + |->fff_res2: + | mov RD, 1+2 + | jmp ->fff_res + |2: // Set missing 2nd arg to nil. + | mov dword [BASE+12], LJ_TNIL + | jmp <1 + |3: // End of traversal: return nil. + | mov dword [BASE-4], LJ_TNIL + | jmp ->fff_res1 + | + |.ffunc_1 pairs + | mov TAB:RB, [BASE] + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback +#endif + | mov CFUNC:RB, [BASE-8] + | mov CFUNC:RD, CFUNC:RB->upvalue[0] + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TFUNC + | mov [BASE-8], CFUNC:RD + | mov dword [BASE+12], LJ_TNIL + | mov RD, 1+3 + | jmp ->fff_res + | + |.ffunc_1 ipairs_aux + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM + if (LJ_DUALNUM) { + | jne ->fff_fallback + } else { + | jae ->fff_fallback + } + | mov PC, [BASE-4] + if (LJ_DUALNUM) { + | mov RD, dword [BASE+8] + | add RD, 1 + | mov dword [BASE-4], LJ_TISNUM + | mov dword [BASE-8], RD + } else if (sse) { + | movsd xmm0, qword [BASE+8] + | sseconst_1 xmm1, RBa + | addsd xmm0, xmm1 + | cvtsd2si RD, xmm0 + | movsd qword [BASE-8], xmm0 + } else { + |.if not X64 + | fld qword [BASE+8] + | fld1 + | faddp st1 + | fist ARG1 + | fstp qword [BASE-8] + | mov RD, ARG1 + |.endif + } + | mov TAB:RB, [BASE] + | cmp RD, TAB:RB->asize; jae >2 // Not in array part? + | shl RD, 3 + | add RD, TAB:RB->array + |1: + | cmp dword [RD+4], LJ_TNIL; je ->fff_res0 + | // Copy array slot. + |.if X64 + | mov RBa, [RD] + | mov [BASE], RBa + |.else + | mov RB, [RD] + | mov RD, [RD+4] + | mov [BASE], RB + | mov [BASE+4], RD + |.endif + | jmp ->fff_res2 + |2: // Check for empty hash part first. Otherwise call C function. + | cmp dword TAB:RB->hmask, 0; je ->fff_res0 + | mov FCARG1, TAB:RB + | mov RB, BASE // Save BASE. + | mov FCARG2, RD // Caveat: FCARG2 == BASE + | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key) + | // cTValue * or NULL returned in eax (RD). 
+ | mov BASE, RB + | test RD, RD + | jnz <1 + |->fff_res0: + | mov RD, 1+0 + | jmp ->fff_res + | + |.ffunc_1 ipairs + | mov TAB:RB, [BASE] + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback +#endif + | mov CFUNC:RB, [BASE-8] + | mov CFUNC:RD, CFUNC:RB->upvalue[0] + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TFUNC + | mov [BASE-8], CFUNC:RD + if (LJ_DUALNUM) { + | mov dword [BASE+12], LJ_TISNUM + | mov dword [BASE+8], 0 + } else if (sse) { + | xorps xmm0, xmm0 + | movsd qword [BASE+8], xmm0 + } else { + | fldz + | fstp qword [BASE+8] + } + | mov RD, 1+3 + | jmp ->fff_res + | + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc_1 pcall + | lea RA, [BASE+8] + | sub NARGS:RD, 1 + | mov PC, 8+FRAME_PCALL + |1: + | movzx RB, byte [DISPATCH+DISPATCH_GL(hookmask)] + | shr RB, HOOK_ACTIVE_SHIFT + | and RB, 1 + | add PC, RB // Remember active hook before pcall. + | jmp ->vm_call_dispatch + | + |.ffunc_2 xpcall + | cmp dword [BASE+12], LJ_TFUNC; jne ->fff_fallback + | mov RB, [BASE+4] // Swap function and traceback. + | mov [BASE+12], RB + | mov dword [BASE+4], LJ_TFUNC + | mov LFUNC:RB, [BASE] + | mov PC, [BASE+8] + | mov [BASE+8], LFUNC:RB + | mov [BASE], PC + | lea RA, [BASE+16] + | sub NARGS:RD, 2 + | mov PC, 16+FRAME_PCALL + | jmp <1 + | + |//-- Coroutine library -------------------------------------------------- + | + |.macro coroutine_resume_wrap, resume + |.if resume + |.ffunc_1 coroutine_resume + | mov L:RB, [BASE] + |.else + |.ffunc coroutine_wrap_aux + | mov CFUNC:RB, [BASE-8] + | mov L:RB, CFUNC:RB->upvalue[0].gcr + |.endif + | mov PC, [BASE-4] + | mov SAVE_PC, PC + |.if X64 + | mov TMP1, L:RB + |.else + | mov ARG1, L:RB + |.endif + |.if resume + | cmp dword [BASE+4], LJ_TTHREAD; jne ->fff_fallback + |.endif + | cmp aword L:RB->cframe, 0; jne ->fff_fallback + | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback + | mov RA, L:RB->top + | je >1 // Status != LUA_YIELD (i.e. 0)? + | cmp RA, L:RB->base // Check for presence of initial func. + | je ->fff_fallback + |1: + |.if resume + | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread). + |.else + | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1). + |.endif + | cmp PC, L:RB->maxstack; ja ->fff_fallback + | mov L:RB->top, PC + | + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + |.if resume + | add BASE, 8 // Keep resumed thread in stack for GC. + |.endif + | mov L:RB->top, BASE + |.if resume + | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move. + |.else + | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move. + |.endif + | sub RBa, PCa // Relative to PC. + | + | cmp PC, RA + | je >3 + |2: // Move args to coroutine. + |.if X64 + | mov RCa, [PC+RB] + | mov [PC-8], RCa + |.else + | mov RC, [PC+RB+4] + | mov [PC-4], RC + | mov RC, [PC+RB] + | mov [PC-8], RC + |.endif + | sub PC, 8 + | cmp PC, RA + | jne <2 + |3: + |.if X64 + | mov CARG2d, RA + | mov CARG1d, TMP1 + |.else + | mov ARG2, RA + | xor RA, RA + | mov ARG4, RA + | mov ARG3, RA + |.endif + | call ->vm_resume // (lua_State *L, TValue *base, 0, 0) + | set_vmstate INTERP + | + | mov L:RB, SAVE_L + |.if X64 + | mov L:PC, TMP1 + |.else + | mov L:PC, ARG1 // The callee doesn't modify SAVE_L. + |.endif + | mov BASE, L:RB->base + | cmp eax, LUA_YIELD + | ja >8 + |4: + | mov RA, L:PC->base + | mov KBASE, L:PC->top + | mov L:PC->top, RA // Clear coroutine stack. + | mov PC, KBASE + | sub PC, RA + | je >6 // No results? 
+ | lea RD, [BASE+PC] + | shr PC, 3 + | cmp RD, L:RB->maxstack + | ja >9 // Need to grow stack? + | + | mov RB, BASE + | sub RBa, RAa + |5: // Move results from coroutine. + |.if X64 + | mov RDa, [RA] + | mov [RA+RB], RDa + |.else + | mov RD, [RA] + | mov [RA+RB], RD + | mov RD, [RA+4] + | mov [RA+RB+4], RD + |.endif + | add RA, 8 + | cmp RA, KBASE + | jne <5 + |6: + |.if resume + | lea RD, [PC+2] // nresults+1 = 1 + true + results. + | mov dword [BASE-4], LJ_TTRUE // Prepend true to results. + |.else + | lea RD, [PC+1] // nresults+1 = 1 + results. + |.endif + |7: + | mov PC, SAVE_PC + | mov MULTRES, RD + |.if resume + | mov RAa, -8 + |.else + | xor RA, RA + |.endif + | test PC, FRAME_TYPE + | jz ->BC_RET_Z + | jmp ->vm_return + | + |8: // Coroutine returned with error (at co->top-1). + |.if resume + | mov dword [BASE-4], LJ_TFALSE // Prepend false to results. + | mov RA, L:PC->top + | sub RA, 8 + | mov L:PC->top, RA // Clear error from coroutine stack. + | // Copy error message. + |.if X64 + | mov RDa, [RA] + | mov [BASE], RDa + |.else + | mov RD, [RA] + | mov [BASE], RD + | mov RD, [RA+4] + | mov [BASE+4], RD + |.endif + | mov RD, 1+2 // nresults+1 = 1 + false + error. + | jmp <7 + |.else + | mov FCARG2, L:PC + | mov FCARG1, L:RB + | call extern lj_ffh_coroutine_wrap_err@8 // (lua_State *L, lua_State *co) + | // Error function does not return. + |.endif + | + |9: // Handle stack expansion on return from yield. + |.if X64 + | mov L:RA, TMP1 + |.else + | mov L:RA, ARG1 // The callee doesn't modify SAVE_L. + |.endif + | mov L:RA->top, KBASE // Undo coroutine stack clearing. + | mov FCARG2, PC + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + |.if X64 + | mov L:PC, TMP1 + |.else + | mov L:PC, ARG1 + |.endif + | mov BASE, L:RB->base + | jmp <4 // Retry the stack move. + |.endmacro + | + | coroutine_resume_wrap 1 // coroutine.resume + | coroutine_resume_wrap 0 // coroutine.wrap + | + |.ffunc coroutine_yield + | mov L:RB, SAVE_L + | test aword L:RB->cframe, CFRAME_RESUME + | jz ->fff_fallback + | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB->top, RD + | xor RD, RD + | mov aword L:RB->cframe, RDa + | mov al, LUA_YIELD + | mov byte L:RB->status, al + | jmp ->vm_leave_unw + | + |//-- Math library ------------------------------------------------------- + | + if (!LJ_DUALNUM) { + |->fff_resi: // Dummy. + } + if (sse) { + |->fff_resn: + | mov PC, [BASE-4] + | fstp qword [BASE-8] + | jmp ->fff_res1 + } + | .ffunc_1 math_abs + if (LJ_DUALNUM) { + | cmp dword [BASE+4], LJ_TISNUM; jne >2 + | mov RB, dword [BASE] + | cmp RB, 0; jns ->fff_resi + | neg RB; js >1 + |->fff_resbit: + |->fff_resi: + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TISNUM + | mov dword [BASE-8], RB + | jmp ->fff_res1 + |1: + | mov PC, [BASE-4] + | mov dword [BASE-4], 0x41e00000 // 2^31. + | mov dword [BASE-8], 0 + | jmp ->fff_res1 + |2: + | ja ->fff_fallback + } else { + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + } + if (sse) { + | movsd xmm0, qword [BASE] + | sseconst_abs xmm1, RDa + | andps xmm0, xmm1 + |->fff_resxmm0: + | mov PC, [BASE-4] + | movsd qword [BASE-8], xmm0 + | // fallthrough + } else { + | fld qword [BASE] + | fabs + | // fallthrough + |->fff_resxmm0: // Dummy. + |->fff_resn: + | mov PC, [BASE-4] + | fstp qword [BASE-8] + } + |->fff_res1: + | mov RD, 1+1 + |->fff_res: + | mov MULTRES, RD + |->fff_res_: + | test PC, FRAME_TYPE + | jnz >7 + |5: + | cmp PC_RB, RDL // More results expected? + | ja >6 + | // Adjust BASE. 
KBASE is assumed to be set for the calling frame. + | movzx RA, PC_RA + | not RAa // Note: ~RA = -(RA+1) + | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8 + | ins_next + | + |6: // Fill up results with nil. + | mov dword [BASE+RD*8-12], LJ_TNIL + | add RD, 1 + | jmp <5 + | + |7: // Non-standard return case. + | mov RAa, -8 // Results start at BASE+RA = BASE-8. + | jmp ->vm_return + | + |.macro math_round, func + | .ffunc math_ .. func + ||if (LJ_DUALNUM) { + | cmp dword [BASE+4], LJ_TISNUM; jne >1 + | mov RB, dword [BASE]; jmp ->fff_resi + |1: + | ja ->fff_fallback + ||} else { + | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback + ||} + ||if (sse) { + | movsd xmm0, qword [BASE] + | call ->vm_ .. func + || if (LJ_DUALNUM) { + | cvtsd2si RB, xmm0 + | cmp RB, 0x80000000 + | jne ->fff_resi + | cvtsi2sd xmm1, RB + | ucomisd xmm0, xmm1 + | jp ->fff_resxmm0 + | je ->fff_resi + || } + | jmp ->fff_resxmm0 + ||} else { + | fld qword [BASE] + | call ->vm_ .. func + || if (LJ_DUALNUM) { + |.if not X64 + | fist ARG1 + | mov RB, ARG1 + | cmp RB, 0x80000000; jne >2 + | fdup + | fild ARG1 + | fcomparepp + | jp ->fff_resn + | jne ->fff_resn + |2: + | fpop + | jmp ->fff_resi + |.endif + || } else { + | jmp ->fff_resn + || } + ||} + |.endmacro + | + | math_round floor + | math_round ceil + | + if (sse) { + |.ffunc_nsse math_sqrt, sqrtsd; jmp ->fff_resxmm0 + } else { + |.ffunc_n math_sqrt; fsqrt; jmp ->fff_resn + } + |.ffunc_n math_log, fldln2; fyl2x; jmp ->fff_resn + |.ffunc_n math_log10, fldlg2; fyl2x; jmp ->fff_resn + |.ffunc_n math_exp; call ->vm_exp_x87; jmp ->fff_resn + | + |.ffunc_n math_sin; fsin; jmp ->fff_resn + |.ffunc_n math_cos; fcos; jmp ->fff_resn + |.ffunc_n math_tan; fptan; fpop; jmp ->fff_resn + | + |.ffunc_n math_asin + | fdup; fmul st0; fld1; fsubrp st1; fsqrt; fpatan + | jmp ->fff_resn + |.ffunc_n math_acos + | fdup; fmul st0; fld1; fsubrp st1; fsqrt; fxch; fpatan + | jmp ->fff_resn + |.ffunc_n math_atan; fld1; fpatan; jmp ->fff_resn + | + |.macro math_extern, func + ||if (sse) { + | .ffunc_nsse math_ .. func + | .if not X64 + | movsd FPARG1, xmm0 + | .endif + ||} else { + | .if not X64 + | .ffunc_n math_ .. func + | fstp FPARG1 + | .endif + ||} + | mov RB, BASE + | call extern lj_vm_ .. func + | mov BASE, RB + | .if X64 + | jmp ->fff_resxmm0 + | .else + | jmp ->fff_resn + | .endif + |.endmacro + | + | math_extern sinh + | math_extern cosh + | math_extern tanh + | + |->ff_math_deg: + if (sse) { + |.ffunc_nsse math_rad + | mov CFUNC:RB, [BASE-8] + | mulsd xmm0, qword CFUNC:RB->upvalue[0] + | jmp ->fff_resxmm0 + } else { + |.ffunc_n math_rad + | mov CFUNC:RB, [BASE-8] + | fmul qword CFUNC:RB->upvalue[0] + | jmp ->fff_resn + } + | + |.ffunc_nn math_atan2; fpatan; jmp ->fff_resn + |.ffunc_nnr math_ldexp; fscale; fpop1; jmp ->fff_resn + | + |.ffunc_1 math_frexp + | mov RB, [BASE+4] + | cmp RB, LJ_TISNUM; jae ->fff_fallback + | mov PC, [BASE-4] + | mov RC, [BASE] + | mov [BASE-4], RB; mov [BASE-8], RC + | shl RB, 1; cmp RB, 0xffe00000; jae >3 + | or RC, RB; jz >3 + | mov RC, 1022 + | cmp RB, 0x00200000; jb >4 + |1: + | shr RB, 21; sub RB, RC // Extract and unbias exponent. + if (sse) { + | cvtsi2sd xmm0, RB + } else { + | mov TMP1, RB; fild TMP1 + } + | mov RB, [BASE-4] + | and RB, 0x800fffff // Mask off exponent. + | or RB, 0x3fe00000 // Put mantissa in range [0.5,1) or 0. + | mov [BASE-4], RB + |2: + if (sse) { + | movsd qword [BASE], xmm0 + } else { + | fstp qword [BASE] + } + | mov RD, 1+2 + | jmp ->fff_res + |3: // Return +-0, +-Inf, NaN unmodified and an exponent of 0. 
+ if (sse) { + | xorps xmm0, xmm0; jmp <2 + } else { + | fldz; jmp <2 + } + |4: // Handle denormals by multiplying with 2^54 and adjusting the bias. + if (sse) { + | movsd xmm0, qword [BASE] + | sseconst_hi xmm1, RBa, 43500000 // 2^54. + | mulsd xmm0, xmm1 + | movsd qword [BASE-8], xmm0 + } else { + | fld qword [BASE] + | mov TMP1, 0x5a800000; fmul TMP1 // x = x*2^54 + | fstp qword [BASE-8] + } + | mov RB, [BASE-4]; mov RC, 1076; shl RB, 1; jmp <1 + | + if (sse) { + |.ffunc_nsse math_modf + } else { + |.ffunc_n math_modf + } + | mov RB, [BASE+4] + | mov PC, [BASE-4] + | shl RB, 1; cmp RB, 0xffe00000; je >4 // +-Inf? + if (sse) { + | movaps xmm4, xmm0 + | call ->vm_trunc + | subsd xmm4, xmm0 + |1: + | movsd qword [BASE-8], xmm0 + | movsd qword [BASE], xmm4 + } else { + | fdup + | call ->vm_trunc + | fsub st1, st0 + |1: + | fstp qword [BASE-8] + | fstp qword [BASE] + } + | mov RC, [BASE-4]; mov RB, [BASE+4] + | xor RC, RB; js >3 // Need to adjust sign? + |2: + | mov RD, 1+2 + | jmp ->fff_res + |3: + | xor RB, 0x80000000; mov [BASE+4], RB // Flip sign of fraction. + | jmp <2 + |4: + if (sse) { + | xorps xmm4, xmm4; jmp <1 // Return +-Inf and +-0. + } else { + | fldz; fxch; jmp <1 // Return +-Inf and +-0. + } + | + |.ffunc_nnr math_fmod + |1: ; fprem; fnstsw ax; sahf; jp <1 + | fpop1 + | jmp ->fff_resn + | + if (sse) { + |.ffunc_nnsse math_pow; call ->vm_pow; jmp ->fff_resxmm0 + } else { + |.ffunc_nn math_pow; call ->vm_pow; jmp ->fff_resn + } + | + |.macro math_minmax, name, cmovop, fcmovop, nofcmovop, sseop + | .ffunc name + | mov RA, 2 + | cmp dword [BASE+4], LJ_TISNUM + ||if (LJ_DUALNUM) { + | jne >4 + | mov RB, dword [BASE] + |1: // Handle integers. + | cmp RA, RD; jae ->fff_resi + | cmp dword [BASE+RA*8-4], LJ_TISNUM; jne >3 + | cmp RB, dword [BASE+RA*8-8] + | cmovop RB, dword [BASE+RA*8-8] + | add RA, 1 + | jmp <1 + |3: + | ja ->fff_fallback + | // Convert intermediate result to number and continue below. + ||if (sse) { + | cvtsi2sd xmm0, RB + ||} else { + |.if not X64 + | mov TMP1, RB + | fild TMP1 + |.endif + ||} + | jmp >6 + |4: + | ja ->fff_fallback + ||} else { + | jae ->fff_fallback + ||} + | + ||if (sse) { + | movsd xmm0, qword [BASE] + |5: // Handle numbers or integers. + | cmp RA, RD; jae ->fff_resxmm0 + | cmp dword [BASE+RA*8-4], LJ_TISNUM + ||if (LJ_DUALNUM) { + | jb >6 + | ja ->fff_fallback + | cvtsi2sd xmm1, dword [BASE+RA*8-8] + | jmp >7 + ||} else { + | jae ->fff_fallback + ||} + |6: + | movsd xmm1, qword [BASE+RA*8-8] + |7: + | sseop xmm0, xmm1 + | add RA, 1 + | jmp <5 + ||} else { + |.if not X64 + | fld qword [BASE] + |5: // Handle numbers or integers. 
+ | cmp RA, RD; jae ->fff_resn + | cmp dword [BASE+RA*8-4], LJ_TISNUM + ||if (LJ_DUALNUM) { + | jb >6 + | ja >9 + | fild dword [BASE+RA*8-8] + | jmp >7 + ||} else { + | jae >9 + ||} + |6: + | fld qword [BASE+RA*8-8] + |7: + ||if (cmov) { + | fucomi st1; fcmovop st1; fpop1 + ||} else { + | push eax + | fucom st1; fnstsw ax; test ah, 1; nofcmovop >2; fxch; 2: ; fpop + | pop eax + ||} + | add RA, 1 + | jmp <5 + |.endif + ||} + |.endmacro + | + | math_minmax math_min, cmovg, fcmovnbe, jz, minsd + | math_minmax math_max, cmovl, fcmovbe, jnz, maxsd + if (!sse) { + |9: + | fpop; jmp ->fff_fallback + } + | + |//-- String library ----------------------------------------------------- + | + |.ffunc_1 string_len + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | mov STR:RB, [BASE] + if (LJ_DUALNUM) { + | mov RB, dword STR:RB->len; jmp ->fff_resi + } else if (sse) { + | cvtsi2sd xmm0, dword STR:RB->len; jmp ->fff_resxmm0 + } else { + | fild dword STR:RB->len; jmp ->fff_resn + } + | + |.ffunc string_byte // Only handle the 1-arg case here. + | cmp NARGS:RD, 1+1; jne ->fff_fallback + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | mov STR:RB, [BASE] + | mov PC, [BASE-4] + | cmp dword STR:RB->len, 1 + | jb ->fff_res0 // Return no results for empty string. + | movzx RB, byte STR:RB[1] + if (LJ_DUALNUM) { + | jmp ->fff_resi + } else if (sse) { + | cvtsi2sd xmm0, RB; jmp ->fff_resxmm0 + } else { + | mov TMP1, RB; fild TMP1; jmp ->fff_resn + } + | + |.ffunc string_char // Only handle the 1-arg case here. + | ffgccheck + | cmp NARGS:RD, 1+1; jne ->fff_fallback // *Exactly* 1 arg. + | cmp dword [BASE+4], LJ_TISNUM + if (LJ_DUALNUM) { + | jne ->fff_fallback + | mov RB, dword [BASE] + | cmp RB, 255; ja ->fff_fallback + | mov TMP2, RB + } else if (sse) { + | jae ->fff_fallback + | cvttsd2si RB, qword [BASE] + | cmp RB, 255; ja ->fff_fallback + | mov TMP2, RB + } else { + | jae ->fff_fallback + | fld qword [BASE] + | fistp TMP2 + | cmp TMP2, 255; ja ->fff_fallback + } + |.if X64 + | mov TMP3, 1 + |.else + | mov ARG3, 1 + |.endif + | lea RDa, TMP2 // Points to stack. Little-endian. + |->fff_newstr: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + |.if X64 + | mov CARG3d, TMP3 // Zero-extended to size_t. + | mov CARG2, RDa // May be 64 bit ptr to stack. + | mov CARG1d, L:RB + |.else + | mov ARG2, RD + | mov ARG1, L:RB + |.endif + | mov SAVE_PC, PC + | call extern lj_str_new // (lua_State *L, char *str, size_t l) + | // GCstr * returned in eax (RD). + | mov BASE, L:RB->base + | mov PC, [BASE-4] + | mov dword [BASE-4], LJ_TSTR + | mov [BASE-8], STR:RD + | jmp ->fff_res1 + | + |.ffunc string_sub + | ffgccheck + | mov TMP2, -1 + | cmp NARGS:RD, 1+2; jb ->fff_fallback + | jna >1 + | cmp dword [BASE+20], LJ_TISNUM + if (LJ_DUALNUM) { + | jne ->fff_fallback + | mov RB, dword [BASE+16] + | mov TMP2, RB + } else if (sse) { + | jae ->fff_fallback + | cvttsd2si RB, qword [BASE+16] + | mov TMP2, RB + } else { + | jae ->fff_fallback + | fld qword [BASE+16] + | fistp TMP2 + } + |1: + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM + if (LJ_DUALNUM) { + | jne ->fff_fallback + } else { + | jae ->fff_fallback + } + | mov STR:RB, [BASE] + | mov TMP3, STR:RB + | mov RB, STR:RB->len + if (LJ_DUALNUM) { + | mov RA, dword [BASE+8] + } else if (sse) { + | cvttsd2si RA, qword [BASE+8] + } else { + |.if not X64 + | fld qword [BASE+8] + | fistp ARG3 + | mov RA, ARG3 + |.endif + } + | mov RC, TMP2 + | cmp RB, RC // len < end? (unsigned compare) + | jb >5 + |2: + | test RA, RA // start <= 0? 
+ | jle >7 + |3: + | mov STR:RB, TMP3 + | sub RC, RA // start > end? + | jl ->fff_emptystr + | lea RB, [STR:RB+RA+#STR-1] + | add RC, 1 + |4: + |.if X64 + | mov TMP3, RC + |.else + | mov ARG3, RC + |.endif + | mov RD, RB + | jmp ->fff_newstr + | + |5: // Negative end or overflow. + | jl >6 + | lea RC, [RC+RB+1] // end = end+(len+1) + | jmp <2 + |6: // Overflow. + | mov RC, RB // end = len + | jmp <2 + | + |7: // Negative start or underflow. + | je >8 + | add RA, RB // start = start+(len+1) + | add RA, 1 + | jg <3 // start > 0? + |8: // Underflow. + | mov RA, 1 // start = 1 + | jmp <3 + | + |->fff_emptystr: // Range underflow. + | xor RC, RC // Zero length. Any ptr in RB is ok. + | jmp <4 + | + |.ffunc_2 string_rep // Only handle the 1-char case inline. + | ffgccheck + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | cmp dword [BASE+12], LJ_TISNUM + | mov STR:RB, [BASE] + if (LJ_DUALNUM) { + | jne ->fff_fallback + | mov RC, dword [BASE+8] + } else if (sse) { + | jae ->fff_fallback + | cvttsd2si RC, qword [BASE+8] + } else { + | jae ->fff_fallback + | fld qword [BASE+8] + | fistp TMP2 + | mov RC, TMP2 + } + | test RC, RC + | jle ->fff_emptystr // Count <= 0? (or non-int) + | cmp dword STR:RB->len, 1 + | jb ->fff_emptystr // Zero length string? + | jne ->fff_fallback_2 // Fallback for > 1-char strings. + | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_2 + | movzx RA, byte STR:RB[1] + | mov RB, [DISPATCH+DISPATCH_GL(tmpbuf.buf)] + |.if X64 + | mov TMP3, RC + |.else + | mov ARG3, RC + |.endif + |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?). + | mov [RB], RAL + | add RB, 1 + | sub RC, 1 + | jnz <1 + | mov RD, [DISPATCH+DISPATCH_GL(tmpbuf.buf)] + | jmp ->fff_newstr + | + |.ffunc_1 string_reverse + | ffgccheck + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | mov STR:RB, [BASE] + | mov RC, STR:RB->len + | test RC, RC + | jz ->fff_emptystr // Zero length string? + | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_1 + | add RB, #STR + | mov TMP2, PC // Need another temp register. + |.if X64 + | mov TMP3, RC + |.else + | mov ARG3, RC + |.endif + | mov PC, [DISPATCH+DISPATCH_GL(tmpbuf.buf)] + |1: + | movzx RA, byte [RB] + | add RB, 1 + | sub RC, 1 + | mov [PC+RC], RAL + | jnz <1 + | mov RD, PC + | mov PC, TMP2 + | jmp ->fff_newstr + | + |.macro ffstring_case, name, lo, hi + | .ffunc_1 name + | ffgccheck + | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback + | mov STR:RB, [BASE] + | mov RC, STR:RB->len + | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_1 + | add RB, #STR + | mov TMP2, PC // Need another temp register. + |.if X64 + | mov TMP3, RC + |.else + | mov ARG3, RC + |.endif + | mov PC, [DISPATCH+DISPATCH_GL(tmpbuf.buf)] + | jmp >3 + |1: // ASCII case conversion. Yes, this is suboptimal code (do you care?). + | movzx RA, byte [RB+RC] + | cmp RA, lo + | jb >2 + | cmp RA, hi + | ja >2 + | xor RA, 0x20 + |2: + | mov [PC+RC], RAL + |3: + | sub RC, 1 + | jns <1 + | mov RD, PC + | mov PC, TMP2 + | jmp ->fff_newstr + |.endmacro + | + |ffstring_case string_lower, 0x41, 0x5a + |ffstring_case string_upper, 0x61, 0x7a + | + |//-- Table library ------------------------------------------------------ + | + |.ffunc_1 table_getn + | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback + | mov RB, BASE // Save BASE. + | mov TAB:FCARG1, [BASE] + | call extern lj_tab_len@4 // LJ_FASTCALL (GCtab *t) + | // Length of table returned in eax (RD). + | mov BASE, RB // Restore BASE. 
+ if (LJ_DUALNUM) { + | mov RB, RD; jmp ->fff_resi + } else if (sse) { + | cvtsi2sd xmm0, RD; jmp ->fff_resxmm0 + } else { + |.if not X64 + | mov ARG1, RD; fild ARG1; jmp ->fff_resn + |.endif + } + | + |//-- Bit library -------------------------------------------------------- + | + |.define TOBIT_BIAS, 0x59c00000 // 2^52 + 2^51 (float, not double!). + | + |.macro .ffunc_bit, name, kind + | .ffunc_1 name + |.if kind == 2 + ||if (sse) { + | sseconst_tobit xmm1, RBa + ||} else { + | mov TMP1, TOBIT_BIAS + ||} + |.endif + | cmp dword [BASE+4], LJ_TISNUM + ||if (LJ_DUALNUM) { + | jne >1 + | mov RB, dword [BASE] + |.if kind > 0 + | jmp >2 + |.else + | jmp ->fff_resbit + |.endif + |1: + | ja ->fff_fallback + ||} else { + | jae ->fff_fallback + ||} + ||if (sse) { + | movsd xmm0, qword [BASE] + |.if kind < 2 + | sseconst_tobit xmm1, RBa + |.endif + | addsd xmm0, xmm1 + | movd RB, xmm0 + ||} else { + |.if not X64 + | fld qword [BASE] + |.if kind < 2 + | mov TMP1, TOBIT_BIAS + |.endif + | fadd TMP1 + | fstp FPARG1 + |.if kind > 0 + | mov RB, ARG1 + |.endif + |.endif + ||} + |2: + |.endmacro + | + |.ffunc_bit bit_tobit, 0 + if (LJ_DUALNUM || sse) { + if (!sse) { + |.if not X64 + | mov RB, ARG1 + |.endif + } + | jmp ->fff_resbit + } else { + |.if not X64 + | fild ARG1 + | jmp ->fff_resn + |.endif + } + | + |.macro .ffunc_bit_op, name, ins + | .ffunc_bit name, 2 + | mov TMP2, NARGS:RD // Save for fallback. + | lea RD, [BASE+NARGS:RD*8-16] + |1: + | cmp RD, BASE + | jbe ->fff_resbit + | cmp dword [RD+4], LJ_TISNUM + ||if (LJ_DUALNUM) { + | jne >2 + | ins RB, dword [RD] + | sub RD, 8 + | jmp <1 + |2: + | ja ->fff_fallback_bit_op + ||} else { + | jae ->fff_fallback_bit_op + ||} + ||if (sse) { + | movsd xmm0, qword [RD] + | addsd xmm0, xmm1 + | movd RA, xmm0 + | ins RB, RA + ||} else { + |.if not X64 + | fld qword [RD] + | fadd TMP1 + | fstp FPARG1 + | ins RB, ARG1 + |.endif + ||} + | sub RD, 8 + | jmp <1 + |.endmacro + | + |.ffunc_bit_op bit_band, and + |.ffunc_bit_op bit_bor, or + |.ffunc_bit_op bit_bxor, xor + | + |.ffunc_bit bit_bswap, 1 + | bswap RB + | jmp ->fff_resbit + | + |.ffunc_bit bit_bnot, 1 + | not RB + if (LJ_DUALNUM) { + | jmp ->fff_resbit + } else if (sse) { + |->fff_resbit: + | cvtsi2sd xmm0, RB + | jmp ->fff_resxmm0 + } else { + |.if not X64 + |->fff_resbit: + | mov ARG1, RB + | fild ARG1 + | jmp ->fff_resn + |.endif + } + | + |->fff_fallback_bit_op: + | mov NARGS:RD, TMP2 // Restore for fallback + | jmp ->fff_fallback + | + |.macro .ffunc_bit_sh, name, ins + ||if (LJ_DUALNUM) { + | .ffunc_bit name, 1 + | // Note: no inline conversion from number for 2nd argument! + | cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback + | mov RA, dword [BASE+8] + ||} else if (sse) { + | .ffunc_nnsse name + | sseconst_tobit xmm2, RBa + | addsd xmm0, xmm2 + | addsd xmm1, xmm2 + | movd RB, xmm0 + | movd RA, xmm1 + ||} else { + |.if not X64 + | .ffunc_nn name + | mov TMP1, TOBIT_BIAS + | fadd TMP1 + | fstp FPARG3 + | fadd TMP1 + | fstp FPARG1 + | mov RA, ARG3 + | mov RB, ARG1 + |.endif + ||} + | ins RB, cl // Assumes RA is ecx. + | jmp ->fff_resbit + |.endmacro + | + |.ffunc_bit_sh bit_lshift, shl + |.ffunc_bit_sh bit_rshift, shr + |.ffunc_bit_sh bit_arshift, sar + |.ffunc_bit_sh bit_rol, rol + |.ffunc_bit_sh bit_ror, ror + | + |//----------------------------------------------------------------------- + | + |->fff_fallback_2: + | mov NARGS:RD, 1+2 // Other args are ignored, anyway. + | jmp ->fff_fallback + |->fff_fallback_1: + | mov NARGS:RD, 1+1 // Other args are ignored, anyway. 
+ |->fff_fallback: // Call fast function fallback handler. + | // BASE = new base, RD = nargs+1 + | mov L:RB, SAVE_L + | mov PC, [BASE-4] // Fallback may overwrite PC. + | mov SAVE_PC, PC // Redundant (but a defined value). + | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler. + | mov L:RB->top, RD + | mov CFUNC:RD, [BASE-8] + | cmp RA, L:RB->maxstack + | ja >5 // Need to grow stack. + |.if X64 + | mov CARG1d, L:RB + |.else + | mov ARG1, L:RB + |.endif + | call aword CFUNC:RD->f // (lua_State *L) + | mov BASE, L:RB->base + | // Either throws an error, or recovers and returns -1, 0 or nresults+1. + | test RD, RD; jg ->fff_res // Returned nresults+1? + |1: + | mov RA, L:RB->top + | sub RA, BASE + | shr RA, 3 + | test RD, RD + | lea NARGS:RD, [RA+1] + | mov LFUNC:RB, [BASE-8] + | jne ->vm_call_tail // Returned -1? + | ins_callt // Returned 0: retry fast path. + | + |// Reconstruct previous base for vmeta_call during tailcall. + |->vm_call_tail: + | mov RA, BASE + | test PC, FRAME_TYPE + | jnz >3 + | movzx RB, PC_RA + | not RBa // Note: ~RB = -(RB+1) + | lea BASE, [BASE+RB*8] // base = base - (RB+1)*8 + | jmp ->vm_call_dispatch // Resolve again for tailcall. + |3: + | mov RB, PC + | and RB, -8 + | sub BASE, RB + | jmp ->vm_call_dispatch // Resolve again for tailcall. + | + |5: // Grow stack for fallback handler. + | mov FCARG2, LUA_MINSTACK + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + | mov BASE, L:RB->base + | xor RD, RD // Simulate a return 0. + | jmp <1 // Dumb retry (goes through ff first). + | + |->fff_gcstep: // Call GC step function. + | // BASE = new base, RD = nargs+1 + | pop RBa // Must keep stack at same level. + | mov TMPa, RBa // Save return address + | mov L:RB, SAVE_L + | mov SAVE_PC, PC // Redundant (but a defined value). + | mov L:RB->base, BASE + | lea RD, [BASE+NARGS:RD*8-8] + | mov FCARG1, L:RB + | mov L:RB->top, RD + | call extern lj_gc_step@4 // (lua_State *L) + | mov BASE, L:RB->base + | mov RD, L:RB->top + | sub RD, BASE + | shr RD, 3 + | add NARGS:RD, 1 + | mov RBa, TMPa + | push RBa // Restore return address. + | ret + | + |//----------------------------------------------------------------------- + |//-- Special dispatch targets ------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_record: // Dispatch target for recording phase. +#if LJ_HASJIT + | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_VMEVENT // No recording while in vmevent. + | jnz >5 + | // Decrement the hookcount for consistency, but always do the call. + | test RDL, HOOK_ACTIVE + | jnz >1 + | test RDL, LUA_MASKLINE|LUA_MASKCOUNT + | jz >1 + | dec dword [DISPATCH+DISPATCH_GL(hookcount)] + | jmp >1 +#endif + | + |->vm_rethook: // Dispatch target for return hooks. + | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_ACTIVE // Hook already active? + | jnz >5 + | jmp >1 + | + |->vm_inshook: // Dispatch target for instr/line hooks. + | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)] + | test RDL, HOOK_ACTIVE // Hook already active? + | jnz >5 + | + | test RDL, LUA_MASKLINE|LUA_MASKCOUNT + | jz >5 + | dec dword [DISPATCH+DISPATCH_GL(hookcount)] + | jz >1 + | test RDL, LUA_MASKLINE + | jz >5 + |1: + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov FCARG2, PC // Caveat: FCARG2 == BASE + | mov FCARG1, L:RB + | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. 
+ | call extern lj_dispatch_ins@8 // (lua_State *L, BCIns *pc) + |3: + | mov BASE, L:RB->base + |4: + | movzx RA, PC_RA + |5: + | movzx OP, PC_OP + | movzx RD, PC_RD + |.if X64 + | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins. + |.else + | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Re-dispatch to static ins. + |.endif + | + |->cont_hook: // Continue from hook yield. + | add PC, 4 + | mov RA, [RB-24] + | mov MULTRES, RA // Restore MULTRES for *M ins. + | jmp <4 + | + |->vm_hotloop: // Hot loop counter underflow. +#if LJ_HASJIT + | mov LFUNC:RB, [BASE-8] // Same as curr_topL(L). + | mov RB, LFUNC:RB->pc + | movzx RD, byte [RB+PC2PROTO(framesize)] + | lea RD, [BASE+RD*8] + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov FCARG2, PC + | lea FCARG1, [DISPATCH+GG_DISP2J] + | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa + | mov SAVE_PC, PC + | call extern lj_trace_hot@8 // (jit_State *J, const BCIns *pc) + | jmp <3 +#endif + | + |->vm_callhook: // Dispatch target for call hooks. + | mov SAVE_PC, PC +#if LJ_HASJIT + | jmp >1 +#endif + | + |->vm_hotcall: // Hot call counter underflow. +#if LJ_HASJIT + | mov SAVE_PC, PC + | or PC, 1 // Marker for hot call. + |1: +#endif + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov L:RB->top, RD + | mov FCARG2, PC + | mov FCARG1, L:RB + | call extern lj_dispatch_call@8 // (lua_State *L, const BCIns *pc) + | // ASMFunction returned in eax/rax (RDa). + | mov SAVE_PC, 0 // Invalidate for subsequent line hook. +#if LJ_HASJIT + | and PC, -2 +#endif + | mov BASE, L:RB->base + | mov RAa, RDa + | mov RD, L:RB->top + | sub RD, BASE + | mov RBa, RAa + | movzx RA, PC_RA + | shr RD, 3 + | add NARGS:RD, 1 + | jmp RBa + | + |//----------------------------------------------------------------------- + |//-- Trace exit handler ------------------------------------------------- + |//----------------------------------------------------------------------- + | + |// Called from an exit stub with the exit number on the stack. + |// The 16 bit exit number is stored with two (sign-extended) push imm8. + |->vm_exit_handler: +#if LJ_HASJIT + |.if X64 + | push r13; push r12 + | push r11; push r10; push r9; push r8 + | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp + | push rbx; push rdx; push rcx; push rax + | movzx RC, byte [rbp-8] // Reconstruct exit number. + | mov RCH, byte [rbp-16] + | mov [rbp-8], r15; mov [rbp-16], r14 + |.else + | push ebp; lea ebp, [esp+12]; push ebp + | push ebx; push edx; push ecx; push eax + | movzx RC, byte [ebp-4] // Reconstruct exit number. + | mov RCH, byte [ebp-8] + | mov [ebp-4], edi; mov [ebp-8], esi + |.endif + | // Caveat: DISPATCH is ebx. + | mov DISPATCH, [ebp] + | mov RA, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number. + | set_vmstate EXIT + | mov [DISPATCH+DISPATCH_J(exitno)], RC + | mov [DISPATCH+DISPATCH_J(parent)], RA + |.if X64 + |.if X64WIN + | sub rsp, 16*8+4*8 // Room for SSE regs + save area. + |.else + | sub rsp, 16*8 // Room for SSE regs. 
+ |.endif + | add rbp, -128 + | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14 + | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12 + | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10 + | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8 + | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6 + | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4 + | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2 + | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0 + |.else + | sub esp, 8*8+16 // Room for SSE regs + args. + | movsd qword [ebp-40], xmm7; movsd qword [ebp-48], xmm6 + | movsd qword [ebp-56], xmm5; movsd qword [ebp-64], xmm4 + | movsd qword [ebp-72], xmm3; movsd qword [ebp-80], xmm2 + | movsd qword [ebp-88], xmm1; movsd qword [ebp-96], xmm0 + |.endif + | // Caveat: RB is ebp. + | mov L:RB, [DISPATCH+DISPATCH_GL(jit_L)] + | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)] + | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa + | mov dword [DISPATCH+DISPATCH_GL(jit_L)], 0 + | mov L:RB->base, BASE + |.if X64WIN + | lea CARG2, [rsp+4*8] + |.elif X64 + | mov CARG2, rsp + |.else + | lea FCARG2, [esp+16] + |.endif + | lea FCARG1, [DISPATCH+GG_DISP2J] + | call extern lj_trace_exit@8 // (jit_State *J, ExitState *ex) + | // MULTRES or negated error code returned in eax (RD). + | mov RAa, L:RB->cframe + | and RAa, CFRAME_RAWMASK + |.if X64WIN + | // Reposition stack later. + |.elif X64 + | mov rsp, RAa // Reposition stack to C frame. + |.else + | mov esp, RAa // Reposition stack to C frame. + |.endif + | mov [RAa+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield). + | mov BASE, L:RB->base + | mov PC, [RAa+CFRAME_OFS_PC] // Get SAVE_PC. + |.if X64 + | jmp >1 + |.endif +#endif + |->vm_exit_interp: + | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set. +#if LJ_HASJIT + |.if X64 + | // Restore additional callee-save registers only used in compiled code. + |.if X64WIN + | lea RAa, [rsp+9*16+4*8] + |1: + | movdqa xmm15, [RAa-9*16] + | movdqa xmm14, [RAa-8*16] + | movdqa xmm13, [RAa-7*16] + | movdqa xmm12, [RAa-6*16] + | movdqa xmm11, [RAa-5*16] + | movdqa xmm10, [RAa-4*16] + | movdqa xmm9, [RAa-3*16] + | movdqa xmm8, [RAa-2*16] + | movdqa xmm7, [RAa-1*16] + | mov rsp, RAa // Reposition stack to C frame. + | movdqa xmm6, [RAa] + | mov r15, CSAVE_3 + | mov r14, CSAVE_4 + |.else + | add rsp, 16 // Reposition stack to C frame. + |1: + |.endif + | mov r13, TMPa + | mov r12, TMPQ + |.endif + | test RD, RD; js >3 // Check for error from exit. + | mov MULTRES, RD + | mov LFUNC:KBASE, [BASE-8] + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | mov dword [DISPATCH+DISPATCH_GL(jit_L)], 0 + | set_vmstate INTERP + | // Modified copy of ins_next which handles function header dispatch, too. + | mov RC, [PC] + | movzx RA, RCH + | movzx OP, RCL + | add PC, 4 + | shr RC, 16 + | cmp OP, BC_FUNCF // Function header? + | jb >2 + | mov RC, MULTRES // RC/RD holds nres+1. + |2: + |.if X64 + | jmp aword [DISPATCH+OP*8] + |.else + | jmp aword [DISPATCH+OP*4] + |.endif + | + |3: // Rethrow error from the right C frame. + | neg RD + | mov FCARG1, L:RB + | mov FCARG2, RD + | call extern lj_err_throw@8 // (lua_State *L, int errcode) +#endif + | + |//----------------------------------------------------------------------- + |//-- Math helper functions ---------------------------------------------- + |//----------------------------------------------------------------------- + | + |// FP value rounding. 
Called by math.floor/math.ceil fast functions + |// and from JIT code. + | + |// x87 variant: Arg/ret on x87 stack. No int/xmm registers modified. + |.macro vm_round_x87, mode1, mode2 + | fnstcw word [esp+4] // Caveat: overwrites ARG1 and ARG2. + | mov [esp+8], eax + | mov ax, mode1 + | or ax, [esp+4] + |.if mode2 ~= 0xffff + | and ax, mode2 + |.endif + | mov [esp+6], ax + | fldcw word [esp+6] + | frndint + | fldcw word [esp+4] + | mov eax, [esp+8] + | ret + |.endmacro + | + |// SSE variant: arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified. + |.macro vm_round_sse, mode + | sseconst_abs xmm2, RDa + | sseconst_2p52 xmm3, RDa + | movaps xmm1, xmm0 + | andpd xmm1, xmm2 // |x| + | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|. + | jbe >1 + | andnpd xmm2, xmm0 // Isolate sign bit. + |.if mode == 2 // trunc(x)? + | movaps xmm0, xmm1 + | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 + | subsd xmm1, xmm3 + | sseconst_1 xmm3, RDa + | cmpsd xmm0, xmm1, 1 // |x| < result? + | andpd xmm0, xmm3 + | subsd xmm1, xmm0 // If yes, subtract -1. + | orpd xmm1, xmm2 // Merge sign bit back in. + |.else + | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52 + | subsd xmm1, xmm3 + | orpd xmm1, xmm2 // Merge sign bit back in. + | .if mode == 1 // ceil(x)? + | sseconst_m1 xmm2, RDa // Must subtract -1 to preserve -0. + | cmpsd xmm0, xmm1, 6 // x > result? + | .else // floor(x)? + | sseconst_1 xmm2, RDa + | cmpsd xmm0, xmm1, 1 // x < result? + | .endif + | andpd xmm0, xmm2 + | subsd xmm1, xmm0 // If yes, subtract +-1. + |.endif + | movaps xmm0, xmm1 + |1: + | ret + |.endmacro + | + |.macro vm_round, name, ssemode, mode1, mode2 + |->name: + ||if (!sse) { + | vm_round_x87 mode1, mode2 + ||} + |->name .. _sse: + | vm_round_sse ssemode + |.endmacro + | + | vm_round vm_floor, 0, 0x0400, 0xf7ff + | vm_round vm_ceil, 1, 0x0800, 0xfbff + | vm_round vm_trunc, 2, 0x0c00, 0xffff + | + |// FP modulo x%y. Called by BC_MOD* and vm_arith. + |->vm_mod: + if (sse) { + |// Args in xmm0/xmm1, return value in xmm0. + |// Caveat: xmm0-xmm5 and RC (eax) modified! + | movaps xmm5, xmm0 + | divsd xmm0, xmm1 + | sseconst_abs xmm2, RDa + | sseconst_2p52 xmm3, RDa + | movaps xmm4, xmm0 + | andpd xmm4, xmm2 // |x/y| + | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|. + | jbe >1 + | andnpd xmm2, xmm0 // Isolate sign bit. + | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52 + | subsd xmm4, xmm3 + | orpd xmm4, xmm2 // Merge sign bit back in. + | sseconst_1 xmm2, RDa + | cmpsd xmm0, xmm4, 1 // x/y < result? + | andpd xmm0, xmm2 + | subsd xmm4, xmm0 // If yes, subtract 1.0. + | movaps xmm0, xmm5 + | mulsd xmm1, xmm4 + | subsd xmm0, xmm1 + | ret + |1: + | mulsd xmm1, xmm0 + | movaps xmm0, xmm5 + | subsd xmm0, xmm1 + | ret + } else { + |// Args/ret on x87 stack (y on top). No xmm registers modified. + |// Caveat: needs 3 slots on x87 stack! RC (eax) modified! + | fld st1 + | fdiv st1 + | fnstcw word [esp+4] + | mov ax, 0x0400 + | or ax, [esp+4] + | and ax, 0xf7ff + | mov [esp+6], ax + | fldcw word [esp+6] + | frndint + | fldcw word [esp+4] + | fmulp st1 + | fsubp st1 + | ret + } + | + |// FP exponentiation e^x and 2^x. Called by math.exp fast function and + |// from JIT code. Arg/ret on x87 stack. No int/xmm regs modified. + |// Caveat: needs 3 slots on x87 stack! + |->vm_exp_x87: + | fldl2e; fmulp st1 // e^x ==> 2^(x*log2(e)) + |->vm_exp2_x87: + | .if X64WIN + | .define expscratch, dword [rsp+8] // Use scratch area. + | .elif X64 + | .define expscratch, dword [rsp-8] // Use red zone. + | .else + | .define expscratch, dword [esp+4] // Needs 4 byte scratch area. 
+ | .endif + | fst expscratch // Caveat: overwrites ARG1. + | cmp expscratch, 0x7f800000; je >1 // Special case: e^+Inf = +Inf + | cmp expscratch, 0xff800000; je >2 // Special case: e^-Inf = 0 + |->vm_exp2raw: // Entry point for vm_pow. Without +-Inf check. + | fdup; frndint; fsub st1, st0; fxch // Split into frac/int part. + | f2xm1; fld1; faddp st1; fscale; fpop1 // ==> (2^frac-1 +1) << int + |1: + | ret + |2: + | fpop; fldz; ret + | + |// Generic power function x^y. Called by BC_POW, math.pow fast function, + |// and vm_arith. + if (!sse) { + |.if not X64 + |// Args/ret on x87 stack (y on top). RC (eax) modified. + |// Caveat: needs 3 slots on x87 stack! + |->vm_pow: + | fist dword [esp+4] // Store/reload int before comparison. + | fild dword [esp+4] // Integral exponent used in vm_powi. + ||if (cmov) { + | fucomip st1 + ||} else { + | fucomp st1; fnstsw ax; sahf + ||} + | jnz >8 // Branch for FP exponents. + | jp >9 // Branch for NaN exponent. + | fpop // Pop y and fallthrough to vm_powi. + | + |// FP/int power function x^i. Arg1/ret on x87 stack. + |// Arg2 (int) on C stack. RC (eax) modified. + |// Caveat: needs 2 slots on x87 stack! + | mov eax, [esp+4] + | cmp eax, 1; jle >6 // i<=1? + | // Now 1 < (unsigned)i <= 0x80000000. + |1: // Handle leading zeros. + | test eax, 1; jnz >2 + | fmul st0 + | shr eax, 1 + | jmp <1 + |2: + | shr eax, 1; jz >5 + | fdup + |3: // Handle trailing bits. + | fmul st0 + | shr eax, 1; jz >4 + | jnc <3 + | fmul st1, st0 + | jmp <3 + |4: + | fmulp st1 + |5: + | ret + |6: + | je <5 // x^1 ==> x + | jb >7 + | fld1; fdivrp st1 + | neg eax + | cmp eax, 1; je <5 // x^-1 ==> 1/x + | jmp <1 // x^-i ==> (1/x)^i + |7: + | fpop; fld1 // x^0 ==> 1 + | ret + | + |8: // FP/FP power function x^y. + | fst dword [esp+4] + | fxch + | fst dword [esp+8] + | mov eax, [esp+4]; shl eax, 1 + | cmp eax, 0xff000000; je >2 // x^+-Inf? + | mov eax, [esp+8]; shl eax, 1; je >4 // +-0^y? + | cmp eax, 0xff000000; je >4 // +-Inf^y? + | fyl2x + | jmp ->vm_exp2raw + | + |9: // Handle x^NaN. + | fld1 + ||if (cmov) { + | fucomip st2 + ||} else { + | fucomp st2; fnstsw ax; sahf + ||} + | je >1 // 1^NaN ==> 1 + | fxch // x^NaN ==> NaN + |1: + | fpop + | ret + | + |2: // Handle x^+-Inf. + | fabs + | fld1 + ||if (cmov) { + | fucomip st1 + ||} else { + | fucomp st1; fnstsw ax; sahf + ||} + | je >3 // +-1^+-Inf ==> 1 + | fpop; fabs; fldz; mov eax, 0; setc al + | ror eax, 1; xor eax, [esp+4]; jns >3 // |x|<>1, x^+-Inf ==> +Inf/0 + | fxch + |3: + | fpop1; fabs + | ret + | + |4: // Handle +-0^y or +-Inf^y. + | cmp dword [esp+4], 0; jge <3 // y >= 0, x^y ==> |x| + | fpop; fpop + | test eax, eax; jz >5 // y < 0, +-0^y ==> +Inf + | fldz // y < 0, +-Inf^y ==> 0 + | ret + |5: + | mov dword [esp+4], 0x7f800000 // Return +Inf. + | fld dword [esp+4] + | ret + |.endif + } else { + |->vm_pow: + } + | + |// Args in xmm0/xmm1. Ret in xmm0. xmm0-xmm2 and RC (eax) modified. + |// Needs 16 byte scratch area for x86. Also called from JIT code. + |->vm_pow_sse: + | cvtsd2si eax, xmm1 + | cvtsi2sd xmm2, eax + | ucomisd xmm1, xmm2 + | jnz >8 // Branch for FP exponents. + | jp >9 // Branch for NaN exponent. + | // Fallthrough to vm_powi_sse. + | + |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified. + |->vm_powi_sse: + | cmp eax, 1; jle >6 // i<=1? + | // Now 1 < (unsigned)i <= 0x80000000. + |1: // Handle leading zeros. + | test eax, 1; jnz >2 + | mulsd xmm0, xmm0 + | shr eax, 1 + | jmp <1 + |2: + | shr eax, 1; jz >5 + | movaps xmm1, xmm0 + |3: // Handle trailing bits. 
+ | mulsd xmm0, xmm0 + | shr eax, 1; jz >4 + | jnc <3 + | mulsd xmm1, xmm0 + | jmp <3 + |4: + | mulsd xmm0, xmm1 + |5: + | ret + |6: + | je <5 // x^1 ==> x + | jb >7 // x^0 ==> 1 + | neg eax + | call <1 + | sseconst_1 xmm1, RDa + | divsd xmm1, xmm0 + | movaps xmm0, xmm1 + | ret + |7: + | sseconst_1 xmm0, RDa + | ret + | + |8: // FP/FP power function x^y. + |.if X64 + | movd rax, xmm1; shl rax, 1 + | rol rax, 12; cmp rax, 0xffe; je >2 // x^+-Inf? + | movd rax, xmm0; shl rax, 1; je >4 // +-0^y? + | rol rax, 12; cmp rax, 0xffe; je >5 // +-Inf^y? + | .if X64WIN + | movsd qword [rsp+16], xmm1 // Use scratch area. + | movsd qword [rsp+8], xmm0 + | fld qword [rsp+16] + | fld qword [rsp+8] + | .else + | movsd qword [rsp-16], xmm1 // Use red zone. + | movsd qword [rsp-8], xmm0 + | fld qword [rsp-16] + | fld qword [rsp-8] + | .endif + |.else + | movsd qword [esp+12], xmm1 // Needs 16 byte scratch area. + | movsd qword [esp+4], xmm0 + | cmp dword [esp+12], 0; jne >1 + | mov eax, [esp+16]; shl eax, 1 + | cmp eax, 0xffe00000; je >2 // x^+-Inf? + |1: + | cmp dword [esp+4], 0; jne >1 + | mov eax, [esp+8]; shl eax, 1; je >4 // +-0^y? + | cmp eax, 0xffe00000; je >5 // +-Inf^y? + |1: + | fld qword [esp+12] + | fld qword [esp+4] + |.endif + | fyl2x // y*log2(x) + | fdup; frndint; fsub st1, st0; fxch // Split into frac/int part. + | f2xm1; fld1; faddp st1; fscale; fpop1 // ==> (2^frac-1 +1) << int + |.if X64WIN + | fstp qword [rsp+8] // Use scratch area. + | movsd xmm0, qword [rsp+8] + |.elif X64 + | fstp qword [rsp-8] // Use red zone. + | movsd xmm0, qword [rsp-8] + |.else + | fstp qword [esp+4] // Needs 8 byte scratch area. + | movsd xmm0, qword [esp+4] + |.endif + | ret + | + |9: // Handle x^NaN. + | sseconst_1 xmm2, RDa + | ucomisd xmm0, xmm2; je >1 // 1^NaN ==> 1 + | movaps xmm0, xmm1 // x^NaN ==> NaN + |1: + | ret + | + |2: // Handle x^+-Inf. + | sseconst_abs xmm2, RDa + | andpd xmm0, xmm2 // |x| + | sseconst_1 xmm2, RDa + | ucomisd xmm0, xmm2; je <1 // +-1^+-Inf ==> 1 + | movmskpd eax, xmm1 + | xorps xmm0, xmm0 + | mov ah, al; setc al; xor al, ah; jne <1 // |x|<>1, x^+-Inf ==> +Inf/0 + |3: + | sseconst_hi xmm0, RDa, 7ff00000 // +Inf + | ret + | + |4: // Handle +-0^y. + | movmskpd eax, xmm1; test eax, eax; jnz <3 // y < 0, +-0^y ==> +Inf + | xorps xmm0, xmm0 // y >= 0, +-0^y ==> 0 + | ret + | + |5: // Handle +-Inf^y. + | movmskpd eax, xmm1; test eax, eax; jz <3 // y >= 0, +-Inf^y ==> +Inf + | xorps xmm0, xmm0 // y < 0, +-Inf^y ==> 0 + | ret + | + |// Callable from C: double lj_vm_foldfpm(double x, int fpm) + |// Computes fpm(x) for extended math functions. ORDER FPM. + |->vm_foldfpm: +#if LJ_HASJIT + if (sse) { + |.if X64 + | + | .if X64WIN + | .define fpmop, CARG2d + | .else + | .define fpmop, CARG1d + | .endif + | cmp fpmop, 1; jb ->vm_floor; je ->vm_ceil + | cmp fpmop, 3; jb ->vm_trunc; ja >2 + | sqrtsd xmm0, xmm0; ret + |2: + | .if X64WIN + | movsd qword [rsp+8], xmm0 // Use scratch area. + | fld qword [rsp+8] + | .else + | movsd qword [rsp-8], xmm0 // Use red zone. 
+ | fld qword [rsp-8] + | .endif + | cmp fpmop, 5; ja >2 + | .if X64WIN; pop rax; .endif + | je >1 + | call ->vm_exp_x87 + | .if X64WIN; push rax; .endif + | jmp >7 + |1: + | call ->vm_exp2_x87 + | .if X64WIN; push rax; .endif + | jmp >7 + |2: ; cmp fpmop, 7; je >1; ja >2 + | fldln2; fxch; fyl2x; jmp >7 + |1: ; fld1; fxch; fyl2x; jmp >7 + |2: ; cmp fpmop, 9; je >1; ja >2 + | fldlg2; fxch; fyl2x; jmp >7 + |1: ; fsin; jmp >7 + |2: ; cmp fpmop, 11; je >1; ja >9 + | fcos; jmp >7 + |1: ; fptan; fpop + |7: + | .if X64WIN + | fstp qword [rsp+8] // Use scratch area. + | movsd xmm0, qword [rsp+8] + | .else + | fstp qword [rsp-8] // Use red zone. + | movsd xmm0, qword [rsp-8] + | .endif + | ret + | + |.else // x86 calling convention. + | + | .define fpmop, eax + | mov fpmop, [esp+12] + | movsd xmm0, qword [esp+4] + | cmp fpmop, 1; je >1; ja >2 + | call ->vm_floor; jmp >7 + |1: ; call ->vm_ceil; jmp >7 + |2: ; cmp fpmop, 3; je >1; ja >2 + | call ->vm_trunc; jmp >7 + |1: + | sqrtsd xmm0, xmm0 + |7: + | movsd qword [esp+4], xmm0 // Overwrite callee-owned args. + | fld qword [esp+4] + | ret + |2: ; fld qword [esp+4] + | cmp fpmop, 5; jb ->vm_exp_x87; je ->vm_exp2_x87 + |2: ; cmp fpmop, 7; je >1; ja >2 + | fldln2; fxch; fyl2x; ret + |1: ; fld1; fxch; fyl2x; ret + |2: ; cmp fpmop, 9; je >1; ja >2 + | fldlg2; fxch; fyl2x; ret + |1: ; fsin; ret + |2: ; cmp fpmop, 11; je >1; ja >9 + | fcos; ret + |1: ; fptan; fpop; ret + | + |.endif + } else { + | mov fpmop, [esp+12] + | fld qword [esp+4] + | cmp fpmop, 1; jb ->vm_floor; je ->vm_ceil + | cmp fpmop, 3; jb ->vm_trunc; ja >2 + | fsqrt; ret + |2: ; cmp fpmop, 5; jb ->vm_exp_x87; je ->vm_exp2_x87 + | cmp fpmop, 7; je >1; ja >2 + | fldln2; fxch; fyl2x; ret + |1: ; fld1; fxch; fyl2x; ret + |2: ; cmp fpmop, 9; je >1; ja >2 + | fldlg2; fxch; fyl2x; ret + |1: ; fsin; ret + |2: ; cmp fpmop, 11; je >1; ja >9 + | fcos; ret + |1: ; fptan; fpop; ret + } + |9: ; int3 // Bad fpm. +#endif + | + |// Callable from C: double lj_vm_foldarith(double x, double y, int op) + |// Compute x op y for basic arithmetic operators (+ - * / % ^ and unary -) + |// and basic math functions. ORDER ARITH + |->vm_foldarith: + if (sse) { + |.if X64 + | + | .if X64WIN + | .define foldop, CARG3d + | .else + | .define foldop, CARG1d + | .endif + | cmp foldop, 1; je >1; ja >2 + | addsd xmm0, xmm1; ret + |1: ; subsd xmm0, xmm1; ret + |2: ; cmp foldop, 3; je >1; ja >2 + | mulsd xmm0, xmm1; ret + |1: ; divsd xmm0, xmm1; ret + |2: ; cmp foldop, 5; jb ->vm_mod; je ->vm_pow + | cmp foldop, 7; je >1; ja >2 + | sseconst_sign xmm1, RDa; xorps xmm0, xmm1; ret + |1: ; sseconst_abs xmm1, RDa; andps xmm0, xmm1; ret + |2: ; cmp foldop, 9; ja >2 + |.if X64WIN + | movsd qword [rsp+8], xmm0 // Use scratch area. + | movsd qword [rsp+16], xmm1 + | fld qword [rsp+8] + | fld qword [rsp+16] + |.else + | movsd qword [rsp-8], xmm0 // Use red zone. + | movsd qword [rsp-16], xmm1 + | fld qword [rsp-8] + | fld qword [rsp-16] + |.endif + | je >1 + | fpatan + |7: + |.if X64WIN + | fstp qword [rsp+8] // Use scratch area. + | movsd xmm0, qword [rsp+8] + |.else + | fstp qword [rsp-8] // Use red zone. + | movsd xmm0, qword [rsp-8] + |.endif + | ret + |1: ; fxch; fscale; fpop1; jmp <7 + |2: ; cmp foldop, 11; je >1; ja >9 + | minsd xmm0, xmm1; ret + |1: ; maxsd xmm0, xmm1; ret + |9: ; int3 // Bad op. + | + |.else // x86 calling convention. 
+ | + | .define foldop, eax + | mov foldop, [esp+20] + | movsd xmm0, qword [esp+4] + | movsd xmm1, qword [esp+12] + | cmp foldop, 1; je >1; ja >2 + | addsd xmm0, xmm1 + |7: + | movsd qword [esp+4], xmm0 // Overwrite callee-owned args. + | fld qword [esp+4] + | ret + |1: ; subsd xmm0, xmm1; jmp <7 + |2: ; cmp foldop, 3; je >1; ja >2 + | mulsd xmm0, xmm1; jmp <7 + |1: ; divsd xmm0, xmm1; jmp <7 + |2: ; cmp foldop, 5 + | je >1; ja >2 + | call ->vm_mod; jmp <7 + |1: ; pop edx; call ->vm_pow; push edx; jmp <7 // Writes to scratch area. + |2: ; cmp foldop, 7; je >1; ja >2 + | sseconst_sign xmm1, RDa; xorps xmm0, xmm1; jmp <7 + |1: ; sseconst_abs xmm1, RDa; andps xmm0, xmm1; jmp <7 + |2: ; cmp foldop, 9; ja >2 + | fld qword [esp+4] // Reload from stack + | fld qword [esp+12] + | je >1 + | fpatan; ret + |1: ; fxch; fscale; fpop1; ret + |2: ; cmp foldop, 11; je >1; ja >9 + | minsd xmm0, xmm1; jmp <7 + |1: ; maxsd xmm0, xmm1; jmp <7 + |9: ; int3 // Bad op. + | + |.endif + } else { + | mov eax, [esp+20] + | fld qword [esp+4] + | fld qword [esp+12] + | cmp eax, 1; je >1; ja >2 + | faddp st1; ret + |1: ; fsubp st1; ret + |2: ; cmp eax, 3; je >1; ja >2 + | fmulp st1; ret + |1: ; fdivp st1; ret + |2: ; cmp eax, 5; jb ->vm_mod; je ->vm_pow + | cmp eax, 7; je >1; ja >2 + | fpop; fchs; ret + |1: ; fpop; fabs; ret + |2: ; cmp eax, 9; je >1; ja >2 + | fpatan; ret + |1: ; fxch; fscale; fpop1; ret + |2: ; cmp eax, 11; je >1; ja >9 + ||if (cmov) { + | fucomi st1; fcmovnbe st1; fpop1; ret + |1: ; fucomi st1; fcmovbe st1; fpop1; ret + ||} else { + | fucom st1; fnstsw ax; test ah, 1; jz >2; fxch; 2: ; fpop; ret + |1: ; fucom st1; fnstsw ax; test ah, 1; jnz >2; fxch; 2: ; fpop; ret + ||} + |9: ; int3 // Bad op. + } + | + |//----------------------------------------------------------------------- + |//-- Miscellaneous functions -------------------------------------------- + |//----------------------------------------------------------------------- + | + |// int lj_vm_cpuid(uint32_t f, uint32_t res[4]) + |->vm_cpuid: + |.if X64 + | mov eax, CARG1d + | .if X64WIN; push rsi; mov rsi, CARG2; .endif + | push rbx + | cpuid + | mov [rsi], eax + | mov [rsi+4], ebx + | mov [rsi+8], ecx + | mov [rsi+12], edx + | pop rbx + | .if X64WIN; pop rsi; .endif + | ret + |.else + | pushfd + | pop edx + | mov ecx, edx + | xor edx, 0x00200000 // Toggle ID bit in flags. + | push edx + | popfd + | pushfd + | pop edx + | xor eax, eax // Zero means no features supported. + | cmp ecx, edx + | jz >1 // No ID toggle means no CPUID support. + | mov eax, [esp+4] // Argument 1 is function number. + | push edi + | push ebx + | cpuid + | mov edi, [esp+16] // Argument 2 is result area. + | mov [edi], eax + | mov [edi+4], ebx + | mov [edi+8], ecx + | mov [edi+12], edx + | pop ebx + | pop edi + |1: + | ret + |.endif + | + |//----------------------------------------------------------------------- + |//-- FFI helper functions ----------------------------------------------- + |//----------------------------------------------------------------------- + | + |->vm_ffi_call@4: +#if LJ_HASFFI + |.if X64 + | .type CCSTATE, CCallState, rbx + | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1 + |.else + | .type CCSTATE, CCallState, ebx + | push ebp; mov ebp, esp; push ebx; mov CCSTATE, FCARG1 + |.endif + | + | // Readjust stack. + |.if X64 + | mov eax, CCSTATE->spadj + | sub rsp, rax + |.else + | sub esp, CCSTATE->spadj +#if LJ_TARGET_WINDOWS + | mov CCSTATE->spadj, esp +#endif + |.endif + | + | // Copy stack slots. 
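The two helpers above are documented as callable from C: double lj_vm_foldfpm(double x, int fpm) and double lj_vm_foldarith(double x, double y, int op). For reference, the operation numbering implied by their compare/jump chains appears to be the one in the plain C sketch below (illustrative only; fold_fpm/fold_arith are made-up names, and the modulo case follows Lua's floored modulo as computed by ->vm_mod rather than C's fmod):

#include <math.h>

/* Sketch of the fpm(x) dispatch in ->vm_foldfpm (ORDER FPM). */
double fold_fpm(double x, int fpm)
{
  switch (fpm) {
  case 0: return floor(x);   case 1: return ceil(x);
  case 2: return trunc(x);   case 3: return sqrt(x);
  case 4: return exp(x);     case 5: return exp2(x);
  case 6: return log(x);     case 7: return log2(x);
  case 8: return log10(x);   case 9: return sin(x);
  case 10: return cos(x);    case 11: return tan(x);
  default: return 0.0;  /* The assembly hits int3 here ("Bad fpm"). */
  }
}

/* Sketch of the x op y dispatch in ->vm_foldarith (ORDER ARITH). */
double fold_arith(double x, double y, int op)
{
  switch (op) {
  case 0: return x + y;             case 1: return x - y;
  case 2: return x * y;             case 3: return x / y;
  case 4: return x - floor(x/y)*y;  /* Lua's floored modulo (->vm_mod). */
  case 5: return pow(x, y);         case 6: return -x;
  case 7: return fabs(x);           case 8: return atan2(x, y);
  case 9: return ldexp(x, (int)y);  case 10: return x < y ? x : y;
  case 11: return x > y ? x : y;
  default: return 0.0;  /* int3 in the assembly ("Bad op"). */
  }
}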
+ | movzx ecx, byte CCSTATE->nsp + | sub ecx, 1 + | js >2 + |1: + |.if X64 + | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)] + | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax + |.else + | mov eax, [CCSTATE+ecx*4+offsetof(CCallState, stack)] + | mov [esp+ecx*4], eax + |.endif + | sub ecx, 1 + | jns <1 + |2: + | + |.if X64 + | movzx eax, byte CCSTATE->nfpr + | mov CARG1, CCSTATE->gpr[0] + | mov CARG2, CCSTATE->gpr[1] + | mov CARG3, CCSTATE->gpr[2] + | mov CARG4, CCSTATE->gpr[3] + |.if not X64WIN + | mov CARG5, CCSTATE->gpr[4] + | mov CARG6, CCSTATE->gpr[5] + |.endif + | test eax, eax; jz >5 + | movaps xmm0, CCSTATE->fpr[0] + | movaps xmm1, CCSTATE->fpr[1] + | movaps xmm2, CCSTATE->fpr[2] + | movaps xmm3, CCSTATE->fpr[3] + |.if not X64WIN + | cmp eax, 4; jbe >5 + | movaps xmm4, CCSTATE->fpr[4] + | movaps xmm5, CCSTATE->fpr[5] + | movaps xmm6, CCSTATE->fpr[6] + | movaps xmm7, CCSTATE->fpr[7] + |.endif + |5: + |.else + | mov FCARG1, CCSTATE->gpr[0] + | mov FCARG2, CCSTATE->gpr[1] + |.endif + | + | call aword CCSTATE->func + | + |.if X64 + | mov CCSTATE->gpr[0], rax + | movaps CCSTATE->fpr[0], xmm0 + |.if not X64WIN + | mov CCSTATE->gpr[1], rdx + | movaps CCSTATE->fpr[1], xmm1 + |.endif + |.else + | mov CCSTATE->gpr[0], eax + | mov CCSTATE->gpr[1], edx + | cmp byte CCSTATE->resx87, 1 + | jb >7 + | je >6 + | fstp qword CCSTATE->fpr[0].d[0] + | jmp >7 + |6: + | fstp dword CCSTATE->fpr[0].f[0] + |7: +#if LJ_TARGET_WINDOWS + | sub CCSTATE->spadj, esp +#endif + |.endif + | + |.if X64 + | mov rbx, [rbp-8]; leave; ret + |.else + | mov ebx, [ebp-4]; leave; ret + |.endif +#endif + | + |//----------------------------------------------------------------------- + |//-- Assertions --------------------------------------------------------- + |//----------------------------------------------------------------------- + | + |->assert_bad_for_arg_type: +#ifdef LUA_USE_ASSERT + | int3 +#endif + | int3 + | + |//----------------------------------------------------------------------- +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop, int cmov, int sse) +{ + int vk = 0; + |// Note: aligning all instructions does not pay off. + |=>defop: + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + |.macro jmp_comp, lt, ge, le, gt, target + ||switch (op) { + ||case BC_ISLT: + | lt target + ||break; + ||case BC_ISGE: + | ge target + ||break; + ||case BC_ISLE: + | le target + ||break; + ||case BC_ISGT: + | gt target + ||break; + ||default: break; /* Shut up GCC. */ + ||} + |.endmacro + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + | // RA = src1, RD = src2, JMP with RD = target + | ins_AD + if (LJ_DUALNUM) { + | checkint RA, >7 + | checkint RD, >8 + | mov RB, dword [BASE+RA*8] + | add PC, 4 + | cmp RB, dword [BASE+RD*8] + | jmp_comp jge, jl, jg, jle, >9 + |6: + | movzx RD, PC_RD + | branchPC RD + |9: + | ins_next + | + |7: // RA is not an integer. + | ja ->vmeta_comp + | // RA is a number. + | cmp dword [BASE+RD*8+4], LJ_TISNUM; jb >1; jne ->vmeta_comp + | // RA is a number, RD is an integer. + if (sse) { + | cvtsi2sd xmm0, dword [BASE+RD*8] + | jmp >2 + } else { + | fld qword [BASE+RA*8] + | fild dword [BASE+RD*8] + | jmp >3 + } + | + |8: // RA is an integer, RD is not an integer. + | ja ->vmeta_comp + | // RA is an integer, RD is a number. 
+ if (sse) { + | cvtsi2sd xmm1, dword [BASE+RA*8] + | movsd xmm0, qword [BASE+RD*8] + | add PC, 4 + | ucomisd xmm0, xmm1 + | jmp_comp jbe, ja, jb, jae, <9 + | jmp <6 + } else { + | fild dword [BASE+RA*8] + | jmp >2 + } + } else { + | checknum RA, ->vmeta_comp + | checknum RD, ->vmeta_comp + } + if (sse) { + |1: + | movsd xmm0, qword [BASE+RD*8] + |2: + | add PC, 4 + | ucomisd xmm0, qword [BASE+RA*8] + |3: + } else { + |1: + | fld qword [BASE+RA*8] // Reverse order, i.e like cmp D, A. + |2: + | fld qword [BASE+RD*8] + |3: + | add PC, 4 + | fcomparepp // eax (RD) modified! + } + | // Unordered: all of ZF CF PF set, ordered: PF clear. + | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. + if (LJ_DUALNUM) { + | jmp_comp jbe, ja, jb, jae, <9 + | jmp <6 + } else { + | jmp_comp jbe, ja, jb, jae, >1 + | movzx RD, PC_RD + | branchPC RD + |1: + | ins_next + } + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + | ins_AD // RA = src1, RD = src2, JMP with RD = target + | mov RB, [BASE+RD*8+4] + | add PC, 4 + if (LJ_DUALNUM) { + | cmp RB, LJ_TISNUM; jne >7 + | checkint RA, >8 + | mov RB, dword [BASE+RD*8] + | cmp RB, dword [BASE+RA*8] + if (vk) { + | jne >9 + } else { + | je >9 + } + | movzx RD, PC_RD + | branchPC RD + |9: + | ins_next + | + |7: // RD is not an integer. + | ja >5 + | // RD is a number. + | cmp dword [BASE+RA*8+4], LJ_TISNUM; jb >1; jne >5 + | // RD is a number, RA is an integer. + if (sse) { + | cvtsi2sd xmm0, dword [BASE+RA*8] + } else { + | fild dword [BASE+RA*8] + } + | jmp >2 + | + |8: // RD is an integer, RA is not an integer. + | ja >5 + | // RD is an integer, RA is a number. + if (sse) { + | cvtsi2sd xmm0, dword [BASE+RD*8] + | ucomisd xmm0, qword [BASE+RA*8] + } else { + | fild dword [BASE+RD*8] + | fld qword [BASE+RA*8] + } + | jmp >4 + | + } else { + | cmp RB, LJ_TISNUM; jae >5 + | checknum RA, >5 + } + if (sse) { + |1: + | movsd xmm0, qword [BASE+RA*8] + |2: + | ucomisd xmm0, qword [BASE+RD*8] + |4: + } else { + |1: + | fld qword [BASE+RA*8] + |2: + | fld qword [BASE+RD*8] + |4: + | fcomparepp // eax (RD) modified! + } + iseqne_fp: + if (vk) { + | jp >2 // Unordered means not equal. + | jne >2 + } else { + | jp >2 // Unordered means not equal. + | je >1 + } + iseqne_end: + if (vk) { + |1: // EQ: Branch to the target. + | movzx RD, PC_RD + | branchPC RD + |2: // NE: Fallthrough to next instruction. + if (!LJ_HASFFI) { + |3: + } + } else { + if (!LJ_HASFFI) { + |3: + } + |2: // NE: Branch to the target. + | movzx RD, PC_RD + | branchPC RD + |1: // EQ: Fallthrough to next instruction. + } + if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV || + op == BC_ISEQN || op == BC_ISNEN)) { + | jmp <9 + } else { + | ins_next + } + | + if (op == BC_ISEQV || op == BC_ISNEV) { + |5: // Either or both types are not numbers. + if (LJ_HASFFI) { + | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd + | checktp RA, LJ_TCDATA; je ->vmeta_equal_cd + } + | checktp RA, RB // Compare types. + | jne <2 // Not the same type? + | cmp RB, LJ_TISPRI + | jae <1 // Same type and primitive type? + | + | // Same types and not a primitive type. Compare GCobj or pvalue. + | mov RA, [BASE+RA*8] + | mov RD, [BASE+RD*8] + | cmp RA, RD + | je <1 // Same GCobjs or pvalues? + | cmp RB, LJ_TISTABUD + | ja <2 // Different objects and not table/ud? + | + | // Different tables or userdatas. Need to check __eq metamethod. + | // Field metatable must be at same offset for GCtab and GCudata! + | mov TAB:RB, TAB:RA->metatable + | test TAB:RB, TAB:RB + | jz <2 // No metatable? 
+ | test byte TAB:RB->nomm, 1<<MM_eq + | jnz <2 // Or 'no __eq' flag set? + if (vk) { + | xor RB, RB // ne = 0 + } else { + | mov RB, 1 // ne = 1 + } + | jmp ->vmeta_equal // Handle __eq metamethod. + } else if (LJ_HASFFI) { + |3: + | cmp RB, LJ_TCDATA + if (LJ_DUALNUM && vk) { + | jne <9 + } else { + | jne <2 + } + | jmp ->vmeta_equal_cd + } + break; + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + | ins_AND // RA = src, RD = str const, JMP with RD = target + | mov RB, [BASE+RA*8+4] + | add PC, 4 + | cmp RB, LJ_TSTR; jne >3 + | mov RA, [BASE+RA*8] + | cmp RA, [KBASE+RD*4] + iseqne_test: + if (vk) { + | jne >2 + } else { + | je >1 + } + goto iseqne_end; + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + | ins_AD // RA = src, RD = num const, JMP with RD = target + | mov RB, [BASE+RA*8+4] + | add PC, 4 + if (LJ_DUALNUM) { + | cmp RB, LJ_TISNUM; jne >7 + | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jne >8 + | mov RB, dword [KBASE+RD*8] + | cmp RB, dword [BASE+RA*8] + if (vk) { + | jne >9 + } else { + | je >9 + } + | movzx RD, PC_RD + | branchPC RD + |9: + | ins_next + | + |7: // RA is not an integer. + | ja >3 + | // RA is a number. + | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jb >1 + | // RA is a number, RD is an integer. + if (sse) { + | cvtsi2sd xmm0, dword [KBASE+RD*8] + } else { + | fild dword [KBASE+RD*8] + } + | jmp >2 + | + |8: // RA is an integer, RD is a number. + if (sse) { + | cvtsi2sd xmm0, dword [BASE+RA*8] + | ucomisd xmm0, qword [KBASE+RD*8] + } else { + | fild dword [BASE+RA*8] + | fld qword [KBASE+RD*8] + } + | jmp >4 + } else { + | cmp RB, LJ_TISNUM; jae >3 + } + if (sse) { + |1: + | movsd xmm0, qword [KBASE+RD*8] + |2: + | ucomisd xmm0, qword [BASE+RA*8] + |4: + } else { + |1: + | fld qword [KBASE+RD*8] + |2: + | fld qword [BASE+RA*8] + |4: + | fcomparepp // eax (RD) modified! + } + goto iseqne_fp; + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target + | mov RB, [BASE+RA*8+4] + | add PC, 4 + | cmp RB, RD + if (!LJ_HASFFI) goto iseqne_test; + if (vk) { + | jne >3 + | movzx RD, PC_RD + | branchPC RD + |2: + | ins_next + |3: + | cmp RB, LJ_TCDATA; jne <2 + | jmp ->vmeta_equal_cd + } else { + | je >2 + | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd + | movzx RD, PC_RD + | branchPC RD + |2: + | ins_next + } + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + | ins_AD // RA = dst or unused, RD = src, JMP with RD = target + | mov RB, [BASE+RD*8+4] + | add PC, 4 + | cmp RB, LJ_TISTRUECOND + if (op == BC_IST || op == BC_ISTC) { + | jae >1 + } else { + | jb >1 + } + if (op == BC_ISTC || op == BC_ISFC) { + | mov [BASE+RA*8+4], RB + | mov RB, [BASE+RD*8] + | mov [BASE+RA*8], RB + } + | movzx RD, PC_RD + | branchPC RD + |1: // Fallthrough to the next instruction. 
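The comment in the comparison ops above notes that GE/GT branch on an unordered result while LT/LE do not. These opcodes are used as complementary pairs, and with a NaN operand both a < b and a >= b are false under IEEE-754, so only the "not less than" reading may branch. A tiny standalone C check of that asymmetry (illustrative only):

#include <math.h>
#include <stdio.h>

int main(void)
{
  double a = NAN, b = 1.0;
  /* Both direct comparisons are false for unordered operands... */
  printf("a <  b : %d\n", a < b);     /* 0 */
  printf("a >= b : %d\n", a >= b);    /* 0 */
  /* ...so "not (a < b)" is true, which is what the GE opcode must
     implement to stay the exact complement of the LT opcode. */
  printf("!(a < b): %d\n", !(a < b)); /* 1 */
  return 0;
}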
+ | ins_next + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + | ins_AD // RA = dst, RD = src + |.if X64 + | mov RBa, [BASE+RD*8] + | mov [BASE+RA*8], RBa + |.else + | mov RB, [BASE+RD*8+4] + | mov RD, [BASE+RD*8] + | mov [BASE+RA*8+4], RB + | mov [BASE+RA*8], RD + |.endif + | ins_next_ + break; + case BC_NOT: + | ins_AD // RA = dst, RD = src + | xor RB, RB + | checktp RD, LJ_TISTRUECOND + | adc RB, LJ_TTRUE + | mov [BASE+RA*8+4], RB + | ins_next + break; + case BC_UNM: + | ins_AD // RA = dst, RD = src + if (LJ_DUALNUM) { + | checkint RD, >5 + | mov RB, [BASE+RD*8] + | neg RB + | jo >4 + | mov dword [BASE+RA*8+4], LJ_TISNUM + | mov dword [BASE+RA*8], RB + |9: + | ins_next + |4: + | mov dword [BASE+RA*8+4], 0x41e00000 // 2^31. + | mov dword [BASE+RA*8], 0 + | jmp <9 + |5: + | ja ->vmeta_unm + } else { + | checknum RD, ->vmeta_unm + } + if (sse) { + | movsd xmm0, qword [BASE+RD*8] + | sseconst_sign xmm1, RDa + | xorps xmm0, xmm1 + | movsd qword [BASE+RA*8], xmm0 + } else { + | fld qword [BASE+RD*8] + | fchs + | fstp qword [BASE+RA*8] + } + if (LJ_DUALNUM) { + | jmp <9 + } else { + | ins_next + } + break; + case BC_LEN: + | ins_AD // RA = dst, RD = src + | checkstr RD, >2 + | mov STR:RD, [BASE+RD*8] + if (LJ_DUALNUM) { + | mov RD, dword STR:RD->len + |1: + | mov dword [BASE+RA*8+4], LJ_TISNUM + | mov dword [BASE+RA*8], RD + } else if (sse) { + | xorps xmm0, xmm0 + | cvtsi2sd xmm0, dword STR:RD->len + |1: + | movsd qword [BASE+RA*8], xmm0 + } else { + | fild dword STR:RD->len + |1: + | fstp qword [BASE+RA*8] + } + | ins_next + |2: + | checktab RD, ->vmeta_len + | mov TAB:FCARG1, [BASE+RD*8] +#ifdef LUAJIT_ENABLE_LUA52COMPAT + | mov TAB:RB, TAB:FCARG1->metatable + | cmp TAB:RB, 0 + | jnz >9 + |3: +#endif + |->BC_LEN_Z: + | mov RB, BASE // Save BASE. + | call extern lj_tab_len@4 // (GCtab *t) + | // Length of table returned in eax (RD). + if (LJ_DUALNUM) { + | // Nothing to do. + } else if (sse) { + | cvtsi2sd xmm0, RD + } else { + |.if not X64 + | mov ARG1, RD + | fild ARG1 + |.endif + } + | mov BASE, RB // Restore BASE. + | movzx RA, PC_RA + | jmp <1 +#ifdef LUAJIT_ENABLE_LUA52COMPAT + |9: // Check for __len. + | test byte TAB:RB->nomm, 1<<MM_len + | jnz <3 + | jmp ->vmeta_len // 'no __len' flag NOT set: check. 
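The BC_UNM fast path above negates a number by XOR-ing a sign-bit constant into it (sseconst_sign with xorps) rather than doing an FP subtraction; the matching abs case masks the bit off with andps. A minimal standalone C sketch of the same bit trick (the function name is illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Negate a double by toggling the IEEE-754 sign bit, as xorps does. */
double neg_by_signbit(double x)
{
  uint64_t bits;
  memcpy(&bits, &x, sizeof(bits));
  bits ^= UINT64_C(1) << 63;     /* Flip only the sign bit. */
  memcpy(&x, &bits, sizeof(x));
  return x;
}

int main(void)
{
  printf("%g %g %g\n", neg_by_signbit(1.5), neg_by_signbit(-0.0),
         neg_by_signbit(0.0));   /* -1.5 0 -0 */
  return 0;
}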
+#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + |.macro ins_arithpre, x87ins, sseins, ssereg + | ins_ABC + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | checknum RB, ->vmeta_arith_vn + ||if (LJ_DUALNUM) { + | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_vn + ||} + ||if (sse) { + | movsd xmm0, qword [BASE+RB*8] + | sseins ssereg, qword [KBASE+RC*8] + ||} else { + | fld qword [BASE+RB*8] + | x87ins qword [KBASE+RC*8] + ||} + || break; + ||case 1: + | checknum RB, ->vmeta_arith_nv + ||if (LJ_DUALNUM) { + | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_nv + ||} + ||if (sse) { + | movsd xmm0, qword [KBASE+RC*8] + | sseins ssereg, qword [BASE+RB*8] + ||} else { + | fld qword [KBASE+RC*8] + | x87ins qword [BASE+RB*8] + ||} + || break; + ||default: + | checknum RB, ->vmeta_arith_vv + | checknum RC, ->vmeta_arith_vv + ||if (sse) { + | movsd xmm0, qword [BASE+RB*8] + | sseins ssereg, qword [BASE+RC*8] + ||} else { + | fld qword [BASE+RB*8] + | x87ins qword [BASE+RC*8] + ||} + || break; + ||} + |.endmacro + | + |.macro ins_arithdn, intins + | ins_ABC + ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + ||switch (vk) { + ||case 0: + | checkint RB, ->vmeta_arith_vn + | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_vn + | mov RB, [BASE+RB*8] + | intins RB, [KBASE+RC*8]; jo ->vmeta_arith_vno + || break; + ||case 1: + | checkint RB, ->vmeta_arith_nv + | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_nv + | mov RC, [KBASE+RC*8] + | intins RC, [BASE+RB*8]; jo ->vmeta_arith_nvo + || break; + ||default: + | checkint RB, ->vmeta_arith_vv + | checkint RC, ->vmeta_arith_vv + | mov RB, [BASE+RB*8] + | intins RB, [BASE+RC*8]; jo ->vmeta_arith_vvo + || break; + ||} + | mov dword [BASE+RA*8+4], LJ_TISNUM + ||if (vk == 1) { + | mov dword [BASE+RA*8], RC + ||} else { + | mov dword [BASE+RA*8], RB + ||} + | ins_next + |.endmacro + | + |.macro ins_arithpost + ||if (sse) { + | movsd qword [BASE+RA*8], xmm0 + ||} else { + | fstp qword [BASE+RA*8] + ||} + |.endmacro + | + |.macro ins_arith, x87ins, sseins + | ins_arithpre x87ins, sseins, xmm0 + | ins_arithpost + | ins_next + |.endmacro + | + |.macro ins_arith, intins, x87ins, sseins + ||if (LJ_DUALNUM) { + | ins_arithdn intins + ||} else { + | ins_arith, x87ins, sseins + ||} + |.endmacro + + | // RA = dst, RB = src1 or num const, RC = src2 or num const + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + | ins_arith add, fadd, addsd + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + | ins_arith sub, fsub, subsd + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + | ins_arith imul, fmul, mulsd + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + | ins_arith fdiv, divsd + break; + case BC_MODVN: + | ins_arithpre fld, movsd, xmm1 + |->BC_MODVN_Z: + | call ->vm_mod + | ins_arithpost + | ins_next + break; + case BC_MODNV: case BC_MODVV: + | ins_arithpre fld, movsd, xmm1 + | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. 
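In LJ_DUALNUM builds the ins_arithdn macro above performs the operation on 32-bit integers and relies on the jo (jump-on-overflow) check to bail out to the vmeta_arith handlers. A standalone C sketch of that pattern, assuming the GCC/Clang __builtin_add_overflow builtin (illustrative; the real fallback re-enters the metamethod/FP path rather than computing the double inline):

#include <stdint.h>

/* Add two integer-tagged operands; on overflow redo the op as a double,
   mirroring the "intins ...; jo ->vmeta_arith_*" fast path above. */
int add_dualnum(int32_t a, int32_t b, int32_t *ires, double *nres)
{
  int32_t r;
  if (!__builtin_add_overflow(a, b, &r)) {
    *ires = r;                      /* Result stays in the integer range. */
    return 1;                       /* Caller stores it with the int tag. */
  }
  *nres = (double)a + (double)b;    /* Overflowed: fall back to FP. */
  return 0;
}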
+ break; + case BC_POW: + | ins_arithpre fld, movsd, xmm1 + | call ->vm_pow + | ins_arithpost + | ins_next + break; + + case BC_CAT: + | ins_ABC // RA = dst, RB = src_start, RC = src_end + |.if X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE + | lea CARG2d, [BASE+RC*8] + | mov CARG3d, RC + | sub CARG3d, RB + |->BC_CAT_Z: + | mov L:RB, L:CARG1d + |.else + | lea RA, [BASE+RC*8] + | sub RC, RB + | mov ARG2, RA + | mov ARG3, RC + |->BC_CAT_Z: + | mov L:RB, SAVE_L + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_meta_cat // (lua_State *L, TValue *top, int left) + | // NULL (finished) or TValue * (metamethod) returned in eax (RC). + | mov BASE, L:RB->base + | test RC, RC + | jnz ->vmeta_binop + | movzx RB, PC_RB // Copy result to Stk[RA] from Stk[RB]. + | movzx RA, PC_RA + |.if X64 + | mov RCa, [BASE+RB*8] + | mov [BASE+RA*8], RCa + |.else + | mov RC, [BASE+RB*8+4] + | mov RB, [BASE+RB*8] + | mov [BASE+RA*8+4], RC + | mov [BASE+RA*8], RB + |.endif + | ins_next + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + | ins_AND // RA = dst, RD = str const (~) + | mov RD, [KBASE+RD*4] + | mov dword [BASE+RA*8+4], LJ_TSTR + | mov [BASE+RA*8], RD + | ins_next + break; + case BC_KCDATA: +#if LJ_HASFFI + | ins_AND // RA = dst, RD = cdata const (~) + | mov RD, [KBASE+RD*4] + | mov dword [BASE+RA*8+4], LJ_TCDATA + | mov [BASE+RA*8], RD + | ins_next +#endif + break; + case BC_KSHORT: + | ins_AD // RA = dst, RD = signed int16 literal + if (LJ_DUALNUM) { + | movsx RD, RDW + | mov dword [BASE+RA*8+4], LJ_TISNUM + | mov dword [BASE+RA*8], RD + } else if (sse) { + | movsx RD, RDW // Sign-extend literal. + | cvtsi2sd xmm0, RD + | movsd qword [BASE+RA*8], xmm0 + } else { + | fild PC_RD // Refetch signed RD from instruction. + | fstp qword [BASE+RA*8] + } + | ins_next + break; + case BC_KNUM: + | ins_AD // RA = dst, RD = num const + if (sse) { + | movsd xmm0, qword [KBASE+RD*8] + | movsd qword [BASE+RA*8], xmm0 + } else { + | fld qword [KBASE+RD*8] + | fstp qword [BASE+RA*8] + } + | ins_next + break; + case BC_KPRI: + | ins_AND // RA = dst, RD = primitive type (~) + | mov [BASE+RA*8+4], RD + | ins_next + break; + case BC_KNIL: + | ins_AD // RA = dst_start, RD = dst_end + | lea RA, [BASE+RA*8+12] + | lea RD, [BASE+RD*8+4] + | mov RB, LJ_TNIL + | mov [RA-8], RB // Sets minimum 2 slots. + |1: + | mov [RA], RB + | add RA, 8 + | cmp RA, RD + | jbe <1 + | ins_next + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + | ins_AD // RA = dst, RD = upvalue # + | mov LFUNC:RB, [BASE-8] + | mov UPVAL:RB, [LFUNC:RB+RD*4+offsetof(GCfuncL, uvptr)] + | mov RB, UPVAL:RB->v + |.if X64 + | mov RDa, [RB] + | mov [BASE+RA*8], RDa + |.else + | mov RD, [RB+4] + | mov RB, [RB] + | mov [BASE+RA*8+4], RD + | mov [BASE+RA*8], RB + |.endif + | ins_next + break; + case BC_USETV: +#define TV2MARKOFS \ + ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)) + | ins_AD // RA = upvalue #, RD = src + | mov LFUNC:RB, [BASE-8] + | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] + | cmp byte UPVAL:RB->closed, 0 + | mov RB, UPVAL:RB->v + | mov RA, [BASE+RD*8] + | mov RD, [BASE+RD*8+4] + | mov [RB], RA + | mov [RB+4], RD + | jz >1 + | // Check barrier for closed upvalue. + | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv) + | jnz >2 + |1: + | ins_next + | + |2: // Upvalue is black. Check if new value is collectable and white. 
+ | sub RD, LJ_TISGCV + | cmp RD, LJ_TISNUM - LJ_TISGCV // tvisgcv(v) + | jbe <1 + | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v) + | jz <1 + | // Crossed a write barrier. Move the barrier forward. + |.if X64 and not X64WIN + | mov FCARG2, RB + | mov RB, BASE // Save BASE. + |.else + | xchg FCARG2, RB // Save BASE (FCARG2 == BASE). + |.endif + | lea GL:FCARG1, [DISPATCH+GG_DISP2G] + | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv) + | mov BASE, RB // Restore BASE. + | jmp <1 + break; +#undef TV2MARKOFS + case BC_USETS: + | ins_AND // RA = upvalue #, RD = str const (~) + | mov LFUNC:RB, [BASE-8] + | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] + | mov GCOBJ:RA, [KBASE+RD*4] + | mov RD, UPVAL:RB->v + | mov [RD], GCOBJ:RA + | mov dword [RD+4], LJ_TSTR + | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv) + | jnz >2 + |1: + | ins_next + | + |2: // Check if string is white and ensure upvalue is closed. + | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str) + | jz <1 + | cmp byte UPVAL:RB->closed, 0 + | jz <1 + | // Crossed a write barrier. Move the barrier forward. + | mov RB, BASE // Save BASE (FCARG2 == BASE). + | mov FCARG2, RD + | lea GL:FCARG1, [DISPATCH+GG_DISP2G] + | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv) + | mov BASE, RB // Restore BASE. + | jmp <1 + break; + case BC_USETN: + | ins_AD // RA = upvalue #, RD = num const + | mov LFUNC:RB, [BASE-8] + if (sse) { + | movsd xmm0, qword [KBASE+RD*8] + } else { + | fld qword [KBASE+RD*8] + } + | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] + | mov RA, UPVAL:RB->v + if (sse) { + | movsd qword [RA], xmm0 + } else { + | fstp qword [RA] + } + | ins_next + break; + case BC_USETP: + | ins_AND // RA = upvalue #, RD = primitive type (~) + | mov LFUNC:RB, [BASE-8] + | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] + | mov RA, UPVAL:RB->v + | mov [RA+4], RD + | ins_next + break; + case BC_UCLO: + | ins_AD // RA = level, RD = target + | branchPC RD // Do this first to free RD. + | mov L:RB, SAVE_L + | cmp dword L:RB->openupval, 0 + | je >1 + | mov L:RB->base, BASE + | lea FCARG2, [BASE+RA*8] // Caveat: FCARG2 == BASE + | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA + | call extern lj_func_closeuv@8 // (lua_State *L, TValue *level) + | mov BASE, L:RB->base + |1: + | ins_next + break; + + case BC_FNEW: + | ins_AND // RA = dst, RD = proto const (~) (holding function prototype) + |.if X64 + | mov L:RB, SAVE_L + | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG3d, [BASE-8] + | mov CARG2d, [KBASE+RD*4] // Fetch GCproto *. + | mov CARG1d, L:RB + |.else + | mov LFUNC:RA, [BASE-8] + | mov PROTO:RD, [KBASE+RD*4] // Fetch GCproto *. + | mov L:RB, SAVE_L + | mov ARG3, LFUNC:RA + | mov ARG2, PROTO:RD + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | // (lua_State *L, GCproto *pt, GCfuncL *parent) + | call extern lj_func_newL_gc + | // GCfuncL * returned in eax (RC). 
+ | mov BASE, L:RB->base + | movzx RA, PC_RA + | mov [BASE+RA*8], LFUNC:RC + | mov dword [BASE+RA*8+4], LJ_TFUNC + | ins_next + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + | ins_AD // RA = dst, RD = hbits|asize + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] + | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] + | mov SAVE_PC, PC + | jae >5 + |1: + |.if X64 + | mov CARG3d, RD + | and RD, 0x7ff + | shr CARG3d, 11 + |.else + | mov RA, RD + | and RD, 0x7ff + | shr RA, 11 + | mov ARG3, RA + |.endif + | cmp RD, 0x7ff + | je >3 + |2: + |.if X64 + | mov L:CARG1d, L:RB + | mov CARG2d, RD + |.else + | mov ARG1, L:RB + | mov ARG2, RD + |.endif + | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits) + | // Table * returned in eax (RC). + | mov BASE, L:RB->base + | movzx RA, PC_RA + | mov [BASE+RA*8], TAB:RC + | mov dword [BASE+RA*8+4], LJ_TTAB + | ins_next + |3: // Turn 0x7ff into 0x801. + | mov RD, 0x801 + | jmp <2 + |5: + | mov L:FCARG1, L:RB + | call extern lj_gc_step_fixtop@4 // (lua_State *L) + | movzx RD, PC_RD + | jmp <1 + break; + case BC_TDUP: + | ins_AND // RA = dst, RD = table const (~) (holding template table) + | mov L:RB, SAVE_L + | mov RA, [DISPATCH+DISPATCH_GL(gc.total)] + | mov SAVE_PC, PC + | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)] + | mov L:RB->base, BASE + | jae >3 + |2: + | mov TAB:FCARG2, [KBASE+RD*4] // Caveat: FCARG2 == BASE + | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA + | call extern lj_tab_dup@8 // (lua_State *L, Table *kt) + | // Table * returned in eax (RC). + | mov BASE, L:RB->base + | movzx RA, PC_RA + | mov [BASE+RA*8], TAB:RC + | mov dword [BASE+RA*8+4], LJ_TTAB + | ins_next + |3: + | mov L:FCARG1, L:RB + | call extern lj_gc_step_fixtop@4 // (lua_State *L) + | movzx RD, PC_RD // Need to reload RD. + | not RDa + | jmp <2 + break; + + case BC_GGET: + | ins_AND // RA = dst, RD = str const (~) + | mov LFUNC:RB, [BASE-8] + | mov TAB:RB, LFUNC:RB->env + | mov STR:RC, [KBASE+RD*4] + | jmp ->BC_TGETS_Z + break; + case BC_GSET: + | ins_AND // RA = src, RD = str const (~) + | mov LFUNC:RB, [BASE-8] + | mov TAB:RB, LFUNC:RB->env + | mov STR:RC, [KBASE+RD*4] + | jmp ->BC_TSETS_Z + break; + + case BC_TGETV: + | ins_ABC // RA = dst, RB = table, RC = key + | checktab RB, ->vmeta_tgetv + | mov TAB:RB, [BASE+RB*8] + | + | // Integer key? + if (LJ_DUALNUM) { + | checkint RC, >5 + | mov RC, dword [BASE+RC*8] + } else { + | // Convert number to int and back and compare. + | checknum RC, >5 + if (sse) { + | movsd xmm0, qword [BASE+RC*8] + | cvtsd2si RC, xmm0 + | cvtsi2sd xmm1, RC + | ucomisd xmm0, xmm1 + } else { + |.if not X64 + | fld qword [BASE+RC*8] + | fist ARG1 + | fild ARG1 + | fcomparepp // eax (RC) modified! + | mov RC, ARG1 + |.endif + } + | jne ->vmeta_tgetv // Generic numeric key? Use fallback. + } + | cmp RC, TAB:RB->asize // Takes care of unordered, too. + | jae ->vmeta_tgetv // Not in array part? Use fallback. + | shl RC, 3 + | add RC, TAB:RB->array + | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath. + | je >2 + | // Get array slot. + |.if X64 + | mov RBa, [RC] + | mov [BASE+RA*8], RBa + |.else + | mov RB, [RC] + | mov RC, [RC+4] + | mov [BASE+RA*8], RB + | mov [BASE+RA*8+4], RC + |.endif + |1: + | ins_next + | + |2: // Check for __index if table value is nil. + | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. 
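With dual numbers disabled, BC_TGETV above (and BC_TSETV further down) accepts a numeric key on the fast path only if it survives a convert-to-int-and-back round trip (cvtsd2si, cvtsi2sd, ucomisd); out-of-range and unordered values are additionally caught by the unsigned compare against the array size. A small, well-defined C version of that check (illustrative only):

#include <stdint.h>

/* Return 1 and set *i if d is exactly representable as an int32 key. */
int num_to_intkey(double d, int32_t *i)
{
  if (!(d >= -2147483648.0 && d < 2147483648.0))
    return 0;                    /* Out of range or NaN: use the fallback. */
  int32_t k = (int32_t)d;        /* cvtsd2si */
  if ((double)k != d)
    return 0;                    /* cvtsi2sd + ucomisd: not an exact int. */
  *i = k;
  return 1;
}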
+ | jz >3 + | mov TAB:RA, TAB:RB->metatable + | test byte TAB:RA->nomm, 1<<MM_index + | jz ->vmeta_tgetv // 'no __index' flag NOT set: check. + | movzx RA, PC_RA // Restore RA. + |3: + | mov dword [BASE+RA*8+4], LJ_TNIL + | jmp <1 + | + |5: // String key? + | checkstr RC, ->vmeta_tgetv + | mov STR:RC, [BASE+RC*8] + | jmp ->BC_TGETS_Z + break; + case BC_TGETS: + | ins_ABC // RA = dst, RB = table, RC = str const (~) + | not RCa + | mov STR:RC, [KBASE+RC*4] + | checktab RB, ->vmeta_tgets + | mov TAB:RB, [BASE+RB*8] + |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA. + | mov RA, TAB:RB->hmask + | and RA, STR:RC->hash + | imul RA, #NODE + | add NODE:RA, TAB:RB->node + |1: + | cmp dword NODE:RA->key.it, LJ_TSTR + | jne >4 + | cmp dword NODE:RA->key.gcr, STR:RC + | jne >4 + | // Ok, key found. Assumes: offsetof(Node, val) == 0 + | cmp dword [RA+4], LJ_TNIL // Avoid overwriting RB in fastpath. + | je >5 // Key found, but nil value? + | movzx RC, PC_RA + | // Get node value. + |.if X64 + | mov RBa, [RA] + | mov [BASE+RC*8], RBa + |.else + | mov RB, [RA] + | mov RA, [RA+4] + | mov [BASE+RC*8], RB + | mov [BASE+RC*8+4], RA + |.endif + |2: + | ins_next + | + |3: + | movzx RC, PC_RA + | mov dword [BASE+RC*8+4], LJ_TNIL + | jmp <2 + | + |4: // Follow hash chain. + | mov NODE:RA, NODE:RA->next + | test NODE:RA, NODE:RA + | jnz <1 + | // End of hash chain: key not found, nil result. + | + |5: // Check for __index if table value is nil. + | mov TAB:RA, TAB:RB->metatable + | test TAB:RA, TAB:RA + | jz <3 // No metatable: done. + | test byte TAB:RA->nomm, 1<<MM_index + | jnz <3 // 'no __index' flag set: done. + | jmp ->vmeta_tgets // Caveat: preserve STR:RC. + break; + case BC_TGETB: + | ins_ABC // RA = dst, RB = table, RC = byte literal + | checktab RB, ->vmeta_tgetb + | mov TAB:RB, [BASE+RB*8] + | cmp RC, TAB:RB->asize + | jae ->vmeta_tgetb + | shl RC, 3 + | add RC, TAB:RB->array + | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath. + | je >2 + | // Get array slot. + |.if X64 + | mov RBa, [RC] + | mov [BASE+RA*8], RBa + |.else + | mov RB, [RC] + | mov RC, [RC+4] + | mov [BASE+RA*8], RB + | mov [BASE+RA*8+4], RC + |.endif + |1: + | ins_next + | + |2: // Check for __index if table value is nil. + | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. + | jz >3 + | mov TAB:RA, TAB:RB->metatable + | test byte TAB:RA->nomm, 1<<MM_index + | jz ->vmeta_tgetb // 'no __index' flag NOT set: check. + | movzx RA, PC_RA // Restore RA. + |3: + | mov dword [BASE+RA*8+4], LJ_TNIL + | jmp <1 + break; + + case BC_TSETV: + | ins_ABC // RA = src, RB = table, RC = key + | checktab RB, ->vmeta_tsetv + | mov TAB:RB, [BASE+RB*8] + | + | // Integer key? + if (LJ_DUALNUM) { + | checkint RC, >5 + | mov RC, dword [BASE+RC*8] + } else { + | // Convert number to int and back and compare. + | checknum RC, >5 + if (sse) { + | movsd xmm0, qword [BASE+RC*8] + | cvtsd2si RC, xmm0 + | cvtsi2sd xmm1, RC + | ucomisd xmm0, xmm1 + } else { + |.if not X64 + | fld qword [BASE+RC*8] + | fist ARG1 + | fild ARG1 + | fcomparepp // eax (RC) modified! + | mov RC, ARG1 + |.endif + } + | jne ->vmeta_tsetv // Generic numeric key? Use fallback. + } + | cmp RC, TAB:RB->asize // Takes care of unordered, too. + | jae ->vmeta_tsetv + | shl RC, 3 + | add RC, TAB:RB->array + | cmp dword [RC+4], LJ_TNIL + | je >3 // Previous value is nil? + |1: + | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) + | jnz >7 + |2: // Set array slot. 
+ |.if X64 + | mov RBa, [BASE+RA*8] + | mov [RC], RBa + |.else + | mov RB, [BASE+RA*8+4] + | mov RA, [BASE+RA*8] + | mov [RC+4], RB + | mov [RC], RA + |.endif + | ins_next + | + |3: // Check for __newindex if previous value is nil. + | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. + | jz <1 + | mov TAB:RA, TAB:RB->metatable + | test byte TAB:RA->nomm, 1<<MM_newindex + | jz ->vmeta_tsetv // 'no __newindex' flag NOT set: check. + | movzx RA, PC_RA // Restore RA. + | jmp <1 + | + |5: // String key? + | checkstr RC, ->vmeta_tsetv + | mov STR:RC, [BASE+RC*8] + | jmp ->BC_TSETS_Z + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, RA + | movzx RA, PC_RA // Restore RA. + | jmp <2 + break; + case BC_TSETS: + | ins_ABC // RA = src, RB = table, RC = str const (~) + | not RCa + | mov STR:RC, [KBASE+RC*4] + | checktab RB, ->vmeta_tsets + | mov TAB:RB, [BASE+RB*8] + |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA. + | mov RA, TAB:RB->hmask + | and RA, STR:RC->hash + | imul RA, #NODE + | mov byte TAB:RB->nomm, 0 // Clear metamethod cache. + | add NODE:RA, TAB:RB->node + |1: + | cmp dword NODE:RA->key.it, LJ_TSTR + | jne >5 + | cmp dword NODE:RA->key.gcr, STR:RC + | jne >5 + | // Ok, key found. Assumes: offsetof(Node, val) == 0 + | cmp dword [RA+4], LJ_TNIL + | je >4 // Previous value is nil? + |2: + | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) + | jnz >7 + |3: // Set node value. + | movzx RC, PC_RA + |.if X64 + | mov RBa, [BASE+RC*8] + | mov [RA], RBa + |.else + | mov RB, [BASE+RC*8+4] + | mov RC, [BASE+RC*8] + | mov [RA+4], RB + | mov [RA], RC + |.endif + | ins_next + | + |4: // Check for __newindex if previous value is nil. + | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. + | jz <2 + | mov TMP1, RA // Save RA. + | mov TAB:RA, TAB:RB->metatable + | test byte TAB:RA->nomm, 1<<MM_newindex + | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check. + | mov RA, TMP1 // Restore RA. + | jmp <2 + | + |5: // Follow hash chain. + | mov NODE:RA, NODE:RA->next + | test NODE:RA, NODE:RA + | jnz <1 + | // End of hash chain: key not found, add a new one. + | + | // But check for __newindex first. + | mov TAB:RA, TAB:RB->metatable + | test TAB:RA, TAB:RA + | jz >6 // No metatable: continue. + | test byte TAB:RA->nomm, 1<<MM_newindex + | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check. + |6: + | mov TMP1, STR:RC + | mov TMP2, LJ_TSTR + | mov TMP3, TAB:RB // Save TAB:RB for us. + |.if X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE + | lea CARG3, TMP1 + | mov CARG2d, TAB:RB + | mov L:RB, L:CARG1d + |.else + | lea RC, TMP1 // Store temp. TValue in TMP1/TMP2. + | mov ARG2, TAB:RB + | mov L:RB, SAVE_L + | mov ARG3, RC + | mov ARG1, L:RB + | mov L:RB->base, BASE + |.endif + | mov SAVE_PC, PC + | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) + | // Handles write barrier for the new key. TValue * returned in eax (RC). + | mov BASE, L:RB->base + | mov TAB:RB, TMP3 // Need TAB:RB for barrier. + | mov RA, eax + | jmp <2 // Must check write barrier for value. + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, RC // Destroys STR:RC. 
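BC_TGETS/BC_TSETS above find a string key by masking the interned string's hash with the table's hmask, scaling by the node size and walking the next chain until the key matches or the chain ends. A self-contained C sketch of that chained hash lookup (the types and names below are simplified stand-ins, not the LuaJIT structures; the plain pointer compare works because strings are interned):

#include <stdint.h>
#include <stddef.h>

typedef struct Node {
  const void *key;        /* Stand-in for the GCstr pointer compare. */
  const void *val;
  struct Node *next;      /* Hash-chain link, as in NODE->next above. */
} Node;

typedef struct Tab {
  Node *node;             /* Node array of hmask+1 entries. */
  uint32_t hmask;         /* Power-of-two size minus one. */
} Tab;

/* Return the value for an interned key, or NULL if it is not present. */
const void *tab_getstr(const Tab *t, const void *key, uint32_t hash)
{
  Node *n = &t->node[hash & t->hmask];  /* and hmask; imul #NODE; add node */
  do {
    if (n->key == key)                  /* Key found in this slot. */
      return n->val;
    n = n->next;                        /* Follow hash chain. */
  } while (n != NULL);
  return NULL;                          /* End of chain: key not found. */
}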
+ | jmp <3 + break; + case BC_TSETB: + | ins_ABC // RA = src, RB = table, RC = byte literal + | checktab RB, ->vmeta_tsetb + | mov TAB:RB, [BASE+RB*8] + | cmp RC, TAB:RB->asize + | jae ->vmeta_tsetb + | shl RC, 3 + | add RC, TAB:RB->array + | cmp dword [RC+4], LJ_TNIL + | je >3 // Previous value is nil? + |1: + | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) + | jnz >7 + |2: // Set array slot. + |.if X64 + | mov RAa, [BASE+RA*8] + | mov [RC], RAa + |.else + | mov RB, [BASE+RA*8+4] + | mov RA, [BASE+RA*8] + | mov [RC+4], RB + | mov [RC], RA + |.endif + | ins_next + | + |3: // Check for __newindex if previous value is nil. + | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath. + | jz <1 + | mov TAB:RA, TAB:RB->metatable + | test byte TAB:RA->nomm, 1<<MM_newindex + | jz ->vmeta_tsetb // 'no __newindex' flag NOT set: check. + | movzx RA, PC_RA // Restore RA. + | jmp <1 + | + |7: // Possible table write barrier for the value. Skip valiswhite check. + | barrierback TAB:RB, RA + | movzx RA, PC_RA // Restore RA. + | jmp <2 + break; + + case BC_TSETM: + | ins_AD // RA = base (table at base-1), RD = num const (start index) + | mov TMP1, KBASE // Need one more free register. + | mov KBASE, dword [KBASE+RD*8] // Integer constant is in lo-word. + |1: + | lea RA, [BASE+RA*8] + | mov TAB:RB, [RA-8] // Guaranteed to be a table. + | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table) + | jnz >7 + |2: + | mov RD, MULTRES + | sub RD, 1 + | jz >4 // Nothing to copy? + | add RD, KBASE // Compute needed size. + | cmp RD, TAB:RB->asize + | ja >5 // Doesn't fit into array part? + | sub RD, KBASE + | shl KBASE, 3 + | add KBASE, TAB:RB->array + |3: // Copy result slots to table. + |.if X64 + | mov RBa, [RA] + | add RA, 8 + | mov [KBASE], RBa + |.else + | mov RB, [RA] + | mov [KBASE], RB + | mov RB, [RA+4] + | add RA, 8 + | mov [KBASE+4], RB + |.endif + | add KBASE, 8 + | sub RD, 1 + | jnz <3 + |4: + | mov KBASE, TMP1 + | ins_next + | + |5: // Need to resize array part. + |.if X64 + | mov L:CARG1d, SAVE_L + | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE. + | mov CARG2d, TAB:RB + | mov CARG3d, RD + | mov L:RB, L:CARG1d + |.else + | mov ARG2, TAB:RB + | mov L:RB, SAVE_L + | mov L:RB->base, BASE + | mov ARG3, RD + | mov ARG1, L:RB + |.endif + | mov SAVE_PC, PC + | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize) + | mov BASE, L:RB->base + | movzx RA, PC_RA // Restore RA. + | jmp <1 // Retry. + | + |7: // Possible table write barrier for any value. Skip valiswhite check. + | barrierback TAB:RB, RD + | jmp <2 + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALL: case BC_CALLM: + | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs + if (op == BC_CALLM) { + | add NARGS:RD, MULTRES + } + | cmp dword [BASE+RA*8+4], LJ_TFUNC + | mov LFUNC:RB, [BASE+RA*8] + | jne ->vmeta_call_ra + | lea BASE, [BASE+RA*8+8] + | ins_call + break; + + case BC_CALLMT: + | ins_AD // RA = base, RD = extra_nargs + | add NARGS:RD, MULTRES + | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op. + break; + case BC_CALLT: + | ins_AD // RA = base, RD = nargs+1 + | lea RA, [BASE+RA*8+8] + | mov KBASE, BASE // Use KBASE for move + vmeta_call hint. + | mov LFUNC:RB, [RA-8] + | cmp dword [RA-4], LJ_TFUNC + | jne ->vmeta_call + |->BC_CALLT_Z: + | mov PC, [BASE-4] + | test PC, FRAME_TYPE + | jnz >7 + |1: + | mov [BASE-8], LFUNC:RB // Copy function down, reloaded below. 
+ | mov MULTRES, NARGS:RD + | sub NARGS:RD, 1 + | jz >3 + |2: // Move args down. + |.if X64 + | mov RBa, [RA] + | add RA, 8 + | mov [KBASE], RBa + |.else + | mov RB, [RA] + | mov [KBASE], RB + | mov RB, [RA+4] + | add RA, 8 + | mov [KBASE+4], RB + |.endif + | add KBASE, 8 + | sub NARGS:RD, 1 + | jnz <2 + | + | mov LFUNC:RB, [BASE-8] + |3: + | mov NARGS:RD, MULTRES + | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function? + | ja >5 + |4: + | ins_callt + | + |5: // Tailcall to a fast function. + | test PC, FRAME_TYPE // Lua frame below? + | jnz <4 + | movzx RA, PC_RA + | not RAa + | lea RA, [BASE+RA*8] + | mov LFUNC:KBASE, [RA-8] // Need to prepare KBASE. + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | jmp <4 + | + |7: // Tailcall from a vararg function. + | sub PC, FRAME_VARG + | test PC, FRAME_TYPEP + | jnz >8 // Vararg frame below? + | sub BASE, PC // Need to relocate BASE/KBASE down. + | mov KBASE, BASE + | mov PC, [BASE-4] + | jmp <1 + |8: + | add PC, FRAME_VARG + | jmp <1 + break; + + case BC_ITERC: + | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1) + | lea RA, [BASE+RA*8+8] // fb = base+1 + |.if X64 + | mov RBa, [RA-24] // Copy state. fb[0] = fb[-3]. + | mov RCa, [RA-16] // Copy control var. fb[1] = fb[-2]. + | mov [RA], RBa + | mov [RA+8], RCa + |.else + | mov RB, [RA-24] // Copy state. fb[0] = fb[-3]. + | mov RC, [RA-20] + | mov [RA], RB + | mov [RA+4], RC + | mov RB, [RA-16] // Copy control var. fb[1] = fb[-2]. + | mov RC, [RA-12] + | mov [RA+8], RB + | mov [RA+12], RC + |.endif + | mov LFUNC:RB, [RA-32] // Copy callable. fb[-1] = fb[-4] + | mov RC, [RA-28] + | mov [RA-8], LFUNC:RB + | mov [RA-4], RC + | cmp RC, LJ_TFUNC // Handle like a regular 2-arg call. + | mov NARGS:RD, 2+1 + | jne ->vmeta_call + | mov BASE, RA + | ins_call + break; + + case BC_ITERN: + | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1)) +#if LJ_HASJIT + | // NYI: add hotloop, record BC_ITERN. +#endif + | mov TMP1, KBASE // Need two more free registers. + | mov TMP2, DISPATCH + | mov TAB:RB, [BASE+RA*8-16] + | mov RC, [BASE+RA*8-8] // Get index from control var. + | mov DISPATCH, TAB:RB->asize + | add PC, 4 + | mov KBASE, TAB:RB->array + |1: // Traverse array part. + | cmp RC, DISPATCH; jae >5 // Index points after array part? + | cmp dword [KBASE+RC*8+4], LJ_TNIL; je >4 + if (LJ_DUALNUM) { + | mov dword [BASE+RA*8+4], LJ_TISNUM + | mov dword [BASE+RA*8], RC + } else if (sse) { + | cvtsi2sd xmm0, RC + } else { + | fild dword [BASE+RA*8-8] + } + | // Copy array slot to returned value. + |.if X64 + | mov RBa, [KBASE+RC*8] + | mov [BASE+RA*8+8], RBa + |.else + | mov RB, [KBASE+RC*8+4] + | mov [BASE+RA*8+12], RB + | mov RB, [KBASE+RC*8] + | mov [BASE+RA*8+8], RB + |.endif + | add RC, 1 + | // Return array index as a numeric key. + if (LJ_DUALNUM) { + | // See above. + } else if (sse) { + | movsd qword [BASE+RA*8], xmm0 + } else { + | fstp qword [BASE+RA*8] + } + | mov [BASE+RA*8-8], RC // Update control var. + |2: + | movzx RD, PC_RD // Get target from ITERL. + | branchPC RD + |3: + | mov DISPATCH, TMP2 + | mov KBASE, TMP1 + | ins_next + | + |4: // Skip holes in array part. + | add RC, 1 + if (!LJ_DUALNUM && !sse) { + | mov [BASE+RA*8-8], RC + } + | jmp <1 + | + |5: // Traverse hash part. + | sub RC, DISPATCH + |6: + | cmp RC, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1. 
+ | imul KBASE, RC, #NODE + | add NODE:KBASE, TAB:RB->node + | cmp dword NODE:KBASE->val.it, LJ_TNIL; je >7 + | lea DISPATCH, [RC+DISPATCH+1] + | // Copy key and value from hash slot. + |.if X64 + | mov RBa, NODE:KBASE->key + | mov RCa, NODE:KBASE->val + | mov [BASE+RA*8], RBa + | mov [BASE+RA*8+8], RCa + |.else + | mov RB, NODE:KBASE->key.gcr + | mov RC, NODE:KBASE->key.it + | mov [BASE+RA*8], RB + | mov [BASE+RA*8+4], RC + | mov RB, NODE:KBASE->val.gcr + | mov RC, NODE:KBASE->val.it + | mov [BASE+RA*8+8], RB + | mov [BASE+RA*8+12], RC + |.endif + | mov [BASE+RA*8-8], DISPATCH + | jmp <2 + | + |7: // Skip holes in hash part. + | add RC, 1 + | jmp <6 + break; + + case BC_ISNEXT: + | ins_AD // RA = base, RD = target (points to ITERN) + | cmp dword [BASE+RA*8-20], LJ_TFUNC; jne >5 + | mov CFUNC:RB, [BASE+RA*8-24] + | cmp dword [BASE+RA*8-12], LJ_TTAB; jne >5 + | cmp dword [BASE+RA*8-4], LJ_TNIL; jne >5 + | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5 + | branchPC RD + | mov dword [BASE+RA*8-8], 0 // Initialize control var. + |1: + | ins_next + |5: // Despecialize bytecode if any of the checks fail. + | mov PC_OP, BC_JMP + | branchPC RD + | mov byte [PC], BC_ITERC + | jmp <1 + break; + + case BC_VARG: + | ins_ABC // RA = base, RB = nresults+1, RC = numparams + | mov TMP1, KBASE // Need one more free register. + | lea KBASE, [BASE+RC*8+(8+FRAME_VARG)] + | lea RA, [BASE+RA*8] + | sub KBASE, [BASE-4] + | // Note: KBASE may now be even _above_ BASE if nargs was < numparams. + | test RB, RB + | jz >5 // Copy all varargs? + | lea RB, [RA+RB*8-8] + | cmp KBASE, BASE // No vararg slots? + | jnb >2 + |1: // Copy vararg slots to destination slots. + |.if X64 + | mov RCa, [KBASE-8] + | add KBASE, 8 + | mov [RA], RCa + |.else + | mov RC, [KBASE-8] + | mov [RA], RC + | mov RC, [KBASE-4] + | add KBASE, 8 + | mov [RA+4], RC + |.endif + | add RA, 8 + | cmp RA, RB // All destination slots filled? + | jnb >3 + | cmp KBASE, BASE // No more vararg slots? + | jb <1 + |2: // Fill up remainder with nil. + | mov dword [RA+4], LJ_TNIL + | add RA, 8 + | cmp RA, RB + | jb <2 + |3: + | mov KBASE, TMP1 + | ins_next + | + |5: // Copy all varargs. + | mov MULTRES, 1 // MULTRES = 0+1 + | mov RC, BASE + | sub RC, KBASE + | jbe <3 // No vararg slots? + | mov RB, RC + | shr RB, 3 + | add RB, 1 + | mov MULTRES, RB // MULTRES = #varargs+1 + | mov L:RB, SAVE_L + | add RC, RA + | cmp RC, L:RB->maxstack + | ja >7 // Need to grow stack? + |6: // Copy all vararg slots. + |.if X64 + | mov RCa, [KBASE-8] + | add KBASE, 8 + | mov [RA], RCa + |.else + | mov RC, [KBASE-8] + | mov [RA], RC + | mov RC, [KBASE-4] + | add KBASE, 8 + | mov [RA+4], RC + |.endif + | add RA, 8 + | cmp KBASE, BASE // No more vararg slots? + | jb <6 + | jmp <3 + | + |7: // Grow stack for varargs. + | mov L:RB->base, BASE + | mov L:RB->top, RA + | mov SAVE_PC, PC + | sub KBASE, BASE // Need delta, because BASE may change. + | mov FCARG2, MULTRES + | sub FCARG2, 1 + | mov FCARG1, L:RB + | call extern lj_state_growstack@8 // (lua_State *L, int n) + | mov BASE, L:RB->base + | mov RA, L:RB->top + | add KBASE, BASE + | jmp <6 + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + | ins_AD // RA = results, RD = extra_nresults + | add RD, MULTRES // MULTRES >=1, so RD >=1. + | // Fall through. Assumes BC_RET follows and ins_AD is a no-op. 
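BC_ITERN above drives the specialized next() iteration with a single control index: while it is below asize it walks the array part and skips nil holes, and once it passes the array size it is rebased and used to walk the hash nodes. A rough standalone C sketch of that two-phase index (simplified; the real code also copies the key/value pair and is paired with the BC_ISNEXT despecialization check):

#include <stdint.h>

/* One step of the two-phase iteration index: returns the next non-empty
   slot index, or -1 when the iteration is finished.  asize is the array
   part size, nhash the number of hash nodes; the *_empty callbacks are
   illustrative stand-ins for the LJ_TNIL checks in the assembly. */
int32_t iter_next(uint32_t idx, uint32_t asize, uint32_t nhash,
                  int (*array_empty)(uint32_t), int (*hash_empty)(uint32_t))
{
  while (idx < asize) {            /* 1: traverse array part. */
    if (!array_empty(idx)) return (int32_t)idx;
    idx++;                         /* 4: skip holes in array part. */
  }
  idx -= asize;                    /* 5: rebase index into hash part. */
  while (idx < nhash) {            /* 6: traverse hash part. */
    if (!hash_empty(idx)) return (int32_t)(asize + idx);
    idx++;                         /* 7: skip holes in hash part. */
  }
  return -1;                       /* End of iteration. */
}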
+ break; + + case BC_RET: case BC_RET0: case BC_RET1: + | ins_AD // RA = results, RD = nresults+1 + if (op != BC_RET0) { + | shl RA, 3 + } + |1: + | mov PC, [BASE-4] + | mov MULTRES, RD // Save nresults+1. + | test PC, FRAME_TYPE // Check frame type marker. + | jnz >7 // Not returning to a fixarg Lua func? + switch (op) { + case BC_RET: + |->BC_RET_Z: + | mov KBASE, BASE // Use KBASE for result move. + | sub RD, 1 + | jz >3 + |2: // Move results down. + |.if X64 + | mov RBa, [KBASE+RA] + | mov [KBASE-8], RBa + |.else + | mov RB, [KBASE+RA] + | mov [KBASE-8], RB + | mov RB, [KBASE+RA+4] + | mov [KBASE-4], RB + |.endif + | add KBASE, 8 + | sub RD, 1 + | jnz <2 + |3: + | mov RD, MULTRES // Note: MULTRES may be >255. + | movzx RB, PC_RB // So cannot compare with RDL! + |5: + | cmp RB, RD // More results expected? + | ja >6 + break; + case BC_RET1: + |.if X64 + | mov RBa, [BASE+RA] + | mov [BASE-8], RBa + |.else + | mov RB, [BASE+RA+4] + | mov [BASE-4], RB + | mov RB, [BASE+RA] + | mov [BASE-8], RB + |.endif + /* fallthrough */ + case BC_RET0: + |5: + | cmp PC_RB, RDL // More results expected? + | ja >6 + default: + break; + } + | movzx RA, PC_RA + | not RAa // Note: ~RA = -(RA+1) + | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8 + | mov LFUNC:KBASE, [BASE-8] + | mov KBASE, LFUNC:KBASE->pc + | mov KBASE, [KBASE+PC2PROTO(k)] + | ins_next + | + |6: // Fill up results with nil. + if (op == BC_RET) { + | mov dword [KBASE-4], LJ_TNIL // Note: relies on shifted base. + | add KBASE, 8 + } else { + | mov dword [BASE+RD*8-12], LJ_TNIL + } + | add RD, 1 + | jmp <5 + | + |7: // Non-standard return case. + | lea RB, [PC-FRAME_VARG] + | test RB, FRAME_TYPEP + | jnz ->vm_return + | // Return from vararg function: relocate BASE down and RA up. + | sub BASE, RB + if (op != BC_RET0) { + | add RA, RB + } + | jmp <1 + break; + + /* -- Loops and branches ------------------------------------------------ */ + + |.define FOR_IDX, [RA]; .define FOR_TIDX, dword [RA+4] + |.define FOR_STOP, [RA+8]; .define FOR_TSTOP, dword [RA+12] + |.define FOR_STEP, [RA+16]; .define FOR_TSTEP, dword [RA+20] + |.define FOR_EXT, [RA+24]; .define FOR_TEXT, dword [RA+28] + + case BC_FORL: +#if LJ_HASJIT + | hotloop RB +#endif + | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op. + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + vk = (op == BC_IFORL || op == BC_JFORL); + | ins_AJ // RA = base, RD = target (after end of loop or start of loop) + | lea RA, [BASE+RA*8] + if (LJ_DUALNUM) { + | cmp FOR_TIDX, LJ_TISNUM; jne >9 + if (!vk) { + | cmp FOR_TSTOP, LJ_TISNUM; jne ->vmeta_for + | cmp FOR_TSTEP, LJ_TISNUM; jne ->vmeta_for + | mov RB, dword FOR_IDX + | cmp dword FOR_STEP, 0; jl >5 + } else { +#ifdef LUA_USE_ASSERT + | cmp FOR_TSTOP, LJ_TISNUM; jne ->assert_bad_for_arg_type + | cmp FOR_TSTEP, LJ_TISNUM; jne ->assert_bad_for_arg_type +#endif + | mov RB, dword FOR_STEP + | test RB, RB; js >5 + | add RB, dword FOR_IDX; jo >1 + | mov dword FOR_IDX, RB + } + | cmp RB, dword FOR_STOP + | mov FOR_TEXT, LJ_TISNUM + | mov dword FOR_EXT, RB + if (op == BC_FORI) { + | jle >7 + |1: + |6: + | branchPC RD + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RD, PC_RD + | jle =>BC_JLOOP + |1: + |6: + } else if (op == BC_IFORL) { + | jg >7 + |6: + | branchPC RD + |1: + } else { + | jle =>BC_JLOOP + |1: + |6: + } + |7: + | ins_next + | + |5: // Invert check for negative step. 
+ if (vk) { + | add RB, dword FOR_IDX; jo <1 + | mov dword FOR_IDX, RB + } + | cmp RB, dword FOR_STOP + | mov FOR_TEXT, LJ_TISNUM + | mov dword FOR_EXT, RB + if (op == BC_FORI) { + | jge <7 + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RD, PC_RD + | jge =>BC_JLOOP + } else if (op == BC_IFORL) { + | jl <7 + } else { + | jge =>BC_JLOOP + } + | jmp <6 + |9: // Fallback to FP variant. + } else if (!vk) { + | cmp FOR_TIDX, LJ_TISNUM + } + if (!vk) { + | jae ->vmeta_for + | cmp FOR_TSTOP, LJ_TISNUM; jae ->vmeta_for + } else { +#ifdef LUA_USE_ASSERT + | cmp FOR_TSTOP, LJ_TISNUM; jae ->assert_bad_for_arg_type + | cmp FOR_TSTEP, LJ_TISNUM; jae ->assert_bad_for_arg_type +#endif + } + | mov RB, FOR_TSTEP // Load type/hiword of for step. + if (!vk) { + | cmp RB, LJ_TISNUM; jae ->vmeta_for + } + if (sse) { + | movsd xmm0, qword FOR_IDX + | movsd xmm1, qword FOR_STOP + if (vk) { + | addsd xmm0, qword FOR_STEP + | movsd qword FOR_IDX, xmm0 + | test RB, RB; js >3 + } else { + | jl >3 + } + | ucomisd xmm1, xmm0 + |1: + | movsd qword FOR_EXT, xmm0 + } else { + | fld qword FOR_STOP + | fld qword FOR_IDX + if (vk) { + | fadd qword FOR_STEP // nidx = idx + step + | fst qword FOR_IDX + | fst qword FOR_EXT + | test RB, RB; js >1 + } else { + | fst qword FOR_EXT + | jl >1 + } + | fxch // Swap lim/(n)idx if step non-negative. + |1: + | fcomparepp // eax (RD) modified if !cmov. + if (!cmov) { + | movzx RD, PC_RD // Need to reload RD. + } + } + if (op == BC_FORI) { + if (LJ_DUALNUM) { + | jnb <7 + } else { + | jnb >2 + | branchPC RD + } + } else if (op == BC_JFORI) { + | branchPC RD + | movzx RD, PC_RD + | jnb =>BC_JLOOP + } else if (op == BC_IFORL) { + if (LJ_DUALNUM) { + | jb <7 + } else { + | jb >2 + | branchPC RD + } + } else { + | jnb =>BC_JLOOP + } + if (LJ_DUALNUM) { + | jmp <6 + } else { + |2: + | ins_next + } + if (sse) { + |3: // Invert comparison if step is negative. + | ucomisd xmm0, xmm1 + | jmp <1 + } + break; + + case BC_ITERL: +#if LJ_HASJIT + | hotloop RB +#endif + | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op. + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + | ins_AJ // RA = base, RD = target + | lea RA, [BASE+RA*8] + | mov RB, [RA+4] + | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil. + if (op == BC_JITERL) { + | mov [RA-4], RB + | mov RB, [RA] + | mov [RA-8], RB + | jmp =>BC_JLOOP + } else { + | branchPC RD // Otherwise save control var + branch. + | mov RD, [RA] + | mov [RA-4], RB + | mov [RA-8], RD + } + |1: + | ins_next + break; + + case BC_LOOP: + | ins_A // RA = base, RD = target (loop extent) + | // Note: RA/RD is only used by trace recorder to determine scope/extent + | // This opcode does NOT jump, it's only purpose is to detect a hot loop. +#if LJ_HASJIT + | hotloop RB +#endif + | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op. + break; + + case BC_ILOOP: + | ins_A // RA = base, RD = target (loop extent) + | ins_next + break; + + case BC_JLOOP: +#if LJ_HASJIT + | ins_AD // RA = base (ignored), RD = traceno + | mov RA, [DISPATCH+DISPATCH_J(trace)] + | mov TRACE:RD, [RA+RD*4] + | mov RDa, TRACE:RD->mcode + | mov L:RB, SAVE_L + | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE + | mov [DISPATCH+DISPATCH_GL(jit_L)], L:RB + | // Save additional callee-save registers only used in compiled code. 
+ |.if X64WIN + | mov TMPQ, r12 + | mov TMPa, r13 + | mov CSAVE_4, r14 + | mov CSAVE_3, r15 + | mov RAa, rsp + | sub rsp, 9*16+4*8 + | movdqa [RAa], xmm6 + | movdqa [RAa-1*16], xmm7 + | movdqa [RAa-2*16], xmm8 + | movdqa [RAa-3*16], xmm9 + | movdqa [RAa-4*16], xmm10 + | movdqa [RAa-5*16], xmm11 + | movdqa [RAa-6*16], xmm12 + | movdqa [RAa-7*16], xmm13 + | movdqa [RAa-8*16], xmm14 + | movdqa [RAa-9*16], xmm15 + |.elif X64 + | mov TMPQ, r12 + | mov TMPa, r13 + | sub rsp, 16 + |.endif + | jmp RDa +#endif + break; + + case BC_JMP: + | ins_AJ // RA = unused, RD = target + | branchPC RD + | ins_next + break; + + /* -- Function headers -------------------------------------------------- */ + + /* + ** Reminder: A function may be called with func/args above L->maxstack, + ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot, + ** too. This means all FUNC* ops (including fast functions) must check + ** for stack overflow _before_ adding more slots! + */ + + case BC_FUNCF: +#if LJ_HASJIT + | hotcall RB +#endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op. + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 + | mov KBASE, [PC-4+PC2PROTO(k)] + | mov L:RB, SAVE_L + | lea RA, [BASE+RA*8] // Top of frame. + | cmp RA, L:RB->maxstack + | ja ->vm_growstack_f + | movzx RA, byte [PC-4+PC2PROTO(numparams)] + | cmp NARGS:RD, RA // Check for missing parameters. + | jbe >3 + |2: + if (op == BC_JFUNCF) { + | movzx RD, PC_RD + | jmp =>BC_JLOOP + } else { + | ins_next + } + | + |3: // Clear missing parameters. + | mov dword [BASE+NARGS:RD*8-4], LJ_TNIL + | add NARGS:RD, 1 + | cmp NARGS:RD, RA + | jbe <3 + | jmp <2 + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + | int3 // NYI: compiled vararg functions + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + | ins_AD // BASE = new base, RA = framesize, RD = nargs+1 + | lea RB, [NARGS:RD*8+FRAME_VARG] + | lea RD, [BASE+NARGS:RD*8] + | mov LFUNC:KBASE, [BASE-8] + | mov [RD-4], RB // Store delta + FRAME_VARG. + | mov [RD-8], LFUNC:KBASE // Store copy of LFUNC. + | mov L:RB, SAVE_L + | lea RA, [RD+RA*8] + | cmp RA, L:RB->maxstack + | ja ->vm_growstack_v // Need to grow stack. + | mov RA, BASE + | mov BASE, RD + | movzx RB, byte [PC-4+PC2PROTO(numparams)] + | test RB, RB + | jz >2 + |1: // Copy fixarg slots up to new frame. + | add RA, 8 + | cmp RA, BASE + | jnb >3 // Less args than parameters? + | mov KBASE, [RA-8] + | mov [RD], KBASE + | mov KBASE, [RA-4] + | mov [RD+4], KBASE + | add RD, 8 + | mov dword [RA-4], LJ_TNIL // Clear old fixarg slot (help the GC). + | sub RB, 1 + | jnz <1 + |2: + if (op == BC_JFUNCV) { + | movzx RD, PC_RD + | jmp =>BC_JLOOP + } else { + | mov KBASE, [PC-4+PC2PROTO(k)] + | ins_next + } + | + |3: // Clear missing parameters. + | mov dword [RD+4], LJ_TNIL + | add RD, 8 + | sub RB, 1 + | jnz <3 + | jmp <2 + break; + + case BC_FUNCC: + case BC_FUNCCW: + | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1 + | mov CFUNC:RB, [BASE-8] + | mov KBASEa, CFUNC:RB->f + | mov L:RB, SAVE_L + | lea RD, [BASE+NARGS:RD*8-8] + | mov L:RB->base, BASE + | lea RA, [RD+8*LUA_MINSTACK] + | cmp RA, L:RB->maxstack + | mov L:RB->top, RD + if (op == BC_FUNCC) { + |.if X64 + | mov CARG1d, L:RB // Caveat: CARG1d may be RA. 
+ |.else + | mov ARG1, L:RB + |.endif + } else { + |.if X64 + | mov CARG2, KBASEa + | mov CARG1d, L:RB // Caveat: CARG1d may be RA. + |.else + | mov ARG2, KBASEa + | mov ARG1, L:RB + |.endif + } + | ja ->vm_growstack_c // Need to grow stack. + | set_vmstate C + if (op == BC_FUNCC) { + | call KBASEa // (lua_State *L) + } else { + | // (lua_State *L, lua_CFunction f) + | call aword [DISPATCH+DISPATCH_GL(wrapf)] + } + | set_vmstate INTERP + | // nresults returned in eax (RD). + | mov BASE, L:RB->base + | lea RA, [BASE+RD*8] + | neg RA + | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8 + | mov PC, [BASE-4] // Fetch PC of caller. + | jmp ->vm_returnc + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + int cmov = 1; + int sse = 0; +#ifdef LUAJIT_CPU_NOCMOV + cmov = 0; +#endif +#if defined(LUAJIT_CPU_SSE2) || defined(LJ_TARGET_X64) + sse = 1; +#endif + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx, cmov, sse); + + |.code_op + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op, cmov, sse); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. */ +static void emit_asm_debug(BuildCtx *ctx) +{ +#if LJ_64 +#define SZPTR "8" +#define BSZPTR "3" +#define REG_SP "0x7" +#define REG_RA "0x10" +#else +#define SZPTR "4" +#define BSZPTR "2" +#define REG_SP "0x4" +#define REG_RA "0x8" +#endif + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE0:\n\n", (int)ctx->codesz, CFRAME_SIZE); +#if (defined(__sun__) && defined(__svr4__)) || defined(__solaris_) + fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n"); +#else + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); +#endif + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + 
".LASFDE1:\n" + "\t.long .LASFDE1-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE1:\n\n", (int)ctx->codesz, CFRAME_SIZE); + break; + case BUILD_coffasm: + fprintf(ctx->fp, "\t.section .eh_frame,\"dr\"\n"); + fprintf(ctx->fp, + "\t.def %slj_err_unwind_dwarf; .scl 2; .type 32; .endef\n", + LJ_32 ? "_" : ""); + fprintf(ctx->fp, + "Lframe1:\n" + "\t.long LECIE1-LSCIE1\n" + "LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zP\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 5\n" /* augmentation length */ + "\t.byte 0x00\n" /* absptr */ + "\t.long %slj_err_unwind_dwarf\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + "LECIE1:\n\n", LJ_32 ? "_" : ""); + fprintf(ctx->fp, + "LSFDE1:\n" + "\t.long LEFDE1-LASFDE1\n" + "LASFDE1:\n" + "\t.long LASFDE1-Lframe1\n" + "\t.long %slj_vm_asm_begin\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + "LEFDE1:\n\n", LJ_32 ? "_" : "", (int)ctx->codesz, CFRAME_SIZE); + break; + /* Mental note: never let Apple design an assembler. + ** Or a linker. Or a plastic case. But I digress. + */ + case BUILD_machasm: { + int i; + fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n"); + fprintf(ctx->fp, + "EH_frame1:\n" + "\t.set L$set$x,LECIEX-LSCIEX\n" + "\t.long L$set$x\n" + "LSCIEX:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.ascii \"zPR\\0\"\n" + "\t.byte 0x1\n" + "\t.byte 128-" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 6\n" /* augmentation length */ + "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */ +#if LJ_64 + "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n" +#else + "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. 
*/ +#endif + "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n" + "\t.align " BSZPTR "\n" + "LECIEX:\n\n"); + for (i = 0; i < ctx->nsym; i++) { + const char *name = ctx->sym[i].name; + int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs; + if (size == 0) continue; + fprintf(ctx->fp, + "%s.eh:\n" + "LSFDE%d:\n" + "\t.set L$set$%d,LEFDE%d-LASFDE%d\n" + "\t.long L$set$%d\n" + "LASFDE%d:\n" + "\t.long LASFDE%d-EH_frame1\n" + "\t.long %s-.\n" + "\t.long %d\n" + "\t.byte 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */ +#else + "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/ + "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */ +#endif + "\t.align " BSZPTR "\n" + "LEFDE%d:\n\n", + name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i); + } +#if LJ_64 + fprintf(ctx->fp, "\t.subsections_via_symbols\n"); +#else + fprintf(ctx->fp, + "\t.non_lazy_symbol_pointer\n" + "L_lj_err_unwind_dwarf$non_lazy_ptr:\n" + ".indirect_symbol _lj_err_unwind_dwarf\n" + ".long 0\n"); +#endif + } + break; + default: /* Difficult for other modes. */ + break; + } +} + diff --git a/src/luajit2/src/buildvm_x86.h b/src/luajit2/src/buildvm_x86.h new file mode 100644 index 0000000000..7adb2637e5 --- /dev/null +++ b/src/luajit2/src/buildvm_x86.h @@ -0,0 +1,3405 @@ +/* +** This file has been pre-processed with DynASM. +** http://luajit.org/dynasm.html +** DynASM version 1.3.0, DynASM x86 version 1.3.0 +** DO NOT EDIT! The original file is in "buildvm_x86.dasc". 
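+** +** The build_actionlist[] table below is the DynASM "action list": a byte-encoded +** template stream that the buildvm tool decodes (through the dasm_put()/dasm_link()/ +** dasm_encode() machinery in dasm_x86.h) to emit the interpreter opcodes and helper +** routines written in buildvm_x86.dasc for the selected output mode (ELF, COFF or +** Mach-O assembler, object file, etc.). It is generated data and not meant to be +** read or edited by hand.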
+*/ + +#if DASM_VERSION != 10300 +#error "Version mismatch between DynASM and included encoding engine" +#endif + +#define DASM_SECTION_CODE_OP 0 +#define DASM_SECTION_CODE_SUB 1 +#define DASM_MAXSECTION 2 +static const unsigned char build_actionlist[17111] = { + 254,1,248,10,252,247,198,237,15,132,244,11,131,230,252,248,41,252,242,141, + 76,49,252,248,139,114,252,252,199,68,10,4,237,248,12,131,192,1,137,68,36, + 20,252,247,198,237,15,132,244,13,248,14,129,252,246,239,252,247,198,237,15, + 133,244,10,199,131,233,237,131,230,252,248,41,214,252,247,222,131,232,1,15, + 132,244,248,248,1,139,44,10,137,106,252,248,139,108,10,4,137,106,252,252, + 131,194,8,131,232,1,15,133,244,1,248,2,255,139,108,36,48,137,181,233,248, + 3,139,68,36,20,139,76,36,56,248,4,57,193,15,133,244,252,248,5,131,252,234, + 8,137,149,233,248,15,139,76,36,52,137,141,233,49,192,248,16,131,196,28,91, + 94,95,93,195,248,6,15,130,244,253,59,149,233,15,135,244,254,199,66,252,252, + 237,131,194,8,131,192,1,252,233,244,4,248,7,255,133,201,15,132,244,5,41,193, + 141,20,202,252,233,244,5,248,8,137,149,233,137,68,36,20,137,202,137,252,233, + 232,251,1,0,139,149,233,252,233,244,3,248,17,137,208,137,204,248,18,139,108, + 36,48,139,173,233,199,133,233,237,252,233,244,16,248,19,248,20,129,225,239, + 137,204,248,21,255,139,108,36,48,185,252,248,252,255,252,255,252,255,184, + 237,139,149,233,139,157,233,129,195,239,139,114,252,252,199,66,252,252,237, + 199,131,233,237,252,233,244,12,248,22,186,237,252,233,244,248,248,23,131, + 232,8,252,233,244,247,248,24,141,68,194,252,248,248,1,15,182,142,233,131, + 198,4,137,149,233,255,137,133,233,137,116,36,24,137,202,248,2,137,252,233, + 232,251,1,0,139,149,233,139,133,233,139,106,252,248,41,208,193,232,3,131, + 192,1,139,181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,171, + 248,25,85,87,86,83,131,252,236,28,139,108,36,48,139,76,36,52,190,237,49,192, + 141,188,253,36,233,139,157,233,129,195,239,137,189,233,137,68,36,24,137,68, + 36,52,56,133,233,15,132,244,249,199,131,233,237,136,133,233,139,149,233,139, + 133,233,41,200,193,232,3,131,192,1,41,209,139,114,252,252,137,68,36,20,252, + 247,198,237,255,15,132,244,13,252,233,244,14,248,26,85,87,86,83,131,252,236, + 28,190,237,252,233,244,247,248,27,85,87,86,83,131,252,236,28,190,237,248, + 1,139,108,36,48,139,76,36,52,139,189,233,137,124,36,52,137,108,36,24,137, + 165,233,248,2,139,157,233,129,195,239,248,3,199,131,233,237,139,149,233,255, + 1,206,41,214,139,133,233,41,200,193,232,3,131,192,1,248,28,139,105,252,248, + 129,121,253,252,252,239,15,133,244,29,248,30,137,202,137,114,252,252,139, + 181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,171,248,31, + 85,87,86,83,131,252,236,28,139,108,36,48,139,68,36,56,139,76,36,52,139,84, + 36,60,137,108,36,24,139,189,233,43,189,233,199,68,36,60,0,0,0,0,137,124,36, + 56,137,68,36,8,137,76,36,4,137,44,36,139,189,233,137,124,36,52,137,165,233, + 252,255,210,133,192,15,132,244,15,137,193,190,237,252,233,244,2,248,11,1, + 209,131,230,252,248,137,213,41,252,242,199,68,193,252,252,237,137,200,139, + 117,252,244,139,77,252,240,133,201,15,132,244,247,255,139,122,252,248,139, + 191,233,139,191,233,252,255,225,248,1,41,213,193,252,237,3,141,69,252,255, + 252,233,244,32,248,33,15,182,78,252,255,131,252,237,16,141,12,202,41,252, + 233,15,132,244,34,252,247,217,193,252,233,3,137,76,36,8,139,72,4,139,0,137, + 77,4,137,69,0,137,108,36,4,252,233,244,35,248,36,137,68,36,16,199,68,36,20, + 237,141,68,36,16,128,126,252,252,235,15,133,244,247,141,139,233,137,41,199, + 
65,4,237,137,205,252,233,244,248,248,37,255,15,182,70,252,254,255,199,68, + 36,20,237,137,68,36,16,255,252,242,15,42,192,252,242,15,17,68,36,16,255,137, + 68,36,12,219,68,36,12,221,92,36,16,255,141,68,36,16,252,233,244,247,248,38, + 15,182,70,252,254,141,4,194,248,1,15,182,110,252,255,141,44,252,234,248,2, + 137,108,36,4,139,108,36,48,137,68,36,8,137,44,36,137,149,233,137,116,36,24, + 232,251,1,1,139,149,233,133,192,15,132,244,249,248,34,15,182,78,252,253,139, + 104,4,139,0,137,108,202,4,137,4,202,139,6,15,182,204,15,182,232,131,198,4, + 193,232,16,252,255,36,171,248,3,139,141,233,137,113,252,244,141,177,233,41, + 214,139,105,252,248,184,237,252,233,244,30,248,39,137,68,36,16,199,68,36, + 20,237,141,68,36,16,128,126,252,252,235,15,133,244,247,255,141,139,233,137, + 41,199,65,4,237,137,205,252,233,244,248,248,40,15,182,70,252,254,255,141, + 68,36,16,252,233,244,247,248,41,15,182,70,252,254,141,4,194,248,1,15,182, + 110,252,255,141,44,252,234,248,2,137,108,36,4,139,108,36,48,137,68,36,8,137, + 44,36,137,149,233,137,116,36,24,232,251,1,2,139,149,233,133,192,15,132,244, + 249,15,182,78,252,253,139,108,202,4,139,12,202,137,104,4,137,8,248,42,139, + 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,3,139,141, + 233,137,113,252,244,15,182,70,252,253,139,108,194,4,139,4,194,137,105,20, + 137,65,16,141,177,233,41,214,139,105,252,248,184,237,252,233,244,30,248,43, + 15,182,110,252,252,141,4,194,141,12,202,137,108,36,12,139,108,36,48,137,68, + 36,8,137,76,36,4,137,44,36,137,149,233,137,116,36,24,232,251,1,3,248,3,139, + 149,233,255,131,252,248,1,15,135,244,44,248,4,141,118,4,15,130,244,252,248, + 5,15,183,70,252,254,141,180,253,134,233,248,6,139,6,15,182,204,15,182,232, + 131,198,4,193,232,16,252,255,36,171,248,45,131,198,4,129,120,253,4,239,15, + 130,244,5,252,233,244,6,248,46,129,120,253,4,239,252,233,244,4,248,47,131, + 252,238,4,137,108,36,12,139,108,36,48,137,68,36,8,137,76,36,4,137,44,36,137, + 149,233,255,137,116,36,24,232,251,1,4,252,233,244,3,248,48,255,131,252,238, + 4,139,108,36,48,137,149,233,137,252,233,139,86,252,252,137,116,36,24,232, + 251,1,5,252,233,244,3,255,248,49,255,15,182,110,252,255,255,248,50,141,4, + 199,252,233,244,247,248,51,255,248,52,141,4,199,141,44,252,234,149,252,233, + 244,248,248,53,141,4,194,137,197,252,233,244,248,248,54,255,248,55,141,4, + 194,248,1,141,44,252,234,248,2,141,12,202,137,108,36,8,139,108,36,48,137, + 68,36,12,15,182,70,252,252,137,76,36,4,137,68,36,16,137,44,36,137,149,233, + 137,116,36,24,232,251,1,6,139,149,233,133,192,15,132,244,42,248,44,137,193, + 41,208,137,113,252,244,141,176,233,184,237,252,233,244,28,248,56,139,108, + 36,48,137,149,233,141,20,194,137,252,233,137,116,36,24,232,251,1,7,139,149, + 233,255,133,192,15,133,244,44,15,183,70,252,254,139,12,194,252,233,244,57, + 255,252,233,244,44,255,248,58,141,76,202,8,248,29,137,76,36,20,137,68,36, + 16,131,252,233,8,141,4,193,139,108,36,48,137,76,36,4,137,68,36,8,137,44,36, + 137,149,233,137,116,36,24,232,251,1,8,139,149,233,139,76,36,20,139,68,36, + 16,139,105,252,248,131,192,1,57,215,15,132,244,59,137,202,137,114,252,252, + 139,181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,171,248, + 60,139,108,36,48,137,149,233,137,202,137,252,233,137,116,36,24,232,251,1, + 9,139,149,233,139,70,252,252,15,182,204,15,182,232,193,232,16,252,255,164, + 253,171,233,248,61,129,252,248,239,15,130,244,62,139,106,4,129,252,253,239, + 15,131,244,62,139,114,252,252,137,68,36,20,137,106,252,252,139,42,137,106, + 252,248,131,232,2,15,132,244,248,255,137,209,248,1,131,193,8,139,105,4,137, + 
105,252,252,139,41,137,105,252,248,131,232,1,15,133,244,1,248,2,139,68,36, + 20,252,233,244,63,248,64,129,252,248,239,15,130,244,62,139,106,4,184,237, + 252,247,213,57,232,255,15,71,197,255,15,134,244,247,137,232,248,1,255,248, + 2,139,106,252,248,139,132,253,197,233,139,114,252,252,199,66,252,252,237, + 137,66,252,248,252,233,244,65,248,66,129,252,248,239,15,130,244,62,139,106, + 4,139,114,252,252,129,252,253,239,15,133,244,252,248,1,139,42,139,173,233, + 248,2,133,252,237,199,66,252,252,237,15,132,244,65,139,131,233,199,66,252, + 252,237,255,137,106,252,248,139,141,233,35,136,233,105,201,239,3,141,233, + 248,3,129,185,233,239,15,133,244,250,57,129,233,15,132,244,251,248,4,139, + 137,233,133,201,15,133,244,3,252,233,244,65,248,5,139,105,4,129,252,253,239, + 255,15,132,244,65,139,1,137,106,252,252,137,66,252,248,252,233,244,65,248, + 6,129,252,253,239,15,132,244,1,129,252,253,239,15,135,244,254,189,237,248, + 8,252,247,213,139,172,253,171,233,252,233,244,2,248,67,129,252,248,239,15, + 130,244,62,255,129,122,253,4,239,15,133,244,62,139,42,131,189,233,0,15,133, + 244,62,129,122,253,12,239,15,133,244,62,139,66,8,137,133,233,139,114,252, + 252,199,66,252,252,237,137,106,252,248,252,246,133,233,235,15,132,244,247, + 128,165,233,235,139,131,233,137,171,233,137,133,233,248,1,255,252,233,244, + 65,248,68,129,252,248,239,15,130,244,62,129,122,253,4,239,15,133,244,62,139, + 2,139,108,36,48,137,68,36,4,137,44,36,137,213,131,194,8,137,84,36,8,232,251, + 1,10,137,252,234,139,40,139,64,4,139,114,252,252,137,106,252,248,137,66,252, + 252,252,233,244,65,248,69,129,252,248,239,15,133,244,62,129,122,253,4,239, + 255,15,133,244,247,139,42,252,233,244,70,248,1,15,135,244,62,255,15,131,244, + 62,255,252,242,15,16,2,252,233,244,71,255,221,2,252,233,244,72,255,248,73, + 129,252,248,239,15,130,244,62,139,114,252,252,129,122,253,4,239,15,133,244, + 249,139,2,248,2,199,66,252,252,237,137,66,252,248,252,233,244,65,248,3,129, + 122,253,4,239,15,135,244,62,131,187,233,0,15,133,244,62,139,171,233,59,171, + 233,255,15,130,244,247,232,244,74,248,1,139,108,36,48,137,149,233,137,116, + 36,24,137,252,233,255,232,251,1,11,255,232,251,1,12,255,139,149,233,252,233, + 244,2,248,75,129,252,248,239,15,130,244,62,15,132,244,248,248,1,129,122,253, + 4,239,15,133,244,62,139,108,36,48,137,149,233,137,149,233,139,114,252,252, + 139,2,137,68,36,4,137,44,36,131,194,8,137,84,36,8,137,116,36,24,232,251,1, + 13,139,149,233,133,192,15,132,244,249,139,106,8,139,66,12,137,106,252,248, + 137,66,252,252,139,106,16,139,66,20,137,42,137,66,4,248,76,184,237,255,252, + 233,244,77,248,2,199,66,12,237,252,233,244,1,248,3,199,66,252,252,237,252, + 233,244,65,248,78,129,252,248,239,15,130,244,62,139,42,129,122,253,4,239, + 15,133,244,62,255,131,189,233,0,15,133,244,62,255,139,106,252,248,139,133, + 233,139,114,252,252,199,66,252,252,237,137,66,252,248,199,66,12,237,184,237, + 252,233,244,77,248,79,129,252,248,239,15,130,244,62,129,122,253,4,239,15, + 133,244,62,129,122,253,12,239,255,139,114,252,252,255,139,66,8,131,192,1, + 199,66,252,252,237,137,66,252,248,255,252,242,15,16,66,8,189,0,0,252,240, + 63,102,15,110,205,102,15,112,201,81,252,242,15,88,193,252,242,15,45,192,252, + 242,15,17,66,252,248,255,221,66,8,217,232,222,193,219,20,36,221,90,252,248, + 139,4,36,255,139,42,59,133,233,15,131,244,248,193,224,3,3,133,233,248,1,129, + 120,253,4,239,15,132,244,80,139,40,139,64,4,137,42,137,66,4,252,233,244,76, + 248,2,131,189,233,0,15,132,244,80,137,252,233,137,213,137,194,232,251,1,14, + 
137,252,234,133,192,15,133,244,1,248,80,184,237,252,233,244,77,248,81,255, + 139,106,252,248,139,133,233,139,114,252,252,199,66,252,252,237,137,66,252, + 248,255,199,66,12,237,199,66,8,0,0,0,0,255,15,87,192,252,242,15,17,66,8,255, + 217,252,238,221,90,8,255,184,237,252,233,244,77,248,82,129,252,248,239,15, + 130,244,62,141,74,8,131,232,1,190,237,248,1,15,182,171,233,193,252,237,235, + 131,229,1,1,252,238,252,233,244,28,248,83,129,252,248,239,15,130,244,62,129, + 122,253,12,239,15,133,244,62,255,139,106,4,137,106,12,199,66,4,237,139,42, + 139,114,8,137,106,8,137,50,141,74,16,131,232,2,190,237,252,233,244,1,248, + 84,129,252,248,239,15,130,244,62,139,42,139,114,252,252,137,116,36,24,137, + 44,36,129,122,253,4,239,15,133,244,62,131,189,233,0,15,133,244,62,128,189, + 233,235,15,135,244,62,139,141,233,15,132,244,247,255,59,141,233,15,132,244, + 62,248,1,141,116,193,252,240,59,181,233,15,135,244,62,137,181,233,139,108, + 36,48,137,149,233,131,194,8,137,149,233,141,108,194,232,41,252,245,57,206, + 15,132,244,249,248,2,139,68,46,4,137,70,252,252,139,4,46,137,70,252,248,131, + 252,238,8,57,206,15,133,244,2,248,3,137,76,36,4,49,201,137,76,36,12,137,76, + 36,8,232,244,25,199,131,233,237,255,139,108,36,48,139,52,36,139,149,233,129, + 252,248,239,15,135,244,254,248,4,139,142,233,139,190,233,137,142,233,137, + 252,254,41,206,15,132,244,252,141,4,50,193,252,238,3,59,133,233,15,135,244, + 255,137,213,41,205,248,5,139,1,137,4,41,139,65,4,137,68,41,4,131,193,8,57, + 252,249,15,133,244,5,248,6,141,70,2,199,66,252,252,237,248,7,139,116,36,24, + 137,68,36,20,185,252,248,252,255,252,255,252,255,252,247,198,237,255,15,132, + 244,13,252,233,244,14,248,8,199,66,252,252,237,139,142,233,131,252,233,8, + 137,142,233,139,1,137,2,139,65,4,137,66,4,184,237,252,233,244,7,248,9,139, + 12,36,137,185,233,137,252,242,137,252,233,232,251,1,0,139,52,36,139,149,233, + 252,233,244,4,248,85,139,106,252,248,139,173,233,139,114,252,252,137,116, + 36,24,137,44,36,131,189,233,0,15,133,244,62,255,128,189,233,235,15,135,244, + 62,139,141,233,15,132,244,247,59,141,233,15,132,244,62,248,1,141,116,193, + 252,248,59,181,233,15,135,244,62,137,181,233,139,108,36,48,137,149,233,137, + 149,233,141,108,194,252,240,41,252,245,57,206,15,132,244,249,248,2,255,139, + 68,46,4,137,70,252,252,139,4,46,137,70,252,248,131,252,238,8,57,206,15,133, + 244,2,248,3,137,76,36,4,49,201,137,76,36,12,137,76,36,8,232,244,25,199,131, + 233,237,139,108,36,48,139,52,36,139,149,233,129,252,248,239,15,135,244,254, + 248,4,139,142,233,139,190,233,137,142,233,137,252,254,41,206,15,132,244,252, + 141,4,50,193,252,238,3,59,133,233,15,135,244,255,255,137,213,41,205,248,5, + 139,1,137,4,41,139,65,4,137,68,41,4,131,193,8,57,252,249,15,133,244,5,248, + 6,141,70,1,248,7,139,116,36,24,137,68,36,20,49,201,252,247,198,237,15,132, + 244,13,252,233,244,14,248,8,137,252,242,137,252,233,232,251,1,15,248,9,139, + 12,36,137,185,233,137,252,242,137,252,233,232,251,1,0,139,52,36,139,149,233, + 252,233,244,4,248,86,139,108,36,48,252,247,133,233,237,15,132,244,62,255, + 137,149,233,141,68,194,252,248,137,133,233,49,192,137,133,233,176,235,136, + 133,233,252,233,244,16,255,248,70,255,248,72,139,114,252,252,221,90,252,248, + 252,233,244,65,255,248,87,129,252,248,239,15,130,244,62,255,129,122,253,4, + 239,15,133,244,248,139,42,131,252,253,0,15,137,244,70,252,247,221,15,136, + 244,247,248,88,248,70,139,114,252,252,199,66,252,252,237,137,106,252,248, + 252,233,244,65,248,1,139,114,252,252,199,66,252,252,0,0,224,65,199,66,252, + 
248,0,0,0,0,252,233,244,65,248,2,15,135,244,62,255,129,122,253,4,239,15,131, + 244,62,255,252,242,15,16,2,102,15,252,239,201,102,15,118,201,102,15,115,209, + 1,15,84,193,248,71,139,114,252,252,252,242,15,17,66,252,248,255,221,2,217, + 225,248,71,248,72,139,114,252,252,221,90,252,248,255,248,65,184,237,248,77, + 137,68,36,20,248,63,252,247,198,237,15,133,244,253,248,5,56,70,252,255,15, + 135,244,252,15,182,78,252,253,252,247,209,141,20,202,139,6,15,182,204,15, + 182,232,131,198,4,193,232,16,252,255,36,171,248,6,199,68,194,252,244,237, + 131,192,1,252,233,244,5,248,7,185,252,248,252,255,252,255,252,255,252,233, + 244,14,248,89,255,129,122,253,4,239,15,133,244,247,139,42,252,233,244,70, + 248,1,15,135,244,62,255,252,242,15,16,2,232,244,90,255,252,242,15,45,232, + 129,252,253,0,0,0,128,15,133,244,70,252,242,15,42,205,102,15,46,193,15,138, + 244,71,15,132,244,70,255,221,2,232,244,90,255,219,20,36,139,44,36,129,252, + 253,0,0,0,128,15,133,244,248,217,192,219,4,36,255,223,252,233,221,216,255, + 218,252,233,223,224,158,255,15,138,244,72,15,133,244,72,248,2,221,216,252, + 233,244,70,255,248,91,255,252,242,15,16,2,232,244,92,255,221,2,232,244,92, + 255,248,93,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62, + 252,242,15,81,2,252,233,244,71,255,248,93,129,252,248,239,15,130,244,62,129, + 122,253,4,239,15,131,244,62,221,2,217,252,250,252,233,244,72,255,248,94,129, + 252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,217,252,237,221, + 2,217,252,241,252,233,244,72,248,95,129,252,248,239,15,130,244,62,129,122, + 253,4,239,15,131,244,62,217,252,236,221,2,217,252,241,252,233,244,72,248, + 96,129,252,248,239,255,15,130,244,62,129,122,253,4,239,15,131,244,62,221, + 2,232,244,97,252,233,244,72,248,98,129,252,248,239,15,130,244,62,129,122, + 253,4,239,15,131,244,62,221,2,217,252,254,252,233,244,72,248,99,129,252,248, + 239,255,15,130,244,62,129,122,253,4,239,15,131,244,62,221,2,217,252,255,252, + 233,244,72,248,100,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,221,2,217,252,242,221,216,252,233,244,72,248,101,129,252,248,239,15, + 130,244,62,255,129,122,253,4,239,15,131,244,62,221,2,217,192,216,200,217, + 232,222,225,217,252,250,217,252,243,252,233,244,72,248,102,129,252,248,239, + 15,130,244,62,129,122,253,4,239,15,131,244,62,221,2,217,192,216,200,217,232, + 222,225,217,252,250,217,201,217,252,243,252,233,244,72,248,103,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,131,244,62,255,221,2,217,232,217,252, + 243,252,233,244,72,255,248,104,129,252,248,239,15,130,244,62,129,122,253, + 4,239,15,131,244,62,252,242,15,16,2,252,242,15,17,4,36,255,248,104,129,252, + 248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,221,2,221,28,36,255, + 137,213,232,251,1,16,137,252,234,252,233,244,72,255,248,105,129,252,248,239, + 15,130,244,62,129,122,253,4,239,15,131,244,62,252,242,15,16,2,252,242,15, + 17,4,36,255,248,105,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,221,2,221,28,36,255,137,213,232,251,1,17,137,252,234,252,233,244,72, + 255,248,106,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62, + 252,242,15,16,2,252,242,15,17,4,36,255,248,106,129,252,248,239,15,130,244, + 62,129,122,253,4,239,15,131,244,62,221,2,221,28,36,255,137,213,232,251,1, + 18,137,252,234,252,233,244,72,248,107,255,248,108,129,252,248,239,15,130, + 244,62,129,122,253,4,239,15,131,244,62,252,242,15,16,2,139,106,252,248,252, + 242,15,89,133,233,252,233,244,71,255,248,108,129,252,248,239,15,130,244,62, + 129,122,253,4,239,15,131,244,62,221,2,139,106,252,248,220,141,233,252,233, 
+ 244,72,255,248,109,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131, + 244,62,129,122,253,12,239,15,131,244,62,221,2,221,66,8,217,252,243,252,233, + 244,72,248,110,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244, + 62,129,122,253,12,239,255,15,131,244,62,221,66,8,221,2,217,252,253,221,217, + 252,233,244,72,248,111,129,252,248,239,15,130,244,62,139,106,4,129,252,253, + 239,15,131,244,62,139,114,252,252,139,2,137,106,252,252,137,66,252,248,209, + 229,129,252,253,0,0,224,252,255,15,131,244,249,9,232,15,132,244,249,184,252, + 254,3,0,0,129,252,253,0,0,32,0,15,130,244,250,248,1,193,252,237,21,41,197, + 255,252,242,15,42,197,255,137,108,36,16,219,68,36,16,255,139,106,252,252, + 129,229,252,255,252,255,15,128,129,205,0,0,224,63,137,106,252,252,248,2,255, + 252,242,15,17,2,255,221,26,255,184,237,252,233,244,77,248,3,255,15,87,192, + 252,233,244,2,255,217,252,238,252,233,244,2,255,248,4,255,252,242,15,16,2, + 189,0,0,80,67,102,15,110,205,102,15,112,201,81,252,242,15,89,193,252,242, + 15,17,66,252,248,255,221,2,199,68,36,16,0,0,128,90,216,76,36,16,221,90,252, + 248,255,139,106,252,252,184,52,4,0,0,209,229,252,233,244,1,255,248,112,129, + 252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,252,242,15,16,2, + 255,248,112,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62, + 221,2,255,139,106,4,139,114,252,252,209,229,129,252,253,0,0,224,252,255,15, + 132,244,250,255,15,40,224,232,244,113,252,242,15,92,224,248,1,252,242,15, + 17,66,252,248,252,242,15,17,34,255,217,192,232,244,113,220,252,233,248,1, + 221,90,252,248,221,26,255,139,66,252,252,139,106,4,49,232,15,136,244,249, + 248,2,184,237,252,233,244,77,248,3,129,252,245,0,0,0,128,137,106,4,252,233, + 244,2,248,4,255,15,87,228,252,233,244,1,255,217,252,238,217,201,252,233,244, + 1,255,248,114,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244, + 62,129,122,253,12,239,15,131,244,62,221,66,8,221,2,248,1,217,252,248,223, + 224,158,15,138,244,1,221,217,252,233,244,72,255,248,115,129,252,248,239,15, + 130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244, + 62,252,242,15,16,2,252,242,15,16,74,8,232,244,116,252,233,244,71,255,248, + 115,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122, + 253,12,239,15,131,244,62,221,2,221,66,8,232,244,116,252,233,244,72,255,248, + 117,185,2,0,0,0,129,122,253,4,239,255,15,133,244,250,139,42,248,1,57,193, + 15,131,244,70,129,124,253,202,252,252,239,15,133,244,249,59,108,202,252,248, + 15,79,108,202,252,248,131,193,1,252,233,244,1,248,3,15,135,244,62,255,252, + 233,244,252,248,4,15,135,244,62,255,252,242,15,16,2,248,5,57,193,15,131,244, + 71,129,124,253,202,252,252,239,255,15,130,244,252,15,135,244,62,252,242,15, + 42,76,202,252,248,252,233,244,253,255,248,6,252,242,15,16,76,202,252,248, + 248,7,252,242,15,93,193,131,193,1,252,233,244,5,255,221,2,248,5,57,193,15, + 131,244,72,129,124,253,202,252,252,239,255,15,130,244,252,15,135,244,255, + 219,68,202,252,248,252,233,244,253,255,15,131,244,255,255,248,6,221,68,202, + 252,248,248,7,255,219,252,233,219,209,221,217,255,80,221,225,223,224,252, + 246,196,1,15,132,244,248,217,201,248,2,221,216,88,255,248,118,185,2,0,0,0, + 129,122,253,4,239,255,15,133,244,250,139,42,248,1,57,193,15,131,244,70,129, + 124,253,202,252,252,239,15,133,244,249,59,108,202,252,248,15,76,108,202,252, + 248,131,193,1,252,233,244,1,248,3,15,135,244,62,255,248,6,252,242,15,16,76, + 202,252,248,248,7,252,242,15,95,193,131,193,1,252,233,244,5,255,219,252,233, + 
218,209,221,217,255,80,221,225,223,224,252,246,196,1,15,133,244,248,217,201, + 248,2,221,216,88,255,248,9,221,216,252,233,244,62,255,248,119,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,133,244,62,139,42,255,139,173,233, + 252,233,244,70,255,252,242,15,42,133,233,252,233,244,71,255,219,133,233,252, + 233,244,72,255,248,120,129,252,248,239,15,133,244,62,129,122,253,4,239,15, + 133,244,62,139,42,139,114,252,252,131,189,233,1,15,130,244,80,15,182,173, + 233,255,252,242,15,42,197,252,233,244,71,255,137,108,36,16,219,68,36,16,252, + 233,244,72,255,248,121,139,171,233,59,171,233,15,130,244,247,232,244,74,248, + 1,129,252,248,239,15,133,244,62,129,122,253,4,239,255,15,133,244,62,139,42, + 129,252,253,252,255,0,0,0,15,135,244,62,137,108,36,20,255,15,131,244,62,252, + 242,15,44,42,129,252,253,252,255,0,0,0,15,135,244,62,137,108,36,20,255,15, + 131,244,62,221,2,219,92,36,20,129,124,36,20,252,255,0,0,0,15,135,244,62,255, + 199,68,36,8,1,0,0,0,141,68,36,20,248,122,139,108,36,48,137,149,233,137,68, + 36,4,137,44,36,137,116,36,24,232,251,1,19,139,149,233,139,114,252,252,199, + 66,252,252,237,137,66,252,248,252,233,244,65,248,123,139,171,233,59,171,233, + 15,130,244,247,232,244,74,248,1,199,68,36,20,252,255,252,255,252,255,252, + 255,129,252,248,239,15,130,244,62,15,134,244,247,129,122,253,20,239,255,15, + 133,244,62,139,106,16,137,108,36,20,255,15,131,244,62,252,242,15,44,106,16, + 137,108,36,20,255,15,131,244,62,221,66,16,219,92,36,20,255,248,1,129,122, + 253,4,239,15,133,244,62,129,122,253,12,239,255,139,42,137,108,36,12,139,173, + 233,255,139,74,8,255,252,242,15,44,74,8,255,221,66,8,219,92,36,8,139,76,36, + 8,255,139,68,36,20,57,197,15,130,244,251,248,2,133,201,15,142,244,253,248, + 3,139,108,36,12,41,200,15,140,244,124,141,172,253,13,233,131,192,1,248,4, + 137,68,36,8,137,232,252,233,244,122,248,5,15,140,244,252,141,68,40,1,252, + 233,244,2,248,6,137,232,252,233,244,2,248,7,255,15,132,244,254,1,252,233, + 131,193,1,15,143,244,3,248,8,185,1,0,0,0,252,233,244,3,248,124,49,192,252, + 233,244,4,248,125,129,252,248,239,15,130,244,62,139,171,233,59,171,233,15, + 130,244,247,232,244,74,248,1,255,129,122,253,4,239,15,133,244,62,129,122, + 253,12,239,139,42,255,15,133,244,62,139,66,8,255,15,131,244,62,252,242,15, + 44,66,8,255,15,131,244,62,221,66,8,219,92,36,20,139,68,36,20,255,133,192, + 15,142,244,124,131,189,233,1,15,130,244,124,15,133,244,126,57,131,233,15, + 130,244,126,15,182,141,233,139,171,233,137,68,36,8,248,1,136,77,0,131,197, + 1,131,232,1,15,133,244,1,139,131,233,252,233,244,122,248,127,129,252,248, + 239,255,15,130,244,62,139,171,233,59,171,233,15,130,244,247,232,244,74,248, + 1,129,122,253,4,239,15,133,244,62,139,42,139,133,233,133,192,15,132,244,124, + 57,131,233,15,130,244,128,129,197,239,137,116,36,20,137,68,36,8,139,179,233, + 248,1,255,15,182,77,0,131,197,1,131,232,1,136,12,6,15,133,244,1,137,252,240, + 139,116,36,20,252,233,244,122,248,129,129,252,248,239,15,130,244,62,139,171, + 233,59,171,233,15,130,244,247,232,244,74,248,1,129,122,253,4,239,15,133,244, + 62,139,42,139,133,233,57,131,233,255,15,130,244,128,129,197,239,137,116,36, + 20,137,68,36,8,139,179,233,252,233,244,249,248,1,15,182,76,5,0,131,252,249, + 65,15,130,244,248,131,252,249,90,15,135,244,248,131,252,241,32,248,2,136, + 12,6,248,3,131,232,1,15,137,244,1,137,252,240,139,116,36,20,252,233,244,122, + 248,130,129,252,248,239,15,130,244,62,255,139,171,233,59,171,233,15,130,244, + 247,232,244,74,248,1,129,122,253,4,239,15,133,244,62,139,42,139,133,233,57, + 
131,233,15,130,244,128,129,197,239,137,116,36,20,137,68,36,8,139,179,233, + 252,233,244,249,248,1,15,182,76,5,0,131,252,249,97,15,130,244,248,255,131, + 252,249,122,15,135,244,248,131,252,241,32,248,2,136,12,6,248,3,131,232,1, + 15,137,244,1,137,252,240,139,116,36,20,252,233,244,122,248,131,129,252,248, + 239,15,130,244,62,129,122,253,4,239,15,133,244,62,137,213,139,10,232,251, + 1,20,137,252,234,255,137,197,252,233,244,70,255,252,242,15,42,192,252,233, + 244,71,255,137,4,36,219,4,36,252,233,244,72,255,248,132,129,252,248,239,15, + 130,244,62,129,122,253,4,239,255,15,133,244,247,139,42,252,233,244,88,248, + 1,15,135,244,62,255,252,242,15,16,2,189,0,0,56,67,102,15,110,205,102,15,112, + 201,81,252,242,15,88,193,102,15,126,197,255,221,2,199,68,36,16,0,0,192,89, + 216,68,36,16,221,28,36,255,139,44,36,255,252,233,244,88,255,248,133,129,252, + 248,239,15,130,244,62,255,189,0,0,56,67,102,15,110,205,102,15,112,201,81, + 255,199,68,36,16,0,0,192,89,255,15,133,244,247,139,42,252,233,244,248,248, + 1,15,135,244,62,255,252,242,15,16,2,252,242,15,88,193,102,15,126,197,255, + 221,2,216,68,36,16,221,28,36,139,44,36,255,248,2,137,68,36,20,141,68,194, + 252,240,248,1,57,208,15,134,244,88,129,120,253,4,239,255,15,133,244,248,35, + 40,131,232,8,252,233,244,1,248,2,15,135,244,134,255,15,131,244,134,255,252, + 242,15,16,0,252,242,15,88,193,102,15,126,193,33,205,255,221,0,216,68,36,16, + 221,28,36,35,44,36,255,131,232,8,252,233,244,1,248,135,129,252,248,239,15, + 130,244,62,255,15,133,244,248,11,40,131,232,8,252,233,244,1,248,2,15,135, + 244,134,255,252,242,15,16,0,252,242,15,88,193,102,15,126,193,9,205,255,221, + 0,216,68,36,16,221,28,36,11,44,36,255,131,232,8,252,233,244,1,248,136,129, + 252,248,239,15,130,244,62,255,15,133,244,248,51,40,131,232,8,252,233,244, + 1,248,2,15,135,244,134,255,252,242,15,16,0,252,242,15,88,193,102,15,126,193, + 49,205,255,221,0,216,68,36,16,221,28,36,51,44,36,255,131,232,8,252,233,244, + 1,248,137,129,252,248,239,15,130,244,62,129,122,253,4,239,255,221,2,199,68, + 36,16,0,0,192,89,216,68,36,16,221,28,36,139,44,36,255,248,2,15,205,252,233, + 244,88,248,138,129,252,248,239,15,130,244,62,129,122,253,4,239,255,248,2, + 252,247,213,255,248,88,252,242,15,42,197,252,233,244,71,255,248,88,137,44, + 36,219,4,36,252,233,244,72,255,248,134,139,68,36,20,252,233,244,62,255,248, + 139,129,252,248,239,15,130,244,62,129,122,253,4,239,255,248,2,129,122,253, + 12,239,15,133,244,62,139,74,8,255,248,139,129,252,248,239,15,130,244,62,129, + 122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,252,242,15,16, + 2,252,242,15,16,74,8,189,0,0,56,67,102,15,110,213,102,15,112,210,81,252,242, + 15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201,255,248,139,129, + 252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122,253,12, + 239,15,131,244,62,221,2,221,66,8,199,68,36,16,0,0,192,89,216,68,36,16,221, + 92,36,8,216,68,36,16,221,28,36,139,76,36,8,139,44,36,255,211,229,252,233, + 244,88,255,248,140,129,252,248,239,15,130,244,62,129,122,253,4,239,255,248, + 140,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122, + 253,12,239,15,131,244,62,252,242,15,16,2,252,242,15,16,74,8,189,0,0,56,67, + 102,15,110,213,102,15,112,210,81,252,242,15,88,194,252,242,15,88,202,102, + 15,126,197,102,15,126,201,255,248,140,129,252,248,239,15,130,244,62,129,122, + 253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,221,2,221,66,8,199, + 68,36,16,0,0,192,89,216,68,36,16,221,92,36,8,216,68,36,16,221,28,36,139,76, + 36,8,139,44,36,255,211,252,237,252,233,244,88,255,248,141,129,252,248,239, 
+ 15,130,244,62,129,122,253,4,239,255,248,141,129,252,248,239,15,130,244,62, + 129,122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,252,242, + 15,16,2,252,242,15,16,74,8,189,0,0,56,67,102,15,110,213,102,15,112,210,81, + 252,242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201,255,248, + 141,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129,122, + 253,12,239,15,131,244,62,221,2,221,66,8,199,68,36,16,0,0,192,89,216,68,36, + 16,221,92,36,8,216,68,36,16,221,28,36,139,76,36,8,139,44,36,255,211,252,253, + 252,233,244,88,255,248,142,129,252,248,239,15,130,244,62,129,122,253,4,239, + 255,248,142,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62, + 129,122,253,12,239,15,131,244,62,252,242,15,16,2,252,242,15,16,74,8,189,0, + 0,56,67,102,15,110,213,102,15,112,210,81,252,242,15,88,194,252,242,15,88, + 202,102,15,126,197,102,15,126,201,255,248,142,129,252,248,239,15,130,244, + 62,129,122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,221,2, + 221,66,8,199,68,36,16,0,0,192,89,216,68,36,16,221,92,36,8,216,68,36,16,221, + 28,36,139,76,36,8,139,44,36,255,211,197,252,233,244,88,255,248,143,129,252, + 248,239,15,130,244,62,129,122,253,4,239,255,248,143,129,252,248,239,15,130, + 244,62,129,122,253,4,239,15,131,244,62,129,122,253,12,239,15,131,244,62,252, + 242,15,16,2,252,242,15,16,74,8,189,0,0,56,67,102,15,110,213,102,15,112,210, + 81,252,242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201,255, + 248,143,129,252,248,239,15,130,244,62,129,122,253,4,239,15,131,244,62,129, + 122,253,12,239,15,131,244,62,221,2,221,66,8,199,68,36,16,0,0,192,89,216,68, + 36,16,221,92,36,8,216,68,36,16,221,28,36,139,76,36,8,139,44,36,255,211,205, + 252,233,244,88,248,126,184,237,252,233,244,62,248,128,184,237,248,62,139, + 108,36,48,139,114,252,252,137,116,36,24,137,149,233,141,68,194,252,248,141, + 136,233,137,133,233,139,66,252,248,59,141,233,15,135,244,251,137,44,36,252, + 255,144,233,139,149,233,133,192,15,143,244,77,248,1,255,139,141,233,41,209, + 193,252,233,3,133,192,141,65,1,139,106,252,248,15,133,244,32,139,181,233, + 139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,171,248,32,137,209, + 252,247,198,237,15,133,244,249,15,182,110,252,253,252,247,213,141,20,252, + 234,252,233,244,28,248,3,137,252,245,131,229,252,248,41,252,234,252,233,244, + 28,248,5,186,237,137,252,233,232,251,1,0,139,149,233,49,192,252,233,244,1, + 248,74,93,137,108,36,16,139,108,36,48,137,116,36,24,137,149,233,255,141,68, + 194,252,248,137,252,233,137,133,233,232,251,1,21,139,149,233,139,133,233, + 41,208,193,232,3,131,192,1,139,108,36,16,85,195,248,144,255,15,182,131,233, + 168,235,15,133,244,251,168,235,15,133,244,247,168,235,15,132,244,247,252, + 255,139,233,252,233,244,247,255,248,145,15,182,131,233,168,235,15,133,244, + 251,252,233,244,247,248,146,15,182,131,233,168,235,15,133,244,251,168,235, + 15,132,244,251,252,255,139,233,15,132,244,247,168,235,15,132,244,251,248, + 1,255,139,108,36,48,137,149,233,137,252,242,137,252,233,232,251,1,22,248, + 3,139,149,233,248,4,15,182,78,252,253,248,5,15,182,110,252,252,15,183,70, + 252,254,252,255,164,253,171,233,248,147,131,198,4,139,77,232,137,76,36,20, + 252,233,244,4,248,148,255,139,106,252,248,139,173,233,15,182,133,233,141, + 4,194,139,108,36,48,137,149,233,137,133,233,137,252,242,141,139,233,137,171, + 233,137,116,36,24,232,251,1,23,252,233,244,3,255,248,149,137,116,36,24,255, + 248,150,255,137,116,36,24,131,206,1,248,1,255,141,68,194,252,248,139,108, + 36,48,137,149,233,137,133,233,137,252,242,137,252,233,232,251,1,24,199,68, + 
36,24,0,0,0,0,255,131,230,252,254,255,139,149,233,137,193,139,133,233,41, + 208,137,205,15,182,78,252,253,193,232,3,131,192,1,252,255,229,248,151,255, + 85,141,108,36,12,85,83,82,81,80,15,182,69,252,252,138,101,252,248,137,125, + 252,252,137,117,252,248,139,93,0,139,139,233,199,131,233,237,137,131,233, + 137,139,233,129,252,236,239,252,242,15,17,125,216,252,242,15,17,117,208,252, + 242,15,17,109,200,252,242,15,17,101,192,252,242,15,17,93,184,252,242,15,17, + 85,176,252,242,15,17,77,168,252,242,15,17,69,160,139,171,233,139,147,233, + 137,171,233,199,131,233,0,0,0,0,137,149,233,141,84,36,16,141,139,233,232, + 251,1,25,139,141,233,129,225,239,137,204,137,169,233,139,149,233,139,177, + 233,255,248,152,255,133,192,15,136,244,249,137,68,36,20,139,122,252,248,139, + 191,233,139,191,233,199,131,233,0,0,0,0,199,131,233,237,139,6,15,182,204, + 15,182,232,131,198,4,193,232,16,129,252,253,239,15,130,244,248,139,68,36, + 20,248,2,252,255,36,171,248,3,252,247,216,137,252,233,137,194,232,251,1,26, + 255,248,90,255,217,124,36,4,137,68,36,8,102,184,0,4,102,11,68,36,4,102,37, + 252,255,252,247,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,139, + 68,36,8,195,255,248,153,102,15,252,239,210,102,15,118,210,102,15,115,210, + 1,184,0,0,48,67,102,15,110,216,102,15,112,219,81,15,40,200,102,15,84,202, + 102,15,46,217,15,134,244,247,102,15,85,208,252,242,15,88,203,252,242,15,92, + 203,102,15,86,202,184,0,0,252,240,63,102,15,110,208,102,15,112,210,81,252, + 242,15,194,193,1,102,15,84,194,252,242,15,92,200,15,40,193,248,1,195,248, + 92,255,217,124,36,4,137,68,36,8,102,184,0,8,102,11,68,36,4,102,37,252,255, + 252,251,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,139,68,36,8, + 195,255,248,154,102,15,252,239,210,102,15,118,210,102,15,115,210,1,184,0, + 0,48,67,102,15,110,216,102,15,112,219,81,15,40,200,102,15,84,202,102,15,46, + 217,15,134,244,247,102,15,85,208,252,242,15,88,203,252,242,15,92,203,102, + 15,86,202,184,0,0,252,240,191,102,15,110,208,102,15,112,210,81,252,242,15, + 194,193,6,102,15,84,194,252,242,15,92,200,15,40,193,248,1,195,248,113,255, + 217,124,36,4,137,68,36,8,102,184,0,12,102,11,68,36,4,102,137,68,36,6,217, + 108,36,6,217,252,252,217,108,36,4,139,68,36,8,195,255,248,155,102,15,252, + 239,210,102,15,118,210,102,15,115,210,1,184,0,0,48,67,102,15,110,216,102, + 15,112,219,81,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,15, + 85,208,15,40,193,252,242,15,88,203,252,242,15,92,203,184,0,0,252,240,63,102, + 15,110,216,102,15,112,219,81,252,242,15,194,193,1,102,15,84,195,252,242,15, + 92,200,102,15,86,202,15,40,193,248,1,195,248,156,255,15,40,232,252,242,15, + 94,193,102,15,252,239,210,102,15,118,210,102,15,115,210,1,184,0,0,48,67,102, + 15,110,216,102,15,112,219,81,15,40,224,102,15,84,226,102,15,46,220,15,134, + 244,247,102,15,85,208,252,242,15,88,227,252,242,15,92,227,102,15,86,226,184, + 0,0,252,240,63,102,15,110,208,102,15,112,210,81,252,242,15,194,196,1,102, + 15,84,194,252,242,15,92,224,15,40,197,252,242,15,89,204,252,242,15,92,193, + 195,248,1,252,242,15,89,200,15,40,197,252,242,15,92,193,195,255,217,193,216, + 252,241,217,124,36,4,102,184,0,4,102,11,68,36,4,102,37,252,255,252,247,102, + 137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,222,201,222,252,233,195, + 255,248,97,217,252,234,222,201,248,157,217,84,36,4,129,124,36,4,0,0,128,127, + 15,132,244,247,129,124,36,4,0,0,128,252,255,15,132,244,248,248,158,217,192, + 217,252,252,220,252,233,217,201,217,252,240,217,232,222,193,217,252,253,221, + 217,248,1,195,248,2,221,216,217,252,238,195,255,248,116,219,84,36,4,219,68, + 
36,4,255,223,252,233,255,221,252,233,223,224,158,255,15,133,244,254,15,138, + 244,255,221,216,139,68,36,4,131,252,248,1,15,142,244,252,248,1,169,1,0,0, + 0,15,133,244,248,216,200,209,232,252,233,244,1,248,2,209,232,15,132,244,251, + 217,192,248,3,216,200,209,232,15,132,244,250,15,131,244,3,220,201,252,233, + 244,3,248,4,255,222,201,248,5,195,248,6,15,132,244,5,15,130,244,253,217,232, + 222,252,241,252,247,216,131,252,248,1,15,132,244,5,252,233,244,1,248,7,221, + 216,217,232,195,248,8,217,84,36,4,217,201,217,84,36,8,139,68,36,4,209,224, + 61,0,0,0,252,255,15,132,244,248,139,68,36,8,209,224,15,132,244,250,61,0,0, + 0,252,255,15,132,244,250,217,252,241,252,233,244,158,248,9,255,217,232,255, + 223,252,234,255,221,252,234,223,224,158,255,15,132,244,247,217,201,248,1, + 221,216,195,248,2,217,225,217,232,255,15,132,244,249,221,216,217,225,217, + 252,238,184,0,0,0,0,15,146,208,209,200,51,68,36,4,15,137,244,249,217,201, + 248,3,221,217,217,225,195,248,4,131,124,36,4,0,15,141,244,3,221,216,221,216, + 133,192,15,132,244,251,217,252,238,195,248,5,199,68,36,4,0,0,128,127,217, + 68,36,4,195,255,248,116,255,248,159,252,242,15,45,193,252,242,15,42,208,102, + 15,46,202,15,133,244,254,15,138,244,255,248,160,131,252,248,1,15,142,244, + 252,248,1,169,1,0,0,0,15,133,244,248,252,242,15,89,192,209,232,252,233,244, + 1,248,2,209,232,15,132,244,251,15,40,200,248,3,252,242,15,89,192,209,232, + 15,132,244,250,15,131,244,3,255,252,242,15,89,200,252,233,244,3,248,4,252, + 242,15,89,193,248,5,195,248,6,15,132,244,5,15,130,244,253,252,247,216,232, + 244,1,184,0,0,252,240,63,102,15,110,200,102,15,112,201,81,252,242,15,94,200, + 15,40,193,195,248,7,184,0,0,252,240,63,102,15,110,192,102,15,112,192,81,195, + 248,8,252,242,15,17,76,36,12,252,242,15,17,68,36,4,131,124,36,12,0,15,133, + 244,247,139,68,36,16,209,224,61,0,0,224,252,255,15,132,244,248,248,1,131, + 124,36,4,0,15,133,244,247,255,139,68,36,8,209,224,15,132,244,250,61,0,0,224, + 252,255,15,132,244,251,248,1,221,68,36,12,221,68,36,4,217,252,241,217,192, + 217,252,252,220,252,233,217,201,217,252,240,217,232,222,193,217,252,253,221, + 217,221,92,36,4,252,242,15,16,68,36,4,195,248,9,184,0,0,252,240,63,102,15, + 110,208,102,15,112,210,81,102,15,46,194,15,132,244,247,15,40,193,248,1,195, + 248,2,102,15,252,239,210,102,15,118,210,102,15,115,210,1,102,15,84,194,184, + 0,0,252,240,63,102,15,110,208,102,15,112,210,81,102,15,46,194,15,132,244, + 1,102,15,80,193,15,87,192,136,196,15,146,208,48,224,15,133,244,1,248,3,184, + 0,0,252,240,127,102,15,110,192,102,15,112,192,81,195,248,4,102,15,80,193, + 133,192,15,133,244,3,15,87,192,195,248,5,102,15,80,193,133,192,15,132,244, + 3,255,15,87,192,195,248,161,255,139,68,36,12,252,242,15,16,68,36,4,131,252, + 248,1,15,132,244,247,15,135,244,248,232,244,90,252,233,244,253,248,1,232, + 244,92,252,233,244,253,248,2,131,252,248,3,15,132,244,247,15,135,244,248, + 232,244,113,255,252,233,244,253,248,1,252,242,15,81,192,248,7,252,242,15, + 17,68,36,4,221,68,36,4,195,248,2,221,68,36,4,131,252,248,5,15,130,244,97, + 15,132,244,157,248,2,131,252,248,7,15,132,244,247,15,135,244,248,217,252, + 237,217,201,217,252,241,195,248,1,217,232,217,201,217,252,241,195,248,2,131, + 252,248,9,15,132,244,247,15,135,244,248,255,217,252,236,217,201,217,252,241, + 195,248,1,217,252,254,195,248,2,131,252,248,11,15,132,244,247,15,135,244, + 255,217,252,255,195,248,1,217,252,242,221,216,195,255,139,68,36,12,221,68, + 36,4,131,252,248,1,15,130,244,90,15,132,244,92,131,252,248,3,15,130,244,113, + 
15,135,244,248,217,252,250,195,248,2,131,252,248,5,15,130,244,97,15,132,244, + 157,131,252,248,7,15,132,244,247,15,135,244,248,217,252,237,217,201,217,252, + 241,195,248,1,217,232,217,201,217,252,241,195,248,2,131,252,248,9,15,132, + 244,247,255,15,135,244,248,217,252,236,217,201,217,252,241,195,248,1,217, + 252,254,195,248,2,131,252,248,11,15,132,244,247,15,135,244,255,217,252,255, + 195,248,1,217,252,242,221,216,195,255,248,9,204,255,248,162,255,139,68,36, + 20,252,242,15,16,68,36,4,252,242,15,16,76,36,12,131,252,248,1,15,132,244, + 247,15,135,244,248,252,242,15,88,193,248,7,252,242,15,17,68,36,4,221,68,36, + 4,195,248,1,252,242,15,92,193,252,233,244,7,248,2,131,252,248,3,15,132,244, + 247,15,135,244,248,252,242,15,89,193,252,233,244,7,248,1,252,242,15,94,193, + 252,233,244,7,248,2,131,252,248,5,15,132,244,247,255,15,135,244,248,232,244, + 156,252,233,244,7,248,1,90,232,244,116,82,252,233,244,7,248,2,131,252,248, + 7,15,132,244,247,15,135,244,248,184,0,0,0,128,102,15,110,200,102,15,112,201, + 81,15,87,193,252,233,244,7,248,1,102,15,252,239,201,102,15,118,201,102,15, + 115,209,1,15,84,193,252,233,244,7,248,2,255,131,252,248,9,15,135,244,248, + 221,68,36,4,221,68,36,12,15,132,244,247,217,252,243,195,248,1,217,201,217, + 252,253,221,217,195,248,2,131,252,248,11,15,132,244,247,15,135,244,255,252, + 242,15,93,193,252,233,244,7,248,1,252,242,15,95,193,252,233,244,7,248,9,204, + 255,139,68,36,20,221,68,36,4,221,68,36,12,131,252,248,1,15,132,244,247,15, + 135,244,248,222,193,195,248,1,222,252,233,195,248,2,131,252,248,3,15,132, + 244,247,15,135,244,248,222,201,195,248,1,222,252,249,195,248,2,131,252,248, + 5,15,130,244,156,15,132,244,116,131,252,248,7,15,132,244,247,15,135,244,248, + 255,221,216,217,224,195,248,1,221,216,217,225,195,248,2,131,252,248,9,15, + 132,244,247,15,135,244,248,217,252,243,195,248,1,217,201,217,252,253,221, + 217,195,248,2,131,252,248,11,15,132,244,247,15,135,244,255,255,219,252,233, + 219,209,221,217,195,248,1,219,252,233,218,209,221,217,195,255,221,225,223, + 224,252,246,196,1,15,132,244,248,217,201,248,2,221,216,195,248,1,221,225, + 223,224,252,246,196,1,15,133,244,248,217,201,248,2,221,216,195,255,248,163, + 156,90,137,209,129,252,242,0,0,32,0,82,157,156,90,49,192,57,209,15,132,244, + 247,139,68,36,4,87,83,15,162,139,124,36,16,137,7,137,95,4,137,79,8,137,87, + 12,91,95,248,1,195,248,164,255,85,137,229,83,137,203,43,163,233,255,137,163, + 233,255,15,182,139,233,131,252,233,1,15,136,244,248,248,1,139,132,253,139, + 233,137,4,140,131,252,233,1,15,137,244,1,248,2,139,139,233,139,147,233,252, + 255,147,233,137,131,233,137,147,233,128,187,233,1,15,130,244,253,15,132,244, + 252,221,155,233,252,233,244,253,248,6,255,217,155,233,248,7,255,41,163,233, + 255,139,93,252,252,201,195,255,248,165,255,249,255,129,124,253,202,4,239, + 15,133,244,253,129,124,253,194,4,239,15,133,244,254,139,44,202,131,198,4, + 59,44,194,255,15,141,244,255,255,15,140,244,255,255,15,143,244,255,255,15, + 142,244,255,255,248,6,15,183,70,252,254,141,180,253,134,233,248,9,139,6,15, + 182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,7,15,135,244,43, + 129,124,253,194,4,239,15,130,244,247,15,133,244,43,255,252,242,15,42,4,194, + 252,233,244,248,255,221,4,202,219,4,194,252,233,244,249,255,248,8,15,135, + 244,43,255,252,242,15,42,12,202,252,242,15,16,4,194,131,198,4,102,15,46,193, + 255,15,134,244,9,255,15,135,244,9,255,15,130,244,9,255,15,131,244,9,255,252, + 233,244,6,255,219,4,202,252,233,244,248,255,129,124,253,202,4,239,15,131, + 
244,43,129,124,253,194,4,239,15,131,244,43,255,248,1,252,242,15,16,4,194, + 248,2,131,198,4,102,15,46,4,202,248,3,255,248,1,221,4,202,248,2,221,4,194, + 248,3,131,198,4,255,15,134,244,247,255,15,135,244,247,255,15,130,244,247, + 255,15,131,244,247,255,15,183,70,252,254,141,180,253,134,233,248,1,139,6, + 15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,255,139,108,194, + 4,131,198,4,255,129,252,253,239,15,133,244,253,129,124,253,202,4,239,15,133, + 244,254,139,44,194,59,44,202,255,15,133,244,255,255,15,132,244,255,255,15, + 183,70,252,254,141,180,253,134,233,248,9,139,6,15,182,204,15,182,232,131, + 198,4,193,232,16,252,255,36,171,248,7,15,135,244,251,129,124,253,202,4,239, + 15,130,244,247,15,133,244,251,255,252,242,15,42,4,202,255,219,4,202,255,252, + 233,244,248,248,8,15,135,244,251,255,252,242,15,42,4,194,102,15,46,4,202, + 255,219,4,194,221,4,202,255,252,233,244,250,255,129,252,253,239,15,131,244, + 251,129,124,253,202,4,239,15,131,244,251,255,248,1,252,242,15,16,4,202,248, + 2,102,15,46,4,194,248,4,255,248,1,221,4,202,248,2,221,4,194,248,4,255,15, + 138,244,248,15,133,244,248,255,15,138,244,248,15,132,244,247,255,248,1,15, + 183,70,252,254,141,180,253,134,233,248,2,255,248,2,15,183,70,252,254,141, + 180,253,134,233,248,1,255,252,233,244,9,255,248,5,255,129,252,253,239,15, + 132,244,48,129,124,253,202,4,239,15,132,244,48,255,57,108,202,4,15,133,244, + 2,129,252,253,239,15,131,244,1,139,12,202,139,4,194,57,193,15,132,244,1,129, + 252,253,239,15,135,244,2,139,169,233,133,252,237,15,132,244,2,252,246,133, + 233,235,15,133,244,2,255,49,252,237,255,189,1,0,0,0,255,252,233,244,47,255, + 248,3,129,252,253,239,255,15,133,244,9,255,252,233,244,48,255,252,247,208, + 139,108,202,4,131,198,4,129,252,253,239,15,133,244,249,139,12,202,59,12,135, + 255,139,108,202,4,131,198,4,255,129,252,253,239,15,133,244,253,129,124,253, + 199,4,239,15,133,244,254,139,44,199,59,44,202,255,15,183,70,252,254,141,180, + 253,134,233,248,9,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255, + 36,171,248,7,15,135,244,249,129,124,253,199,4,239,15,130,244,247,255,252, + 242,15,42,4,199,255,219,4,199,255,252,233,244,248,248,8,255,252,242,15,42, + 4,202,102,15,46,4,199,255,219,4,202,221,4,199,255,129,252,253,239,15,131, + 244,249,255,248,1,252,242,15,16,4,199,248,2,102,15,46,4,202,248,4,255,248, + 1,221,4,199,248,2,221,4,202,248,4,255,252,247,208,139,108,202,4,131,198,4, + 57,197,255,15,133,244,249,15,183,70,252,254,141,180,253,134,233,248,2,139, + 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,3,129,252, + 253,239,15,133,244,2,252,233,244,48,255,15,132,244,248,129,252,253,239,15, + 132,244,48,15,183,70,252,254,141,180,253,134,233,248,2,139,6,15,182,204,15, + 182,232,131,198,4,193,232,16,252,255,36,171,255,139,108,194,4,131,198,4,129, + 252,253,239,255,137,108,202,4,139,44,194,137,44,202,255,139,108,194,4,139, + 4,194,137,108,202,4,137,4,202,139,6,15,182,204,15,182,232,131,198,4,193,232, + 16,252,255,36,171,255,49,252,237,129,124,253,194,4,239,129,213,239,137,108, + 202,4,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,255, + 129,124,253,194,4,239,15,133,244,251,139,44,194,252,247,221,15,128,244,250, + 199,68,202,4,237,137,44,202,248,9,139,6,15,182,204,15,182,232,131,198,4,193, + 232,16,252,255,36,171,248,4,199,68,202,4,0,0,224,65,199,4,202,0,0,0,0,252, + 233,244,9,248,5,15,135,244,53,255,129,124,253,194,4,239,15,131,244,53,255, + 252,242,15,16,4,194,184,0,0,0,128,102,15,110,200,102,15,112,201,81,15,87, + 193,252,242,15,17,4,202,255,221,4,194,217,224,221,28,202,255,129,124,253, 
+ 194,4,239,15,133,244,248,139,4,194,255,139,128,233,248,1,199,68,202,4,237, + 137,4,202,255,15,87,192,252,242,15,42,128,233,248,1,252,242,15,17,4,202,255, + 219,128,233,248,1,221,28,202,255,139,6,15,182,204,15,182,232,131,198,4,193, + 232,16,252,255,36,171,248,2,129,124,253,194,4,239,15,133,244,56,139,12,194, + 255,139,169,233,131,252,253,0,15,133,244,255,248,3,255,248,57,137,213,232, + 251,1,20,255,252,242,15,42,192,255,137,4,36,219,4,36,255,137,252,234,15,182, + 78,252,253,252,233,244,1,255,248,9,252,246,133,233,235,15,133,244,3,252,233, + 244,56,255,15,182,252,236,15,182,192,255,129,124,253,252,234,4,239,15,133, + 244,50,129,124,253,199,4,239,15,133,244,50,139,44,252,234,3,44,199,15,128, + 244,49,255,129,124,253,252,234,4,239,15,133,244,52,129,124,253,199,4,239, + 15,133,244,52,139,4,199,3,4,252,234,15,128,244,51,255,129,124,253,252,234, + 4,239,15,133,244,55,129,124,253,194,4,239,15,133,244,55,139,44,252,234,3, + 44,194,15,128,244,54,255,199,68,202,4,237,255,129,124,253,252,234,4,239,15, + 131,244,50,255,129,124,253,199,4,239,15,131,244,50,255,252,242,15,16,4,252, + 234,252,242,15,88,4,199,255,221,4,252,234,220,4,199,255,129,124,253,252,234, + 4,239,15,131,244,52,255,129,124,253,199,4,239,15,131,244,52,255,252,242,15, + 16,4,199,252,242,15,88,4,252,234,255,221,4,199,220,4,252,234,255,129,124, + 253,252,234,4,239,15,131,244,55,129,124,253,194,4,239,15,131,244,55,255,252, + 242,15,16,4,252,234,252,242,15,88,4,194,255,221,4,252,234,220,4,194,255,129, + 124,253,252,234,4,239,15,133,244,50,129,124,253,199,4,239,15,133,244,50,139, + 44,252,234,43,44,199,15,128,244,49,255,129,124,253,252,234,4,239,15,133,244, + 52,129,124,253,199,4,239,15,133,244,52,139,4,199,43,4,252,234,15,128,244, + 51,255,129,124,253,252,234,4,239,15,133,244,55,129,124,253,194,4,239,15,133, + 244,55,139,44,252,234,43,44,194,15,128,244,54,255,252,242,15,16,4,252,234, + 252,242,15,92,4,199,255,221,4,252,234,220,36,199,255,252,242,15,16,4,199, + 252,242,15,92,4,252,234,255,221,4,199,220,36,252,234,255,252,242,15,16,4, + 252,234,252,242,15,92,4,194,255,221,4,252,234,220,36,194,255,129,124,253, + 252,234,4,239,15,133,244,50,129,124,253,199,4,239,15,133,244,50,139,44,252, + 234,15,175,44,199,15,128,244,49,255,129,124,253,252,234,4,239,15,133,244, + 52,129,124,253,199,4,239,15,133,244,52,139,4,199,15,175,4,252,234,15,128, + 244,51,255,129,124,253,252,234,4,239,15,133,244,55,129,124,253,194,4,239, + 15,133,244,55,139,44,252,234,15,175,44,194,15,128,244,54,255,252,242,15,16, + 4,252,234,252,242,15,89,4,199,255,221,4,252,234,220,12,199,255,252,242,15, + 16,4,199,252,242,15,89,4,252,234,255,221,4,199,220,12,252,234,255,252,242, + 15,16,4,252,234,252,242,15,89,4,194,255,221,4,252,234,220,12,194,255,252, + 242,15,16,4,252,234,252,242,15,94,4,199,255,221,4,252,234,220,52,199,255, + 252,242,15,16,4,199,252,242,15,94,4,252,234,255,221,4,199,220,52,252,234, + 255,252,242,15,16,4,252,234,252,242,15,94,4,194,255,221,4,252,234,220,52, + 194,255,252,242,15,16,4,252,234,252,242,15,16,12,199,255,221,4,252,234,221, + 4,199,255,252,242,15,16,4,199,252,242,15,16,12,252,234,255,221,4,199,221, + 4,252,234,255,252,242,15,16,4,252,234,252,242,15,16,12,194,255,221,4,252, + 234,221,4,194,255,248,166,232,244,156,255,252,233,244,166,255,232,244,116, + 255,15,182,252,236,15,182,192,141,12,194,41,232,137,76,36,4,137,68,36,8,248, + 35,139,108,36,48,137,44,36,137,149,233,137,116,36,24,232,251,1,27,139,149, + 233,133,192,15,133,244,44,15,182,110,252,255,15,182,78,252,253,139,68,252, + 234,4,139,44,252,234,137,68,202,4,137,44,202,139,6,15,182,204,15,182,232, + 
131,198,4,193,232,16,252,255,36,171,255,252,247,208,139,4,135,199,68,202, + 4,237,137,4,202,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255, + 36,171,255,15,191,192,199,68,202,4,237,137,4,202,255,15,191,192,252,242,15, + 42,192,252,242,15,17,4,202,255,223,70,252,254,221,28,202,255,252,242,15,16, + 4,199,252,242,15,17,4,202,255,221,4,199,221,28,202,255,252,247,208,137,68, + 202,4,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,255, + 141,76,202,12,141,68,194,4,189,237,137,105,252,248,248,1,137,41,131,193,8, + 57,193,15,134,244,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252, + 255,36,171,255,139,106,252,248,139,172,253,133,233,139,173,233,139,69,4,139, + 109,0,137,68,202,4,137,44,202,139,6,15,182,204,15,182,232,131,198,4,193,232, + 16,252,255,36,171,255,139,106,252,248,139,172,253,141,233,128,189,233,0,139, + 173,233,139,12,194,139,68,194,4,137,77,0,137,69,4,15,132,244,247,252,246, + 133,233,235,15,133,244,248,248,1,139,6,15,182,204,15,182,232,131,198,4,193, + 232,16,252,255,36,171,248,2,129,232,239,129,252,248,239,15,134,244,1,252, + 246,129,233,235,15,132,244,1,135,213,141,139,233,255,232,251,1,28,137,252, + 234,252,233,244,1,255,252,247,208,139,106,252,248,139,172,253,141,233,139, + 12,135,139,133,233,137,8,199,64,4,237,252,246,133,233,235,15,133,244,248, + 248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248, + 2,252,246,129,233,235,15,132,244,1,128,189,233,0,15,132,244,1,137,213,137, + 194,141,139,233,232,251,1,28,137,252,234,252,233,244,1,255,139,106,252,248, + 255,252,242,15,16,4,199,255,139,172,253,141,233,139,141,233,255,252,242,15, + 17,1,255,221,25,255,252,247,208,139,106,252,248,139,172,253,141,233,139,141, + 233,137,65,4,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36, + 171,255,141,180,253,134,233,139,108,36,48,131,189,233,0,15,132,244,247,137, + 149,233,141,20,202,137,252,233,232,251,1,29,139,149,233,248,1,139,6,15,182, + 204,15,182,232,131,198,4,193,232,16,252,255,36,171,255,252,247,208,139,74, + 252,248,139,4,135,139,108,36,48,137,76,36,8,137,68,36,4,137,44,36,137,149, + 233,137,116,36,24,232,251,1,30,139,149,233,15,182,78,252,253,137,4,202,199, + 68,202,4,237,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36, + 171,255,139,108,36,48,137,149,233,139,139,233,59,139,233,137,116,36,24,15, + 131,244,251,248,1,137,193,37,252,255,7,0,0,193,252,233,11,137,76,36,8,61, + 252,255,7,0,0,15,132,244,249,248,2,137,44,36,137,68,36,4,232,251,1,31,139, + 149,233,15,182,78,252,253,137,4,202,199,68,202,4,237,139,6,15,182,204,15, + 182,232,131,198,4,193,232,16,252,255,36,171,248,3,184,1,8,0,0,252,233,244, + 2,248,5,137,252,233,232,251,1,32,15,183,70,252,254,252,233,244,1,255,252, + 247,208,139,108,36,48,139,139,233,137,116,36,24,59,139,233,137,149,233,15, + 131,244,249,248,2,139,20,135,137,252,233,232,251,1,33,139,149,233,15,182, + 78,252,253,137,4,202,199,68,202,4,237,139,6,15,182,204,15,182,232,131,198, + 4,193,232,16,252,255,36,171,248,3,137,252,233,232,251,1,32,15,183,70,252, + 254,252,247,208,252,233,244,2,255,252,247,208,139,106,252,248,139,173,233, + 139,4,135,252,233,244,167,255,252,247,208,139,106,252,248,139,173,233,139, + 4,135,252,233,244,168,255,15,182,252,236,15,182,192,129,124,253,252,234,4, + 239,15,133,244,38,139,44,252,234,255,129,124,253,194,4,239,15,133,244,251, + 139,4,194,255,129,124,253,194,4,239,15,131,244,251,255,252,242,15,16,4,194, + 252,242,15,45,192,252,242,15,42,200,102,15,46,193,255,221,4,194,219,20,36, + 219,4,36,255,15,133,244,38,255,59,133,233,15,131,244,38,193,224,3,3,133,233, + 
129,120,253,4,239,15,132,244,248,139,40,139,64,4,137,44,202,137,68,202,4, + 248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248, + 2,131,189,233,0,15,132,244,249,139,141,233,252,246,129,233,235,15,132,244, + 38,15,182,78,252,253,248,3,199,68,202,4,237,252,233,244,1,248,5,255,129,124, + 253,194,4,239,15,133,244,38,139,4,194,252,233,244,167,255,15,182,252,236, + 15,182,192,252,247,208,139,4,135,129,124,253,252,234,4,239,15,133,244,36, + 139,44,252,234,248,167,139,141,233,35,136,233,105,201,239,3,141,233,248,1, + 129,185,233,239,15,133,244,250,57,129,233,15,133,244,250,129,121,253,4,239, + 15,132,244,251,15,182,70,252,253,139,41,139,73,4,137,44,194,137,76,194,4, + 248,2,255,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171, + 248,3,15,182,70,252,253,199,68,194,4,237,252,233,244,2,248,4,139,137,233, + 133,201,15,133,244,1,248,5,139,141,233,133,201,15,132,244,3,252,246,129,233, + 235,15,133,244,3,252,233,244,36,255,15,182,252,236,15,182,192,129,124,253, + 252,234,4,239,15,133,244,37,139,44,252,234,59,133,233,15,131,244,37,193,224, + 3,3,133,233,129,120,253,4,239,15,132,244,248,139,40,139,64,4,137,44,202,137, + 68,202,4,248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36, + 171,248,2,131,189,233,0,15,132,244,249,139,141,233,252,246,129,233,235,15, + 132,244,37,255,15,182,78,252,253,248,3,199,68,202,4,237,252,233,244,1,255, + 15,182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,41,139,44, + 252,234,255,15,133,244,41,255,59,133,233,15,131,244,41,193,224,3,3,133,233, + 129,120,253,4,239,15,132,244,249,248,1,252,246,133,233,235,15,133,244,253, + 248,2,139,108,202,4,139,12,202,137,104,4,137,8,139,6,15,182,204,15,182,232, + 131,198,4,193,232,16,252,255,36,171,248,3,131,189,233,0,15,132,244,1,139, + 141,233,252,246,129,233,235,255,15,132,244,41,15,182,78,252,253,252,233,244, + 1,248,5,129,124,253,194,4,239,15,133,244,41,139,4,194,252,233,244,168,248, + 7,128,165,233,235,139,139,233,137,171,233,137,141,233,15,182,78,252,253,252, + 233,244,2,255,15,182,252,236,15,182,192,252,247,208,139,4,135,129,124,253, + 252,234,4,239,15,133,244,39,139,44,252,234,248,168,139,141,233,35,136,233, + 105,201,239,198,133,233,0,3,141,233,248,1,129,185,233,239,15,133,244,251, + 57,129,233,15,133,244,251,129,121,253,4,239,15,132,244,250,248,2,255,252, + 246,133,233,235,15,133,244,253,248,3,15,182,70,252,253,139,108,194,4,139, + 4,194,137,105,4,137,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252, + 255,36,171,248,4,131,189,233,0,15,132,244,2,137,76,36,16,139,141,233,252, + 246,129,233,235,15,132,244,39,139,76,36,16,252,233,244,2,248,5,139,137,233, + 133,201,15,133,244,1,255,139,141,233,133,201,15,132,244,252,252,246,129,233, + 235,15,132,244,39,248,6,137,68,36,16,199,68,36,20,237,137,108,36,12,141,68, + 36,16,137,108,36,4,139,108,36,48,137,68,36,8,137,44,36,137,149,233,137,116, + 36,24,232,251,1,34,139,149,233,139,108,36,12,137,193,252,233,244,2,248,7, + 128,165,233,235,139,131,233,137,171,233,137,133,233,252,233,244,3,255,15, + 182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,40,139,44,252, + 234,59,133,233,15,131,244,40,193,224,3,3,133,233,129,120,253,4,239,15,132, + 244,249,248,1,252,246,133,233,235,15,133,244,253,248,2,139,108,202,4,139, + 12,202,137,104,4,137,8,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252, + 255,36,171,248,3,131,189,233,0,15,132,244,1,255,139,141,233,252,246,129,233, + 235,15,132,244,40,15,182,78,252,253,252,233,244,1,248,7,128,165,233,235,139, + 
139,233,137,171,233,137,141,233,15,182,78,252,253,252,233,244,2,255,137,124, + 36,16,139,60,199,248,1,141,12,202,139,105,252,248,252,246,133,233,235,15, + 133,244,253,248,2,139,68,36,20,131,232,1,15,132,244,250,1,252,248,59,133, + 233,15,135,244,251,41,252,248,193,231,3,3,189,233,248,3,139,41,137,47,139, + 105,4,131,193,8,137,111,4,131,199,8,131,232,1,15,133,244,3,248,4,139,124, + 36,16,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248, + 5,137,108,36,4,139,108,36,48,137,149,233,137,68,36,8,137,44,36,137,116,36, + 24,232,251,1,35,139,149,233,15,182,78,252,253,252,233,244,1,248,7,255,128, + 165,233,235,139,131,233,137,171,233,137,133,233,252,233,244,2,255,3,68,36, + 20,255,129,124,253,202,4,239,139,44,202,15,133,244,58,141,84,202,8,137,114, + 252,252,139,181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,36, + 171,255,141,76,202,8,137,215,139,105,252,248,129,121,253,252,252,239,15,133, + 244,29,248,59,139,114,252,252,252,247,198,237,15,133,244,253,248,1,137,106, + 252,248,137,68,36,20,131,232,1,15,132,244,249,248,2,139,41,137,47,139,105, + 4,131,193,8,137,111,4,131,199,8,131,232,1,15,133,244,2,139,106,252,248,248, + 3,139,68,36,20,128,189,233,1,15,135,244,251,248,4,139,181,233,139,14,15,182, + 252,233,15,182,205,131,198,4,252,255,36,171,248,5,255,252,247,198,237,15, + 133,244,4,15,182,78,252,253,252,247,209,141,12,202,139,121,252,248,139,191, + 233,139,191,233,252,233,244,4,248,7,129,252,238,239,252,247,198,237,15,133, + 244,254,41,252,242,137,215,139,114,252,252,252,233,244,1,248,8,129,198,239, + 252,233,244,1,255,141,76,202,8,139,105,232,139,65,252,236,137,41,137,65,4, + 139,105,252,240,139,65,252,244,137,105,8,137,65,12,139,105,224,139,65,228, + 137,105,252,248,137,65,252,252,129,252,248,239,184,237,15,133,244,29,137, + 202,137,114,252,252,139,181,233,139,14,15,182,252,233,15,182,205,131,198, + 4,252,255,36,171,255,137,124,36,16,137,92,36,20,139,108,202,252,240,139,68, + 202,252,248,139,157,233,131,198,4,139,189,233,248,1,57,216,15,131,244,251, + 129,124,253,199,4,239,15,132,244,250,255,219,68,202,252,248,255,139,108,199, + 4,137,108,202,12,139,44,199,137,108,202,8,131,192,1,255,137,68,202,252,248, + 248,2,15,183,70,252,254,141,180,253,134,233,248,3,139,92,36,20,139,124,36, + 16,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,4, + 131,192,1,255,137,68,202,252,248,255,252,233,244,1,248,5,41,216,248,6,59, + 133,233,15,135,244,3,105,252,248,239,3,189,233,129,191,233,239,15,132,244, + 253,141,92,24,1,139,175,233,139,135,233,137,44,202,137,68,202,4,139,175,233, + 139,135,233,137,108,202,8,137,68,202,12,137,92,202,252,248,252,233,244,2, + 248,7,255,131,192,1,252,233,244,6,255,129,124,253,202,252,236,239,15,133, + 244,251,139,108,202,232,129,124,253,202,252,244,239,15,133,244,251,129,124, + 253,202,252,252,239,15,133,244,251,128,189,233,235,15,133,244,251,141,180, + 253,134,233,199,68,202,252,248,0,0,0,0,248,1,139,6,15,182,204,15,182,232, + 131,198,4,193,232,16,252,255,36,171,248,5,198,70,252,252,235,141,180,253, + 134,233,198,6,235,252,233,244,1,255,15,182,252,236,15,182,192,137,124,36, + 16,141,188,253,194,233,141,12,202,43,122,252,252,133,252,237,15,132,244,251, + 141,108,252,233,252,248,57,215,15,131,244,248,248,1,139,71,252,248,137,1, + 139,71,252,252,131,199,8,137,65,4,131,193,8,57,252,233,15,131,244,249,57, + 215,15,130,244,1,248,2,199,65,4,237,131,193,8,57,252,233,15,130,244,2,248, + 3,139,124,36,16,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255, + 36,171,248,5,199,68,36,20,1,0,0,0,137,208,41,252,248,15,134,244,3,137,197, + 
193,252,237,3,131,197,1,137,108,36,20,139,108,36,48,1,200,59,133,233,15,135, + 244,253,248,6,255,139,71,252,248,137,1,139,71,252,252,131,199,8,137,65,4, + 131,193,8,57,215,15,130,244,6,252,233,244,3,248,7,137,149,233,137,141,233, + 137,116,36,24,41,215,139,84,36,20,131,252,234,1,137,252,233,232,251,1,0,139, + 149,233,139,141,233,1,215,252,233,244,6,255,193,225,3,255,248,1,139,114,252, + 252,137,68,36,20,252,247,198,237,15,133,244,253,255,248,13,137,215,131,232, + 1,15,132,244,249,248,2,139,44,15,137,111,252,248,139,108,15,4,137,111,252, + 252,131,199,8,131,232,1,15,133,244,2,248,3,139,68,36,20,15,182,110,252,255, + 248,5,57,197,15,135,244,252,255,139,108,10,4,137,106,252,252,139,44,10,137, + 106,252,248,255,248,5,56,70,252,255,15,135,244,252,255,15,182,78,252,253, + 252,247,209,141,20,202,139,122,252,248,139,191,233,139,191,233,139,6,15,182, + 204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,6,255,199,71,252,252, + 237,131,199,8,255,199,68,194,252,244,237,255,131,192,1,252,233,244,5,248, + 7,141,174,233,252,247,197,237,15,133,244,14,41,252,234,255,1,252,233,255, + 137,252,245,209,252,237,129,229,239,102,131,172,253,43,233,1,15,132,244,148, + 255,141,12,202,255,129,121,253,4,239,15,133,244,255,255,129,121,253,12,239, + 15,133,244,60,129,121,253,20,239,15,133,244,60,139,41,131,121,16,0,15,140, + 244,251,255,129,121,253,12,239,15,133,244,165,129,121,253,20,239,15,133,244, + 165,255,139,105,16,133,252,237,15,136,244,251,3,41,15,128,244,247,137,41, + 255,59,105,8,199,65,28,237,137,105,24,255,15,142,244,253,248,1,248,6,141, + 180,253,134,233,255,141,180,253,134,233,15,183,70,252,254,15,142,245,248, + 1,248,6,255,15,143,244,253,248,6,141,180,253,134,233,248,1,255,248,7,139, + 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,5,255,3,41, + 15,128,244,1,137,41,255,15,141,244,7,255,141,180,253,134,233,15,183,70,252, + 254,15,141,245,255,15,140,244,7,255,252,233,244,6,248,9,255,129,121,253,4, + 239,255,15,131,244,60,129,121,253,12,239,15,131,244,60,255,129,121,253,12, + 239,15,131,244,165,129,121,253,20,239,15,131,244,165,255,139,105,20,255,129, + 252,253,239,15,131,244,60,255,252,242,15,16,1,252,242,15,16,73,8,255,252, + 242,15,88,65,16,252,242,15,17,1,133,252,237,15,136,244,249,255,15,140,244, + 249,255,102,15,46,200,248,1,252,242,15,17,65,24,255,221,65,8,221,1,255,220, + 65,16,221,17,221,81,24,133,252,237,15,136,244,247,255,221,81,24,15,140,244, + 247,255,217,201,248,1,255,15,183,70,252,254,255,15,131,244,7,255,15,131,244, + 248,141,180,253,134,233,255,141,180,253,134,233,15,183,70,252,254,15,131, + 245,255,15,130,244,7,255,15,130,244,248,141,180,253,134,233,255,248,3,102, + 15,46,193,252,233,244,1,255,141,12,202,139,105,4,129,252,253,239,15,132,244, + 247,255,137,105,252,252,139,41,137,105,252,248,252,233,245,255,141,180,253, + 134,233,139,1,137,105,252,252,137,65,252,248,255,139,139,233,139,4,129,139, + 128,233,139,108,36,48,137,147,233,137,171,233,252,255,224,255,141,180,253, + 134,233,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,255, + 137,252,245,209,252,237,129,229,239,102,131,172,253,43,233,1,15,132,244,150, + 255,139,190,233,139,108,36,48,141,12,202,59,141,233,15,135,244,24,15,182, + 142,233,57,200,15,134,244,249,248,2,255,15,183,70,252,254,252,233,245,255, + 248,3,199,68,194,252,252,237,131,192,1,57,200,15,134,244,3,252,233,244,2, + 255,141,44,197,237,141,4,194,139,122,252,248,137,104,252,252,137,120,252, + 248,139,108,36,48,141,12,200,59,141,233,15,135,244,23,137,209,137,194,15, + 
182,174,233,133,252,237,15,132,244,248,248,1,131,193,8,57,209,15,131,244, + 249,139,121,252,248,137,56,139,121,252,252,137,120,4,131,192,8,199,65,252, + 252,237,131,252,237,1,15,133,244,1,248,2,255,139,190,233,139,6,15,182,204, + 15,182,232,131,198,4,193,232,16,252,255,36,171,255,248,3,199,64,4,237,131, + 192,8,131,252,237,1,15,133,244,3,252,233,244,2,255,139,106,252,248,139,189, + 233,139,108,36,48,141,68,194,252,248,137,149,233,141,136,233,59,141,233,137, + 133,233,255,137,44,36,255,137,124,36,4,137,44,36,255,15,135,244,22,199,131, + 233,237,255,252,255,215,255,252,255,147,233,255,199,131,233,237,139,149,233, + 141,12,194,252,247,217,3,141,233,139,114,252,252,252,233,244,12,255,254,0 +}; + +enum { + GLOB_vm_returnp, + GLOB_cont_dispatch, + GLOB_vm_returnc, + GLOB_BC_RET_Z, + GLOB_vm_return, + GLOB_vm_leave_cp, + GLOB_vm_leave_unw, + GLOB_vm_unwind_c, + GLOB_vm_unwind_c_eh, + GLOB_vm_unwind_rethrow, + GLOB_vm_unwind_ff, + GLOB_vm_unwind_ff_eh, + GLOB_vm_growstack_c, + GLOB_vm_growstack_v, + GLOB_vm_growstack_f, + GLOB_vm_resume, + GLOB_vm_pcall, + GLOB_vm_call, + GLOB_vm_call_dispatch, + GLOB_vmeta_call, + GLOB_vm_call_dispatch_f, + GLOB_vm_cpcall, + GLOB_vm_call_tail, + GLOB_cont_cat, + GLOB_cont_ra, + GLOB_BC_CAT_Z, + GLOB_vmeta_tgets, + GLOB_vmeta_tgetb, + GLOB_vmeta_tgetv, + GLOB_vmeta_tsets, + GLOB_vmeta_tsetb, + GLOB_vmeta_tsetv, + GLOB_cont_nop, + GLOB_vmeta_comp, + GLOB_vmeta_binop, + GLOB_cont_condt, + GLOB_cont_condf, + GLOB_vmeta_equal, + GLOB_vmeta_equal_cd, + GLOB_vmeta_arith_vno, + GLOB_vmeta_arith_vn, + GLOB_vmeta_arith_nvo, + GLOB_vmeta_arith_nv, + GLOB_vmeta_unm, + GLOB_vmeta_arith_vvo, + GLOB_vmeta_arith_vv, + GLOB_vmeta_len, + GLOB_BC_LEN_Z, + GLOB_vmeta_call_ra, + GLOB_BC_CALLT_Z, + GLOB_vmeta_for, + GLOB_ff_assert, + GLOB_fff_fallback, + GLOB_fff_res_, + GLOB_ff_type, + GLOB_fff_res1, + GLOB_ff_getmetatable, + GLOB_ff_setmetatable, + GLOB_ff_rawget, + GLOB_ff_tonumber, + GLOB_fff_resi, + GLOB_fff_resxmm0, + GLOB_fff_resn, + GLOB_ff_tostring, + GLOB_fff_gcstep, + GLOB_ff_next, + GLOB_fff_res2, + GLOB_fff_res, + GLOB_ff_pairs, + GLOB_ff_ipairs_aux, + GLOB_fff_res0, + GLOB_ff_ipairs, + GLOB_ff_pcall, + GLOB_ff_xpcall, + GLOB_ff_coroutine_resume, + GLOB_ff_coroutine_wrap_aux, + GLOB_ff_coroutine_yield, + GLOB_ff_math_abs, + GLOB_fff_resbit, + GLOB_ff_math_floor, + GLOB_vm_floor, + GLOB_ff_math_ceil, + GLOB_vm_ceil, + GLOB_ff_math_sqrt, + GLOB_ff_math_log, + GLOB_ff_math_log10, + GLOB_ff_math_exp, + GLOB_vm_exp_x87, + GLOB_ff_math_sin, + GLOB_ff_math_cos, + GLOB_ff_math_tan, + GLOB_ff_math_asin, + GLOB_ff_math_acos, + GLOB_ff_math_atan, + GLOB_ff_math_sinh, + GLOB_ff_math_cosh, + GLOB_ff_math_tanh, + GLOB_ff_math_deg, + GLOB_ff_math_rad, + GLOB_ff_math_atan2, + GLOB_ff_math_ldexp, + GLOB_ff_math_frexp, + GLOB_ff_math_modf, + GLOB_vm_trunc, + GLOB_ff_math_fmod, + GLOB_ff_math_pow, + GLOB_vm_pow, + GLOB_ff_math_min, + GLOB_ff_math_max, + GLOB_ff_string_len, + GLOB_ff_string_byte, + GLOB_ff_string_char, + GLOB_fff_newstr, + GLOB_ff_string_sub, + GLOB_fff_emptystr, + GLOB_ff_string_rep, + GLOB_fff_fallback_2, + GLOB_ff_string_reverse, + GLOB_fff_fallback_1, + GLOB_ff_string_lower, + GLOB_ff_string_upper, + GLOB_ff_table_getn, + GLOB_ff_bit_tobit, + GLOB_ff_bit_band, + GLOB_fff_fallback_bit_op, + GLOB_ff_bit_bor, + GLOB_ff_bit_bxor, + GLOB_ff_bit_bswap, + GLOB_ff_bit_bnot, + GLOB_ff_bit_lshift, + GLOB_ff_bit_rshift, + GLOB_ff_bit_arshift, + GLOB_ff_bit_rol, + GLOB_ff_bit_ror, + GLOB_vm_record, + GLOB_vm_rethook, + GLOB_vm_inshook, + GLOB_cont_hook, + GLOB_vm_hotloop, + 
GLOB_vm_callhook, + GLOB_vm_hotcall, + GLOB_vm_exit_handler, + GLOB_vm_exit_interp, + GLOB_vm_floor_sse, + GLOB_vm_ceil_sse, + GLOB_vm_trunc_sse, + GLOB_vm_mod, + GLOB_vm_exp2_x87, + GLOB_vm_exp2raw, + GLOB_vm_pow_sse, + GLOB_vm_powi_sse, + GLOB_vm_foldfpm, + GLOB_vm_foldarith, + GLOB_vm_cpuid, + GLOB_vm_ffi_call, + GLOB_assert_bad_for_arg_type, + GLOB_BC_MODVN_Z, + GLOB_BC_TGETS_Z, + GLOB_BC_TSETS_Z, + GLOB__MAX +}; +static const char *const globnames[] = { + "vm_returnp", + "cont_dispatch", + "vm_returnc", + "BC_RET_Z", + "vm_return", + "vm_leave_cp", + "vm_leave_unw", + "vm_unwind_c@8", + "vm_unwind_c_eh", + "vm_unwind_rethrow", + "vm_unwind_ff@4", + "vm_unwind_ff_eh", + "vm_growstack_c", + "vm_growstack_v", + "vm_growstack_f", + "vm_resume", + "vm_pcall", + "vm_call", + "vm_call_dispatch", + "vmeta_call", + "vm_call_dispatch_f", + "vm_cpcall", + "vm_call_tail", + "cont_cat", + "cont_ra", + "BC_CAT_Z", + "vmeta_tgets", + "vmeta_tgetb", + "vmeta_tgetv", + "vmeta_tsets", + "vmeta_tsetb", + "vmeta_tsetv", + "cont_nop", + "vmeta_comp", + "vmeta_binop", + "cont_condt", + "cont_condf", + "vmeta_equal", + "vmeta_equal_cd", + "vmeta_arith_vno", + "vmeta_arith_vn", + "vmeta_arith_nvo", + "vmeta_arith_nv", + "vmeta_unm", + "vmeta_arith_vvo", + "vmeta_arith_vv", + "vmeta_len", + "BC_LEN_Z", + "vmeta_call_ra", + "BC_CALLT_Z", + "vmeta_for", + "ff_assert", + "fff_fallback", + "fff_res_", + "ff_type", + "fff_res1", + "ff_getmetatable", + "ff_setmetatable", + "ff_rawget", + "ff_tonumber", + "fff_resi", + "fff_resxmm0", + "fff_resn", + "ff_tostring", + "fff_gcstep", + "ff_next", + "fff_res2", + "fff_res", + "ff_pairs", + "ff_ipairs_aux", + "fff_res0", + "ff_ipairs", + "ff_pcall", + "ff_xpcall", + "ff_coroutine_resume", + "ff_coroutine_wrap_aux", + "ff_coroutine_yield", + "ff_math_abs", + "fff_resbit", + "ff_math_floor", + "vm_floor", + "ff_math_ceil", + "vm_ceil", + "ff_math_sqrt", + "ff_math_log", + "ff_math_log10", + "ff_math_exp", + "vm_exp_x87", + "ff_math_sin", + "ff_math_cos", + "ff_math_tan", + "ff_math_asin", + "ff_math_acos", + "ff_math_atan", + "ff_math_sinh", + "ff_math_cosh", + "ff_math_tanh", + "ff_math_deg", + "ff_math_rad", + "ff_math_atan2", + "ff_math_ldexp", + "ff_math_frexp", + "ff_math_modf", + "vm_trunc", + "ff_math_fmod", + "ff_math_pow", + "vm_pow", + "ff_math_min", + "ff_math_max", + "ff_string_len", + "ff_string_byte", + "ff_string_char", + "fff_newstr", + "ff_string_sub", + "fff_emptystr", + "ff_string_rep", + "fff_fallback_2", + "ff_string_reverse", + "fff_fallback_1", + "ff_string_lower", + "ff_string_upper", + "ff_table_getn", + "ff_bit_tobit", + "ff_bit_band", + "fff_fallback_bit_op", + "ff_bit_bor", + "ff_bit_bxor", + "ff_bit_bswap", + "ff_bit_bnot", + "ff_bit_lshift", + "ff_bit_rshift", + "ff_bit_arshift", + "ff_bit_rol", + "ff_bit_ror", + "vm_record", + "vm_rethook", + "vm_inshook", + "cont_hook", + "vm_hotloop", + "vm_callhook", + "vm_hotcall", + "vm_exit_handler", + "vm_exit_interp", + "vm_floor_sse", + "vm_ceil_sse", + "vm_trunc_sse", + "vm_mod", + "vm_exp2_x87", + "vm_exp2raw", + "vm_pow_sse", + "vm_powi_sse", + "vm_foldfpm", + "vm_foldarith", + "vm_cpuid", + "vm_ffi_call@4", + "assert_bad_for_arg_type", + "BC_MODVN_Z", + "BC_TGETS_Z", + "BC_TSETS_Z", + (const char *)0 +}; +static const char *const extnames[] = { + "lj_state_growstack@8", + "lj_meta_tget", + "lj_meta_tset", + "lj_meta_comp", + "lj_meta_equal", + "lj_meta_equal_cd@8", + "lj_meta_arith", + "lj_meta_len@8", + "lj_meta_call", + "lj_meta_for@8", + "lj_tab_get", + "lj_str_fromnumber@8", + 
"lj_str_fromnum@8", + "lj_tab_next", + "lj_tab_getinth@8", + "lj_ffh_coroutine_wrap_err@8", + "lj_vm_sinh", + "lj_vm_cosh", + "lj_vm_tanh", + "lj_str_new", + "lj_tab_len@4", + "lj_gc_step@4", + "lj_dispatch_ins@8", + "lj_trace_hot@8", + "lj_dispatch_call@8", + "lj_trace_exit@8", + "lj_err_throw@8", + "lj_meta_cat", + "lj_gc_barrieruv@8", + "lj_func_closeuv@8", + "lj_func_newL_gc", + "lj_tab_new", + "lj_gc_step_fixtop@4", + "lj_tab_dup@8", + "lj_tab_newkey", + "lj_tab_reasize", + (const char *)0 +}; +#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V) +#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V) +#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V) +#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V) +#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V) +#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V) +#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V) +#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V) +#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V) +#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V) +#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V) +#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V) +#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V) +#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field)) +#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field)) +#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto)) + +/* Generate subroutines used by opcodes and other parts of the VM. */ +/* The .code_sub section should be last to help static branch prediction. */ +static void build_subroutines(BuildCtx *ctx, int cmov, int sse) +{ + dasm_put(Dst, 0); + dasm_put(Dst, 2, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, FRAME_TYPE, DISPATCH_GL(vmstate), ~LJ_VMST_C); + dasm_put(Dst, 114, Dt1(->base), Dt1(->top), Dt1(->cframe), Dt1(->maxstack), LJ_TNIL); + dasm_put(Dst, 200, Dt1(->top), Dt1(->top), Dt1(->glref), Dt2(->vmstate), ~LJ_VMST_C, CFRAME_RAWMASK); + dasm_put(Dst, 275, 1+1, Dt1(->base), Dt1(->glref), GG_G2DISP, LJ_TFALSE, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, LUA_MINSTACK, -4+PC2PROTO(framesize), Dt1(->base)); + dasm_put(Dst, 353, Dt1(->top), Dt1(->base), Dt1(->top), Dt7(->pc), FRAME_CP, CFRAME_RESUME, Dt1(->glref), GG_G2DISP, Dt1(->cframe), Dt1(->status), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->status), Dt1(->base), Dt1(->top), FRAME_TYPE); + dasm_put(Dst, 495, FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base)); + dasm_put(Dst, 573, Dt1(->top), LJ_TFUNC, Dt7(->pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), FRAME_CP, LJ_TNIL); + dasm_put(Dst, 753, Dt7(->pc), PC2PROTO(k), LJ_TSTR, BC_GGET, DISPATCH_GL(tmptv), LJ_TTAB); + dasm_put(Dst, 875); + if (LJ_DUALNUM) { + dasm_put(Dst, 881, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 891); + } else { + dasm_put(Dst, 904); + } + dasm_put(Dst, 917, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 2+1, LJ_TSTR, BC_GSET); + dasm_put(Dst, 1069, DISPATCH_GL(tmptv), LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 881, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 891); + } else { + dasm_put(Dst, 904); + } + dasm_put(Dst, 1092, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 3+1, Dt1(->base), Dt1(->base)); + dasm_put(Dst, 1286, -BCBIAS_J*4, LJ_TISTRUECOND, LJ_TISTRUECOND, Dt1(->base)); + dasm_put(Dst, 1393); +#if LJ_HASFFI + dasm_put(Dst, 1408, Dt1(->base)); +#endif + dasm_put(Dst, 1439); +#if LJ_DUALNUM + dasm_put(Dst, 1442); +#endif + dasm_put(Dst, 1448); +#if LJ_DUALNUM + dasm_put(Dst, 
875); +#endif + dasm_put(Dst, 1460); +#if LJ_DUALNUM + dasm_put(Dst, 1442); +#endif + dasm_put(Dst, 1488, Dt1(->base), Dt1(->base), FRAME_CONT, 2+1, Dt1(->base), Dt1(->base)); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 1598); +#else + dasm_put(Dst, 1617); +#endif + dasm_put(Dst, 1622, Dt1(->base), Dt1(->base), Dt7(->pc), Dt1(->base), Dt1(->base), GG_DISP2STATIC, 1+1, LJ_TISTRUECOND); + dasm_put(Dst, 1811, 1+1, ~LJ_TNUMX); + if (cmov) { + dasm_put(Dst, 1869); + } else { + dasm_put(Dst, 1873); + } + dasm_put(Dst, 1882, ((char *)(&((GCfuncC *)0)->upvalue)), LJ_TSTR, 1+1, LJ_TTAB, Dt6(->metatable), LJ_TNIL, DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable), LJ_TTAB); + dasm_put(Dst, 1965, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), DtB(->next), LJ_TNIL); + dasm_put(Dst, 2023, LJ_TUDATA, LJ_TISNUM, LJ_TNUMX, DISPATCH_GL(gcroot[GCROOT_BASEMT]), 2+1); + dasm_put(Dst, 2086, LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->marked), LJ_GC_BLACK, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + dasm_put(Dst, 2158, 2+1, LJ_TTAB, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2247); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 2269); + } else { + dasm_put(Dst, 2279); + } + dasm_put(Dst, 2286, 1+1, LJ_TSTR, LJ_TSTR, LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 2352, Dt1(->base)); + if (LJ_DUALNUM) { + dasm_put(Dst, 2376); + } else { + dasm_put(Dst, 2381); + } + dasm_put(Dst, 2386, Dt1(->base), 1+1, LJ_TTAB, Dt1(->base), Dt1(->top), Dt1(->base), 1+2); + dasm_put(Dst, 2495, LJ_TNIL, LJ_TNIL, 1+1, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 2542, Dt6(->metatable)); +#endif + dasm_put(Dst, 2551, Dt8(->upvalue[0]), LJ_TFUNC, LJ_TNIL, 1+3, 1+1, LJ_TTAB, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2537); + } else { + dasm_put(Dst, 2264); + } + dasm_put(Dst, 2606); + if (LJ_DUALNUM) { + dasm_put(Dst, 2611, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 2627); + } else { + dasm_put(Dst, 2666); + } + dasm_put(Dst, 2684, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->hmask), 1+0); + dasm_put(Dst, 2522, 1+1, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 2542, Dt6(->metatable)); +#endif + dasm_put(Dst, 2765, Dt8(->upvalue[0]), LJ_TFUNC); + if (LJ_DUALNUM) { + dasm_put(Dst, 2786, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 2798); + } else { + dasm_put(Dst, 2808); + } + dasm_put(Dst, 2815, 1+3, 1+1, 8+FRAME_PCALL, DISPATCH_GL(hookmask), HOOK_ACTIVE_SHIFT, 2+1, LJ_TFUNC); + dasm_put(Dst, 2879, LJ_TFUNC, 16+FRAME_PCALL, 1+1, LJ_TTHREAD, Dt1(->cframe), Dt1(->status), LUA_YIELD, Dt1(->top)); + dasm_put(Dst, 2967, Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP); + dasm_put(Dst, 3068, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack), LJ_TTRUE, FRAME_TYPE); + dasm_put(Dst, 3182, LJ_TFALSE, Dt1(->top), Dt1(->top), 1+2, Dt1(->top), Dt1(->base), Dt8(->upvalue[0].gcr), Dt1(->cframe)); + dasm_put(Dst, 3280, Dt1(->status), LUA_YIELD, Dt1(->top), Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top)); + dasm_put(Dst, 3346, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack)); + dasm_put(Dst, 3447, FRAME_TYPE, Dt1(->top), Dt1(->base), Dt1(->cframe), CFRAME_RESUME); + dasm_put(Dst, 3560, Dt1(->base), 
Dt1(->top), Dt1(->cframe), LUA_YIELD, Dt1(->status)); + if (!LJ_DUALNUM) { + dasm_put(Dst, 3586); + } + if (sse) { + dasm_put(Dst, 3589); + } + dasm_put(Dst, 3604, 1+1); + if (LJ_DUALNUM) { + dasm_put(Dst, 3615, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 3695, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3705); + } else { + dasm_put(Dst, 3741); + } + dasm_put(Dst, 3758, 1+1, FRAME_TYPE, LJ_TNIL); + if (LJ_DUALNUM) { + dasm_put(Dst, 3850, LJ_TISNUM); + } else { + dasm_put(Dst, 3695, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3872); + if (LJ_DUALNUM) { + dasm_put(Dst, 3881); + } + dasm_put(Dst, 2274); + } else { + dasm_put(Dst, 3915); + if (LJ_DUALNUM) { + dasm_put(Dst, 3921); + if (cmov) { + dasm_put(Dst, 3944); + } else { + dasm_put(Dst, 3950); + } + dasm_put(Dst, 3957); + } else { + dasm_put(Dst, 2281); + } + } + dasm_put(Dst, 3974); + if (LJ_DUALNUM) { + dasm_put(Dst, 3850, LJ_TISNUM); + } else { + dasm_put(Dst, 3695, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 3977); + if (LJ_DUALNUM) { + dasm_put(Dst, 3881); + } + dasm_put(Dst, 2274); + } else { + dasm_put(Dst, 3986); + if (LJ_DUALNUM) { + dasm_put(Dst, 3921); + if (cmov) { + dasm_put(Dst, 3944); + } else { + dasm_put(Dst, 3950); + } + dasm_put(Dst, 3957); + } else { + dasm_put(Dst, 2281); + } + } + if (sse) { + dasm_put(Dst, 3992, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 4021, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 4050, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 4119, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 4176, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1); + dasm_put(Dst, 4239, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM); + dasm_put(Dst, 4329); + if (sse) { + dasm_put(Dst, 4341, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 4372, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 4397); + if (sse) { + dasm_put(Dst, 4411, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 4442, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 4467); + if (sse) { + dasm_put(Dst, 4481, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 4512, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 4537); + if (sse) { + dasm_put(Dst, 4553, 1+1, LJ_TISNUM, Dt8(->upvalue[0])); + } else { + dasm_put(Dst, 4592, 1+1, LJ_TISNUM, Dt8(->upvalue[0])); + } + dasm_put(Dst, 4625, 2+1, LJ_TISNUM, LJ_TISNUM, 2+1, LJ_TISNUM, LJ_TISNUM); + dasm_put(Dst, 4690, 1+1, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4789); + } else { + dasm_put(Dst, 4795); + } + dasm_put(Dst, 4804); + if (sse) { + dasm_put(Dst, 4829); + } else { + dasm_put(Dst, 4835); + } + dasm_put(Dst, 4838, 1+2); + if (sse) { + dasm_put(Dst, 4847); + } else { + dasm_put(Dst, 4855); + } + dasm_put(Dst, 4863); + if (sse) { + dasm_put(Dst, 4866); + } else { + dasm_put(Dst, 4898); + } + dasm_put(Dst, 4917); + if (sse) { + dasm_put(Dst, 4933, 1+1, LJ_TISNUM); + } else { + dasm_put(Dst, 4958, 1+1, LJ_TISNUM); + } + dasm_put(Dst, 4980); + if (sse) { + dasm_put(Dst, 5002); + } else { + dasm_put(Dst, 5028); + } + dasm_put(Dst, 5045, 1+2); + if (sse) { + dasm_put(Dst, 5085); + } else { + dasm_put(Dst, 5093); + } + dasm_put(Dst, 5103, 2+1, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 5155, 2+1, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 5202, 2+1, LJ_TISNUM, LJ_TISNUM); + } + dasm_put(Dst, 5243, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5256, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4789); + } else { + dasm_put(Dst, 4795); + } + dasm_put(Dst, 5306); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 5317, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5338); + } else { + dasm_put(Dst, 2264); + } + dasm_put(Dst, 5359); + } else { 
+ dasm_put(Dst, 5384, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5402); + } else { + dasm_put(Dst, 5420); + } + dasm_put(Dst, 5425); + if (cmov) { + dasm_put(Dst, 5435); + } else { + dasm_put(Dst, 5443); + } + dasm_put(Dst, 5376); + } + dasm_put(Dst, 5464, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5477, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 4789); + } else { + dasm_put(Dst, 4795); + } + dasm_put(Dst, 5306); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 5317, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5338); + } else { + dasm_put(Dst, 2264); + } + dasm_put(Dst, 5527); + } else { + dasm_put(Dst, 5384, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5402); + } else { + dasm_put(Dst, 5420); + } + dasm_put(Dst, 5425); + if (cmov) { + dasm_put(Dst, 5552); + } else { + dasm_put(Dst, 5560); + } + dasm_put(Dst, 5376); + } + if (!sse) { + dasm_put(Dst, 5581); + } + dasm_put(Dst, 5590, 1+1, LJ_TSTR); + if (LJ_DUALNUM) { + dasm_put(Dst, 5612, Dt5(->len)); + } else if (sse) { + dasm_put(Dst, 5620, Dt5(->len)); + } else { + dasm_put(Dst, 5631, Dt5(->len)); + } + dasm_put(Dst, 5639, 1+1, LJ_TSTR, Dt5(->len), Dt5([1])); + if (LJ_DUALNUM) { + dasm_put(Dst, 3969); + } else if (sse) { + dasm_put(Dst, 5677); + } else { + dasm_put(Dst, 5687); + } + dasm_put(Dst, 5700, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5731); + } else if (sse) { + dasm_put(Dst, 5754); + } else { + dasm_put(Dst, 5780); + } + dasm_put(Dst, 5804, Dt1(->base), Dt1(->base), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+2, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 5907); + } else if (sse) { + dasm_put(Dst, 5919); + } else { + dasm_put(Dst, 5934); + } + dasm_put(Dst, 5946, LJ_TSTR, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 2537); + } else { + dasm_put(Dst, 2264); + } + dasm_put(Dst, 5963, Dt5(->len)); + if (LJ_DUALNUM) { + dasm_put(Dst, 5973); + } else if (sse) { + dasm_put(Dst, 5977); + } else { + dasm_put(Dst, 5984); + } + dasm_put(Dst, 5996, sizeof(GCstr)-1); + dasm_put(Dst, 6071, 2+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold)); + dasm_put(Dst, 6130, LJ_TSTR, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6147); + } else if (sse) { + dasm_put(Dst, 6155); + } else { + dasm_put(Dst, 6166); + } + dasm_put(Dst, 6182, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(tmpbuf.buf), 1+1); + dasm_put(Dst, 6247, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf)); + dasm_put(Dst, 6310, 1+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz)); + dasm_put(Dst, 6381, sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), 1+1); + dasm_put(Dst, 6466, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf)); + dasm_put(Dst, 6536, 1+1, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 6604); + } else if (sse) { + dasm_put(Dst, 6611); + } else { + dasm_put(Dst, 6621); + } + dasm_put(Dst, 6632, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6648); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6665); + } else { + dasm_put(Dst, 6694); + } + dasm_put(Dst, 111); + if (LJ_DUALNUM || sse) { + if (!sse) { + dasm_put(Dst, 6712); + } + dasm_put(Dst, 6716); + } else { + dasm_put(Dst, 6624); + } + dasm_put(Dst, 6721, 1+1); + if (sse) { + dasm_put(Dst, 6732); + } else { + dasm_put(Dst, 6747); + } 
+ dasm_put(Dst, 2241, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6773); + } else { + dasm_put(Dst, 6788); + } + dasm_put(Dst, 6801, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6826); + } else { + dasm_put(Dst, 6846); + } + if (sse) { + dasm_put(Dst, 6851); + } else { + dasm_put(Dst, 6868); + } + dasm_put(Dst, 6881, 1+1); + if (sse) { + dasm_put(Dst, 6732); + } else { + dasm_put(Dst, 6747); + } + dasm_put(Dst, 2241, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6773); + } else { + dasm_put(Dst, 6788); + } + dasm_put(Dst, 6801, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6899); + } else { + dasm_put(Dst, 6846); + } + if (sse) { + dasm_put(Dst, 6919); + } else { + dasm_put(Dst, 6936); + } + dasm_put(Dst, 6949, 1+1); + if (sse) { + dasm_put(Dst, 6732); + } else { + dasm_put(Dst, 6747); + } + dasm_put(Dst, 2241, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6773); + } else { + dasm_put(Dst, 6788); + } + dasm_put(Dst, 6801, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6967); + } else { + dasm_put(Dst, 6846); + } + if (sse) { + dasm_put(Dst, 6987); + } else { + dasm_put(Dst, 7004); + } + dasm_put(Dst, 7017, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6665); + } else { + dasm_put(Dst, 7040); + } + dasm_put(Dst, 7061, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6665); + } else { + dasm_put(Dst, 7040); + } + dasm_put(Dst, 7085); + if (LJ_DUALNUM) { + dasm_put(Dst, 6716); + } else if (sse) { + dasm_put(Dst, 7091); + } else { + dasm_put(Dst, 7103); + } + dasm_put(Dst, 7116); + if (LJ_DUALNUM) { + dasm_put(Dst, 7127, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6665); + } else { + dasm_put(Dst, 7040); + } + dasm_put(Dst, 7143, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 7158, 2+1, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 7230, 2+1, LJ_TISNUM, LJ_TISNUM); + } + dasm_put(Dst, 7294); + if (LJ_DUALNUM) { + dasm_put(Dst, 7301, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6665); + } else { + dasm_put(Dst, 7040); + } + dasm_put(Dst, 7143, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 7317, 2+1, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 7389, 2+1, LJ_TISNUM, LJ_TISNUM); + } + dasm_put(Dst, 7453); + if (LJ_DUALNUM) { + dasm_put(Dst, 7461, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6665); + } else { + dasm_put(Dst, 7040); + } + dasm_put(Dst, 7143, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 7477, 2+1, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 7549, 2+1, LJ_TISNUM, LJ_TISNUM); + } + dasm_put(Dst, 7613); + if (LJ_DUALNUM) { + dasm_put(Dst, 7621, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6665); + } else { + dasm_put(Dst, 7040); + } + dasm_put(Dst, 7143, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 7637, 2+1, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 7709, 2+1, LJ_TISNUM, LJ_TISNUM); + } + dasm_put(Dst, 7773); + if (LJ_DUALNUM) { + 
dasm_put(Dst, 7780, 1+1, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 6756); + } else { + dasm_put(Dst, 2264); + } + if (sse) { + dasm_put(Dst, 6665); + } else { + dasm_put(Dst, 7040); + } + dasm_put(Dst, 7143, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 7796, 2+1, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 7868, 2+1, LJ_TISNUM, LJ_TISNUM); + } + dasm_put(Dst, 7932, 1+2, 1+1, Dt1(->base), 8*LUA_MINSTACK, Dt1(->top), Dt1(->maxstack), Dt8(->f), Dt1(->base)); + dasm_put(Dst, 8008, Dt1(->top), Dt7(->pc), FRAME_TYPE, LUA_MINSTACK, Dt1(->base), Dt1(->base)); + dasm_put(Dst, 8132, Dt1(->top), Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 8170, DISPATCH_GL(hookmask), HOOK_VMEVENT, HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount)); +#endif + dasm_put(Dst, 8201, DISPATCH_GL(hookmask), HOOK_ACTIVE, DISPATCH_GL(hookmask), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE); + dasm_put(Dst, 8252, Dt1(->base), Dt1(->base), GG_DISP2STATIC); +#if LJ_HASJIT + dasm_put(Dst, 8318, Dt7(->pc), PC2PROTO(framesize), Dt1(->base), Dt1(->top), GG_DISP2J, DISPATCH_J(L)); +#endif + dasm_put(Dst, 8364); +#if LJ_HASJIT + dasm_put(Dst, 8196); +#endif + dasm_put(Dst, 8371); +#if LJ_HASJIT + dasm_put(Dst, 8374); +#endif + dasm_put(Dst, 8384, Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 8418); +#endif + dasm_put(Dst, 8423, Dt1(->base), Dt1(->top)); +#if LJ_HASJIT + dasm_put(Dst, 8452, DISPATCH_GL(vmstate), DISPATCH_GL(vmstate), ~LJ_VMST_EXIT, DISPATCH_J(exitno), DISPATCH_J(parent), 8*8+16, DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_J(L), DISPATCH_GL(jit_L), Dt1(->base), GG_DISP2J, Dt1(->cframe), CFRAME_RAWMASK, CFRAME_OFS_L, Dt1(->base), CFRAME_OFS_PC); +#endif + dasm_put(Dst, 8595); +#if LJ_HASJIT + dasm_put(Dst, 8598, Dt7(->pc), PC2PROTO(k), DISPATCH_GL(jit_L), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, BC_FUNCF); +#endif + dasm_put(Dst, 8676); + if (!sse) { + dasm_put(Dst, 8679); + } + dasm_put(Dst, 8724); + if (!sse) { + dasm_put(Dst, 8826); + } + dasm_put(Dst, 8871); + if (!sse) { + dasm_put(Dst, 8973); + } + dasm_put(Dst, 9012); + if (sse) { + dasm_put(Dst, 9117); + } else { + dasm_put(Dst, 9247); + } + dasm_put(Dst, 9294); + if (!sse) { + dasm_put(Dst, 9368); + if (cmov) { + dasm_put(Dst, 9379); + } else { + dasm_put(Dst, 9383); + } + dasm_put(Dst, 9390); + dasm_put(Dst, 9464); + dasm_put(Dst, 9564); + if (cmov) { + dasm_put(Dst, 9567); + } else { + dasm_put(Dst, 9571); + } + dasm_put(Dst, 9578); + if (cmov) { + dasm_put(Dst, 9379); + } else { + dasm_put(Dst, 9383); + } + dasm_put(Dst, 9596); + } else { + dasm_put(Dst, 9675); + } + dasm_put(Dst, 9678); + dasm_put(Dst, 9763); + dasm_put(Dst, 9893); + dasm_put(Dst, 10099); +#if LJ_HASJIT + if (sse) { + dasm_put(Dst, 10106); + dasm_put(Dst, 10163); + dasm_put(Dst, 10254); + } else { + dasm_put(Dst, 10296); + dasm_put(Dst, 10388); + } + dasm_put(Dst, 10434); +#endif + dasm_put(Dst, 10438); + if (sse) { + dasm_put(Dst, 10441); + dasm_put(Dst, 10546); + dasm_put(Dst, 10629); + } else { + dasm_put(Dst, 10701); + dasm_put(Dst, 10784); + if (cmov) { + dasm_put(Dst, 10839); + } else { + dasm_put(Dst, 10858); + } + dasm_put(Dst, 10434); + } + dasm_put(Dst, 10899); +#if LJ_HASFFI +#define DtE(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V) + dasm_put(Dst, 10955, DtE(->spadj)); +#if LJ_TARGET_WINDOWS + dasm_put(Dst, 10965, DtE(->spadj)); +#endif + dasm_put(Dst, 10969, DtE(->nsp), offsetof(CCallState, stack), DtE(->gpr[0]), DtE(->gpr[1]), DtE(->func), DtE(->gpr[0]), DtE(->gpr[1]), DtE(->resx87), 
DtE(->fpr[0].d[0])); + dasm_put(Dst, 11039, DtE(->fpr[0].f[0])); +#if LJ_TARGET_WINDOWS + dasm_put(Dst, 11045, DtE(->spadj)); +#endif + dasm_put(Dst, 11049); +#endif + dasm_put(Dst, 11056); +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 10436); +#endif + dasm_put(Dst, 10436); +} + +/* Generate the code for a single instruction. */ +static void build_ins(BuildCtx *ctx, BCOp op, int defop, int cmov, int sse) +{ + int vk = 0; + dasm_put(Dst, 11059, defop); + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + /* Remember: all ops branch for a true comparison, fall through otherwise. */ + + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: + if (LJ_DUALNUM) { + dasm_put(Dst, 11061, LJ_TISNUM, LJ_TISNUM); + switch (op) { + case BC_ISLT: + dasm_put(Dst, 11091); + break; + case BC_ISGE: + dasm_put(Dst, 11096); + break; + case BC_ISLE: + dasm_put(Dst, 11101); + break; + case BC_ISGT: + dasm_put(Dst, 11106); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 11111, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11164); + } else { + dasm_put(Dst, 11175); + } + dasm_put(Dst, 11186); + if (sse) { + dasm_put(Dst, 11193); + switch (op) { + case BC_ISLT: + dasm_put(Dst, 11213); + break; + case BC_ISGE: + dasm_put(Dst, 11218); + break; + case BC_ISLE: + dasm_put(Dst, 11223); + break; + case BC_ISGT: + dasm_put(Dst, 11228); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 11233); + } else { + dasm_put(Dst, 11238); + } + } else { + dasm_put(Dst, 11246, LJ_TISNUM, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11267); + } else { + dasm_put(Dst, 11288); + if (cmov) { + dasm_put(Dst, 3944); + } else { + dasm_put(Dst, 3950); + } + } + if (LJ_DUALNUM) { + switch (op) { + case BC_ISLT: + dasm_put(Dst, 11213); + break; + case BC_ISGE: + dasm_put(Dst, 11218); + break; + case BC_ISLE: + dasm_put(Dst, 11223); + break; + case BC_ISGT: + dasm_put(Dst, 11228); + break; + default: break; /* Shut up GCC. */ + } + dasm_put(Dst, 11233); + } else { + switch (op) { + case BC_ISLT: + dasm_put(Dst, 11304); + break; + case BC_ISGE: + dasm_put(Dst, 11309); + break; + case BC_ISLE: + dasm_put(Dst, 11314); + break; + case BC_ISGT: + dasm_put(Dst, 11319); + break; + default: break; /* Shut up GCC. 
*/ + } + dasm_put(Dst, 11324, -BCBIAS_J*4); + } + break; + + case BC_ISEQV: case BC_ISNEV: + vk = op == BC_ISEQV; + dasm_put(Dst, 11355); + if (LJ_DUALNUM) { + dasm_put(Dst, 11363, LJ_TISNUM, LJ_TISNUM); + if (vk) { + dasm_put(Dst, 11388); + } else { + dasm_put(Dst, 11393); + } + dasm_put(Dst, 11398, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11449); + } else { + dasm_put(Dst, 11456); + } + dasm_put(Dst, 11460); + if (sse) { + dasm_put(Dst, 11471); + } else { + dasm_put(Dst, 11483); + } + dasm_put(Dst, 11490); + } else { + dasm_put(Dst, 11495, LJ_TISNUM, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11514); + } else { + dasm_put(Dst, 11532); + if (cmov) { + dasm_put(Dst, 3944); + } else { + dasm_put(Dst, 3950); + } + } + iseqne_fp: + if (vk) { + dasm_put(Dst, 11545); + } else { + dasm_put(Dst, 11554); + } + iseqne_end: + if (vk) { + dasm_put(Dst, 11563, -BCBIAS_J*4); + if (!LJ_HASFFI) { + dasm_put(Dst, 4844); + } + } else { + if (!LJ_HASFFI) { + dasm_put(Dst, 4844); + } + dasm_put(Dst, 11578, -BCBIAS_J*4); + } + if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV || + op == BC_ISEQN || op == BC_ISNEN)) { + dasm_put(Dst, 11593); + } else { + dasm_put(Dst, 11336); + } + if (op == BC_ISEQV || op == BC_ISNEV) { + dasm_put(Dst, 11598); + if (LJ_HASFFI) { + dasm_put(Dst, 11601, LJ_TCDATA, LJ_TCDATA); + } + dasm_put(Dst, 11620, LJ_TISPRI, LJ_TISTABUD, Dt6(->metatable), Dt6(->nomm), 1<<MM_eq); + if (vk) { + dasm_put(Dst, 11676); + } else { + dasm_put(Dst, 11680); + } + dasm_put(Dst, 11686); + } else if (LJ_HASFFI) { + dasm_put(Dst, 11691, LJ_TCDATA); + if (LJ_DUALNUM && vk) { + dasm_put(Dst, 11698); + } else { + dasm_put(Dst, 11671); + } + dasm_put(Dst, 11703); + } + break; + case BC_ISEQS: case BC_ISNES: + vk = op == BC_ISEQS; + dasm_put(Dst, 11708, LJ_TSTR); + iseqne_test: + if (vk) { + dasm_put(Dst, 11549); + } else { + dasm_put(Dst, 748); + } + goto iseqne_end; + case BC_ISEQN: case BC_ISNEN: + vk = op == BC_ISEQN; + dasm_put(Dst, 11733); + if (LJ_DUALNUM) { + dasm_put(Dst, 11741, LJ_TISNUM, LJ_TISNUM); + if (vk) { + dasm_put(Dst, 11388); + } else { + dasm_put(Dst, 11393); + } + dasm_put(Dst, 11766, -BCBIAS_J*4, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 11813); + } else { + dasm_put(Dst, 11820); + } + dasm_put(Dst, 11824); + if (sse) { + dasm_put(Dst, 11831); + } else { + dasm_put(Dst, 11843); + } + dasm_put(Dst, 11490); + } else { + dasm_put(Dst, 11850, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 11859); + } else { + dasm_put(Dst, 11877); + if (cmov) { + dasm_put(Dst, 3944); + } else { + dasm_put(Dst, 3950); + } + } + goto iseqne_fp; + case BC_ISEQP: case BC_ISNEP: + vk = op == BC_ISEQP; + dasm_put(Dst, 11890); + if (!LJ_HASFFI) goto iseqne_test; + if (vk) { + dasm_put(Dst, 11903, -BCBIAS_J*4, LJ_TCDATA); + } else { + dasm_put(Dst, 11952, LJ_TCDATA, -BCBIAS_J*4); + } + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF: + dasm_put(Dst, 11995, LJ_TISTRUECOND); + if (op == BC_IST || op == BC_ISTC) { + dasm_put(Dst, 11319); + } else { + dasm_put(Dst, 11314); + } + if (op == BC_ISTC || op == BC_ISFC) { + dasm_put(Dst, 12007); + } + dasm_put(Dst, 11324, -BCBIAS_J*4); + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_MOV: + dasm_put(Dst, 12018); + break; + case BC_NOT: + dasm_put(Dst, 12051, LJ_TISTRUECOND, LJ_TTRUE); + break; + case BC_UNM: + if (LJ_DUALNUM) { + dasm_put(Dst, 12086, LJ_TISNUM, LJ_TISNUM); + } else { + dasm_put(Dst, 12162, 
LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12173); + } else { + dasm_put(Dst, 12203); + } + if (LJ_DUALNUM) { + dasm_put(Dst, 11593); + } else { + dasm_put(Dst, 11336); + } + break; + case BC_LEN: + dasm_put(Dst, 12212, LJ_TSTR); + if (LJ_DUALNUM) { + dasm_put(Dst, 12226, Dt5(->len), LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 12240, Dt5(->len)); + } else { + dasm_put(Dst, 12258, Dt5(->len)); + } + dasm_put(Dst, 12267, LJ_TTAB); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 12301, Dt6(->metatable)); +#endif + dasm_put(Dst, 12315); + if (LJ_DUALNUM) { + } else if (sse) { + dasm_put(Dst, 12324); + } else { + dasm_put(Dst, 12330); + } + dasm_put(Dst, 12337); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + dasm_put(Dst, 12350, Dt6(->nomm), 1<<MM_len); +#endif + break; + + /* -- Binary ops -------------------------------------------------------- */ + + + case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12374, LJ_TISNUM, LJ_TISNUM); + break; + case 1: + dasm_put(Dst, 12407, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 12440, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 12473, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 12236); + } else { + dasm_put(Dst, 12014); + } + dasm_put(Dst, 11336); + } else { + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12479, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12491, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12502); + } else { + dasm_put(Dst, 12516); + } + break; + case 1: + dasm_put(Dst, 12524, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12536, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12547); + } else { + dasm_put(Dst, 12561); + } + break; + default: + dasm_put(Dst, 12569, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12591); + } else { + dasm_put(Dst, 12605); + } + break; + } + if (sse) { + dasm_put(Dst, 12196); + } else { + dasm_put(Dst, 12208); + } + dasm_put(Dst, 11336); + } + break; + case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12613, LJ_TISNUM, LJ_TISNUM); + break; + case 1: + dasm_put(Dst, 12646, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 12679, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 12473, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 12236); + } else { + dasm_put(Dst, 12014); + } + dasm_put(Dst, 11336); + } else { + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12479, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12491, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12712); + } else { + dasm_put(Dst, 12726); + } + break; + case 1: + dasm_put(Dst, 12524, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12536, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12734); + } else { + dasm_put(Dst, 12748); + } + break; + default: + dasm_put(Dst, 12569, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12756); + } else { + dasm_put(Dst, 12770); + } + break; + } + if (sse) { + dasm_put(Dst, 12196); + } else { + dasm_put(Dst, 12208); + } + dasm_put(Dst, 11336); + } + break; + case BC_MULVN: case BC_MULNV: case BC_MULVV: + if (LJ_DUALNUM) { + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12778, LJ_TISNUM, LJ_TISNUM); + break; + 
case 1: + dasm_put(Dst, 12812, LJ_TISNUM, LJ_TISNUM); + break; + default: + dasm_put(Dst, 12846, LJ_TISNUM, LJ_TISNUM); + break; + } + dasm_put(Dst, 12473, LJ_TISNUM); + if (vk == 1) { + dasm_put(Dst, 12236); + } else { + dasm_put(Dst, 12014); + } + dasm_put(Dst, 11336); + } else { + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12479, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12491, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12880); + } else { + dasm_put(Dst, 12894); + } + break; + case 1: + dasm_put(Dst, 12524, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12536, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12902); + } else { + dasm_put(Dst, 12916); + } + break; + default: + dasm_put(Dst, 12569, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12924); + } else { + dasm_put(Dst, 12938); + } + break; + } + if (sse) { + dasm_put(Dst, 12196); + } else { + dasm_put(Dst, 12208); + } + dasm_put(Dst, 11336); + } + break; + case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12479, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12491, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12946); + } else { + dasm_put(Dst, 12960); + } + break; + case 1: + dasm_put(Dst, 12524, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12536, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 12968); + } else { + dasm_put(Dst, 12982); + } + break; + default: + dasm_put(Dst, 12569, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 12990); + } else { + dasm_put(Dst, 13004); + } + break; + } + if (sse) { + dasm_put(Dst, 12196); + } else { + dasm_put(Dst, 12208); + } + dasm_put(Dst, 11336); + break; + case BC_MODVN: + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12479, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12491, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 13012); + } else { + dasm_put(Dst, 13026); + } + break; + case 1: + dasm_put(Dst, 12524, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12536, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 13034); + } else { + dasm_put(Dst, 13048); + } + break; + default: + dasm_put(Dst, 12569, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 13056); + } else { + dasm_put(Dst, 13070); + } + break; + } + dasm_put(Dst, 13078); + if (sse) { + dasm_put(Dst, 12196); + } else { + dasm_put(Dst, 12208); + } + dasm_put(Dst, 11336); + break; + case BC_MODNV: case BC_MODVV: + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12479, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12491, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 13012); + } else { + dasm_put(Dst, 13026); + } + break; + case 1: + dasm_put(Dst, 12524, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12536, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 13034); + } else { + dasm_put(Dst, 13048); + } + break; + default: + dasm_put(Dst, 12569, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 13056); + } else { + dasm_put(Dst, 13070); + } + break; + } + dasm_put(Dst, 13084); + break; + case BC_POW: + dasm_put(Dst, 12366); + vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); + switch (vk) { + case 0: + dasm_put(Dst, 12479, LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12491, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 13012); + } else { + dasm_put(Dst, 13026); + } + break; + case 1: + dasm_put(Dst, 12524, 
LJ_TISNUM); + if (LJ_DUALNUM) { + dasm_put(Dst, 12536, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 13034); + } else { + dasm_put(Dst, 13048); + } + break; + default: + dasm_put(Dst, 12569, LJ_TISNUM, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 13056); + } else { + dasm_put(Dst, 13070); + } + break; + } + dasm_put(Dst, 13089); + if (sse) { + dasm_put(Dst, 12196); + } else { + dasm_put(Dst, 12208); + } + dasm_put(Dst, 11336); + break; + + case BC_CAT: + dasm_put(Dst, 13093, Dt1(->base), Dt1(->base)); + break; + + /* -- Constant ops ------------------------------------------------------ */ + + case BC_KSTR: + dasm_put(Dst, 13187, LJ_TSTR); + break; + case BC_KCDATA: +#if LJ_HASFFI + dasm_put(Dst, 13187, LJ_TCDATA); +#endif + break; + case BC_KSHORT: + if (LJ_DUALNUM) { + dasm_put(Dst, 13220, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 13232); + } else { + dasm_put(Dst, 13247); + } + dasm_put(Dst, 11336); + break; + case BC_KNUM: + if (sse) { + dasm_put(Dst, 13255); + } else { + dasm_put(Dst, 13268); + } + dasm_put(Dst, 11336); + break; + case BC_KPRI: + dasm_put(Dst, 13275); + break; + case BC_KNIL: + dasm_put(Dst, 13301, LJ_TNIL); + break; + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + dasm_put(Dst, 13347, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_USETV: +#define TV2MARKOFS \ + ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)) + dasm_put(Dst, 13391, offsetof(GCfuncL, uvptr), DtA(->closed), DtA(->v), TV2MARKOFS, LJ_GC_BLACK, LJ_TISGCV, LJ_TISNUM - LJ_TISGCV, Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G); + dasm_put(Dst, 13481); + break; +#undef TV2MARKOFS + case BC_USETS: + dasm_put(Dst, 13493, offsetof(GCfuncL, uvptr), DtA(->v), LJ_TSTR, DtA(->marked), LJ_GC_BLACK, Dt4(->gch.marked), LJ_GC_WHITES, DtA(->closed), GG_DISP2G); + break; + case BC_USETN: + dasm_put(Dst, 13584); + if (sse) { + dasm_put(Dst, 13589); + } else { + dasm_put(Dst, 11846); + } + dasm_put(Dst, 13596, offsetof(GCfuncL, uvptr), DtA(->v)); + if (sse) { + dasm_put(Dst, 13605); + } else { + dasm_put(Dst, 13611); + } + dasm_put(Dst, 11336); + break; + case BC_USETP: + dasm_put(Dst, 13614, offsetof(GCfuncL, uvptr), DtA(->v)); + break; + case BC_UCLO: + dasm_put(Dst, 13651, -BCBIAS_J*4, Dt1(->openupval), Dt1(->base), Dt1(->base)); + break; + + case BC_FNEW: + dasm_put(Dst, 13705, Dt1(->base), Dt1(->base), LJ_TFUNC); + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_TNEW: + dasm_put(Dst, 13776, Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), LJ_TTAB); + break; + case BC_TDUP: + dasm_put(Dst, 13902, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), Dt1(->base), LJ_TTAB); + break; + + case BC_GGET: + dasm_put(Dst, 13994, Dt7(->env)); + break; + case BC_GSET: + dasm_put(Dst, 14012, Dt7(->env)); + break; + + case BC_TGETV: + dasm_put(Dst, 14030, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 14053, LJ_TISNUM); + } else { + dasm_put(Dst, 14067, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 14078); + } else { + dasm_put(Dst, 14099); + if (cmov) { + dasm_put(Dst, 3944); + } else { + dasm_put(Dst, 3950); + } + dasm_put(Dst, 2680); + } + dasm_put(Dst, 14109); + } + dasm_put(Dst, 14114, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_index, LJ_TNIL); + dasm_put(Dst, 14209, LJ_TSTR); + break; + case BC_TGETS: + dasm_put(Dst, 14227, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, 
DtB(->key.gcr), LJ_TNIL); + dasm_put(Dst, 14315, LJ_TNIL, DtB(->next), Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + break; + case BC_TGETB: + dasm_put(Dst, 14385, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_index); + dasm_put(Dst, 14484, LJ_TNIL); + break; + + case BC_TSETV: + dasm_put(Dst, 14501, LJ_TTAB); + if (LJ_DUALNUM) { + dasm_put(Dst, 14053, LJ_TISNUM); + } else { + dasm_put(Dst, 14067, LJ_TISNUM); + if (sse) { + dasm_put(Dst, 14078); + } else { + dasm_put(Dst, 14099); + if (cmov) { + dasm_put(Dst, 3944); + } else { + dasm_put(Dst, 3950); + } + dasm_put(Dst, 2680); + } + dasm_put(Dst, 14524); + } + dasm_put(Dst, 14529, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex); + dasm_put(Dst, 14613, LJ_TSTR, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + case BC_TSETS: + dasm_put(Dst, 14670, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->nomm), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL); + dasm_put(Dst, 14745, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, DtB(->next)); + dasm_put(Dst, 14837, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, LJ_TSTR, Dt1(->base), Dt1(->base), Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + case BC_TSETB: + dasm_put(Dst, 14933, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable)); + dasm_put(Dst, 15031, Dt6(->metatable), Dt6(->nomm), 1<<MM_newindex, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + + case BC_TSETM: + dasm_put(Dst, 15077, Dt6(->marked), LJ_GC_BLACK, Dt6(->asize), Dt6(->array), Dt1(->base), Dt1(->base)); + dasm_put(Dst, 15226, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist)); + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_CALL: case BC_CALLM: + dasm_put(Dst, 12370); + if (op == BC_CALLM) { + dasm_put(Dst, 15244); + } + dasm_put(Dst, 15249, LJ_TFUNC, Dt7(->pc)); + break; + + case BC_CALLMT: + dasm_put(Dst, 15244); + break; + case BC_CALLT: + dasm_put(Dst, 15290, LJ_TFUNC, FRAME_TYPE, Dt7(->ffid), Dt7(->pc)); + dasm_put(Dst, 15408, FRAME_TYPE, Dt7(->pc), PC2PROTO(k), FRAME_VARG, FRAME_TYPEP, FRAME_VARG); + break; + + case BC_ITERC: + dasm_put(Dst, 15478, LJ_TFUNC, 2+1, Dt7(->pc)); + break; + + case BC_ITERN: +#if LJ_HASJIT +#endif + dasm_put(Dst, 15558, Dt6(->asize), Dt6(->array), LJ_TNIL); + if (LJ_DUALNUM) { + dasm_put(Dst, 12231, LJ_TISNUM); + } else if (sse) { + dasm_put(Dst, 12324); + } else { + dasm_put(Dst, 15604); + } + dasm_put(Dst, 15610); + if (LJ_DUALNUM) { + } else if (sse) { + dasm_put(Dst, 12196); + } else { + dasm_put(Dst, 12208); + } + dasm_put(Dst, 15629, -BCBIAS_J*4); + if (!LJ_DUALNUM && !sse) { + dasm_put(Dst, 15680); + } + dasm_put(Dst, 15686, Dt6(->hmask), sizeof(Node), Dt6(->node), DtB(->val.it), LJ_TNIL, DtB(->key.gcr), DtB(->key.it), DtB(->val.gcr), DtB(->val.it)); + dasm_put(Dst, 15761); + break; + + case BC_ISNEXT: + dasm_put(Dst, 15769, LJ_TFUNC, LJ_TTAB, LJ_TNIL, Dt8(->ffid), FF_next_N, -BCBIAS_J*4, BC_JMP, -BCBIAS_J*4, BC_ITERC); + break; + + case BC_VARG: + dasm_put(Dst, 15868, (8+FRAME_VARG), LJ_TNIL, Dt1(->maxstack)); + dasm_put(Dst, 16032, 
Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top)); + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + dasm_put(Dst, 15244); + break; + + case BC_RET: case BC_RET0: case BC_RET1: + if (op != BC_RET0) { + dasm_put(Dst, 16103); + } + dasm_put(Dst, 16107, FRAME_TYPE); + switch (op) { + case BC_RET: + dasm_put(Dst, 16126); + break; + case BC_RET1: + dasm_put(Dst, 16184); + /* fallthrough */ + case BC_RET0: + dasm_put(Dst, 16200); + default: + break; + } + dasm_put(Dst, 16211, Dt7(->pc), PC2PROTO(k)); + if (op == BC_RET) { + dasm_put(Dst, 16253, LJ_TNIL); + } else { + dasm_put(Dst, 16262, LJ_TNIL); + } + dasm_put(Dst, 16269, -FRAME_VARG, FRAME_TYPEP); + if (op != BC_RET0) { + dasm_put(Dst, 16293); + } + dasm_put(Dst, 4928); + break; + + /* -- Loops and branches ------------------------------------------------ */ + + + case BC_FORL: +#if LJ_HASJIT + dasm_put(Dst, 16297, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_JFORI: + case BC_JFORL: +#if !LJ_HASJIT + break; +#endif + case BC_FORI: + case BC_IFORL: + vk = (op == BC_IFORL || op == BC_JFORL); + dasm_put(Dst, 16318); + if (LJ_DUALNUM) { + dasm_put(Dst, 16322, LJ_TISNUM); + if (!vk) { + dasm_put(Dst, 16332, LJ_TISNUM, LJ_TISNUM); + } else { +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 16361, LJ_TISNUM, LJ_TISNUM); +#endif + dasm_put(Dst, 16380); + } + dasm_put(Dst, 16399, LJ_TISNUM); + if (op == BC_FORI) { + dasm_put(Dst, 16410, -BCBIAS_J*4); + } else if (op == BC_JFORI) { + dasm_put(Dst, 16424, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + dasm_put(Dst, 16442, -BCBIAS_J*4); + } else { + dasm_put(Dst, 16434, BC_JLOOP); + } + dasm_put(Dst, 16456); + if (vk) { + dasm_put(Dst, 16479); + } + dasm_put(Dst, 16399, LJ_TISNUM); + if (op == BC_FORI) { + dasm_put(Dst, 16488); + } else if (op == BC_JFORI) { + dasm_put(Dst, 16493, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + dasm_put(Dst, 16507); + } else { + dasm_put(Dst, 16503, BC_JLOOP); + } + dasm_put(Dst, 16512); + } else if (!vk) { + dasm_put(Dst, 16519, LJ_TISNUM); + } + if (!vk) { + dasm_put(Dst, 16525, LJ_TISNUM); + } else { +#ifdef LUA_USE_ASSERT + dasm_put(Dst, 16539, LJ_TISNUM, LJ_TISNUM); +#endif + } + dasm_put(Dst, 16558); + if (!vk) { + dasm_put(Dst, 16562, LJ_TISNUM); + } + if (sse) { + dasm_put(Dst, 16571); + if (vk) { + dasm_put(Dst, 16583); + } else { + dasm_put(Dst, 16602); + } + dasm_put(Dst, 16607); + } else { + dasm_put(Dst, 16620); + if (vk) { + dasm_put(Dst, 16626); + } else { + dasm_put(Dst, 16642); + } + dasm_put(Dst, 16650); + if (cmov) { + dasm_put(Dst, 3944); + } else { + dasm_put(Dst, 3950); + } + if (!cmov) { + dasm_put(Dst, 16655); + } + } + if (op == BC_FORI) { + if (LJ_DUALNUM) { + dasm_put(Dst, 16661); + } else { + dasm_put(Dst, 16666, -BCBIAS_J*4); + } + } else if (op == BC_JFORI) { + dasm_put(Dst, 16676, -BCBIAS_J*4, BC_JLOOP); + } else if (op == BC_IFORL) { + if (LJ_DUALNUM) { + dasm_put(Dst, 16690); + } else { + dasm_put(Dst, 16695, -BCBIAS_J*4); + } + } else { + dasm_put(Dst, 16686, BC_JLOOP); + } + if (LJ_DUALNUM) { + dasm_put(Dst, 11233); + } else { + dasm_put(Dst, 11974); + } + if (sse) { + dasm_put(Dst, 16705); + } + break; + + case BC_ITERL: +#if LJ_HASJIT + dasm_put(Dst, 16297, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_JITERL: +#if !LJ_HASJIT + break; +#endif + case BC_IITERL: + dasm_put(Dst, 16716, LJ_TNIL); + if (op == BC_JITERL) { + dasm_put(Dst, 16731, BC_JLOOP); + } else { + dasm_put(Dst, 16745, -BCBIAS_J*4); + } + dasm_put(Dst, 11334); + break; + + 
case BC_LOOP: +#if LJ_HASJIT + dasm_put(Dst, 16297, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + break; + + case BC_ILOOP: + dasm_put(Dst, 11336); + break; + + case BC_JLOOP: +#if LJ_HASJIT + dasm_put(Dst, 16761, DISPATCH_J(trace), DtD(->mcode), DISPATCH_GL(jit_base), DISPATCH_GL(jit_L)); +#endif + break; + + case BC_JMP: + dasm_put(Dst, 16784, -BCBIAS_J*4); + break; + + /* -- Function headers -------------------------------------------------- */ + + /* + ** Reminder: A function may be called with func/args above L->maxstack, + ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot, + ** too. This means all FUNC* ops (including fast functions) must check + ** for stack overflow _before_ adding more slots! + */ + + case BC_FUNCF: +#if LJ_HASJIT + dasm_put(Dst, 16808, HOTCOUNT_PCMASK, GG_DISP2HOT); +#endif + case BC_FUNCV: /* NYI: compiled vararg functions. */ + break; + + case BC_JFUNCF: +#if !LJ_HASJIT + break; +#endif + case BC_IFUNCF: + dasm_put(Dst, 16829, -4+PC2PROTO(k), Dt1(->maxstack), -4+PC2PROTO(numparams)); + if (op == BC_JFUNCF) { + dasm_put(Dst, 16859, BC_JLOOP); + } else { + dasm_put(Dst, 11336); + } + dasm_put(Dst, 16868, LJ_TNIL); + break; + + case BC_JFUNCV: +#if !LJ_HASJIT + break; +#endif + dasm_put(Dst, 10436); + break; /* NYI: compiled vararg functions. */ + + case BC_IFUNCV: + dasm_put(Dst, 16890, FRAME_VARG, Dt1(->maxstack), -4+PC2PROTO(numparams), LJ_TNIL); + if (op == BC_JFUNCV) { + dasm_put(Dst, 16859, BC_JLOOP); + } else { + dasm_put(Dst, 16981, -4+PC2PROTO(k)); + } + dasm_put(Dst, 17003, LJ_TNIL); + break; + + case BC_FUNCC: + case BC_FUNCCW: + dasm_put(Dst, 17025, Dt8(->f), Dt1(->base), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->top)); + if (op == BC_FUNCC) { + dasm_put(Dst, 17054); + } else { + dasm_put(Dst, 17058); + } + dasm_put(Dst, 17066, DISPATCH_GL(vmstate), ~LJ_VMST_C); + if (op == BC_FUNCC) { + dasm_put(Dst, 17075); + } else { + dasm_put(Dst, 17079, DISPATCH_GL(wrapf)); + } + dasm_put(Dst, 17084, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), Dt1(->top)); + break; + + /* ---------------------------------------------------------------------- */ + + default: + fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]); + exit(2); + break; + } +} + +static int build_backend(BuildCtx *ctx) +{ + int op; + int cmov = 1; + int sse = 0; +#ifdef LUAJIT_CPU_NOCMOV + cmov = 0; +#endif +#if defined(LUAJIT_CPU_SSE2) || defined(LJ_TARGET_X64) + sse = 1; +#endif + + dasm_growpc(Dst, BC__MAX); + + build_subroutines(ctx, cmov, sse); + + dasm_put(Dst, 17109); + for (op = 0; op < BC__MAX; op++) + build_ins(ctx, (BCOp)op, op, cmov, sse); + + return BC__MAX; +} + +/* Emit pseudo frame-info for all assembler functions. 
*/ +static void emit_asm_debug(BuildCtx *ctx) +{ +#if LJ_64 +#define SZPTR "8" +#define BSZPTR "3" +#define REG_SP "0x7" +#define REG_RA "0x10" +#else +#define SZPTR "4" +#define BSZPTR "2" +#define REG_SP "0x4" +#define REG_RA "0x8" +#endif + switch (ctx->mode) { + case BUILD_elfasm: + fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n"); + fprintf(ctx->fp, + ".Lframe0:\n" + "\t.long .LECIE0-.LSCIE0\n" + ".LSCIE0:\n" + "\t.long 0xffffffff\n" + "\t.byte 0x1\n" + "\t.string \"\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE0:\n\n"); + fprintf(ctx->fp, + ".LSFDE0:\n" + "\t.long .LEFDE0-.LASFDE0\n" + ".LASFDE0:\n" + "\t.long .Lframe0\n" + "\t.long .Lbegin\n" + "\t.long %d\n" + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE0:\n\n", (int)ctx->codesz, CFRAME_SIZE); +#if (defined(__sun__) && defined(__svr4__)) || defined(__solaris_) + fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n"); +#else + fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n"); +#endif + fprintf(ctx->fp, + ".Lframe1:\n" + "\t.long .LECIE1-.LSCIE1\n" + ".LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zPR\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 6\n" /* augmentation length */ + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.long lj_err_unwind_dwarf-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + ".LECIE1:\n\n"); + fprintf(ctx->fp, + ".LSFDE1:\n" + "\t.long .LEFDE1-.LASFDE1\n" + ".LASFDE1:\n" + "\t.long .LASFDE1-.Lframe1\n" + "\t.long .Lbegin-.\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + ".LEFDE1:\n\n", (int)ctx->codesz, CFRAME_SIZE); + break; + case BUILD_coffasm: + fprintf(ctx->fp, "\t.section .eh_frame,\"dr\"\n"); + fprintf(ctx->fp, + "\t.def %slj_err_unwind_dwarf; .scl 2; .type 32; .endef\n", + LJ_32 ? 
"_" : ""); + fprintf(ctx->fp, + "Lframe1:\n" + "\t.long LECIE1-LSCIE1\n" + "LSCIE1:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.string \"zP\"\n" + "\t.uleb128 0x1\n" + "\t.sleb128 -" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.uleb128 5\n" /* augmentation length */ + "\t.byte 0x00\n" /* absptr */ + "\t.long %slj_err_unwind_dwarf\n" + "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n" + "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n" + "\t.align " SZPTR "\n" + "LECIE1:\n\n", LJ_32 ? "_" : ""); + fprintf(ctx->fp, + "LSFDE1:\n" + "\t.long LEFDE1-LASFDE1\n" + "LASFDE1:\n" + "\t.long LASFDE1-Lframe1\n" + "\t.long %slj_vm_asm_begin\n" + "\t.long %d\n" + "\t.uleb128 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */ +#else + "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */ + "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */ +#endif + "\t.align " SZPTR "\n" + "LEFDE1:\n\n", LJ_32 ? "_" : "", (int)ctx->codesz, CFRAME_SIZE); + break; + /* Mental note: never let Apple design an assembler. + ** Or a linker. Or a plastic case. But I digress. + */ + case BUILD_machasm: { + int i; + fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n"); + fprintf(ctx->fp, + "EH_frame1:\n" + "\t.set L$set$x,LECIEX-LSCIEX\n" + "\t.long L$set$x\n" + "LSCIEX:\n" + "\t.long 0\n" + "\t.byte 0x1\n" + "\t.ascii \"zPR\\0\"\n" + "\t.byte 0x1\n" + "\t.byte 128-" SZPTR "\n" + "\t.byte " REG_RA "\n" + "\t.byte 6\n" /* augmentation length */ + "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */ +#if LJ_64 + "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n" +#else + "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n" + "\t.byte 0x1b\n" /* pcrel|sdata4 */ + "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. 
*/ +#endif + "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n" + "\t.align " BSZPTR "\n" + "LECIEX:\n\n"); + for (i = 0; i < ctx->nsym; i++) { + const char *name = ctx->sym[i].name; + int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs; + if (size == 0) continue; + fprintf(ctx->fp, + "%s.eh:\n" + "LSFDE%d:\n" + "\t.set L$set$%d,LEFDE%d-LASFDE%d\n" + "\t.long L$set$%d\n" + "LASFDE%d:\n" + "\t.long LASFDE%d-EH_frame1\n" + "\t.long %s-.\n" + "\t.long %d\n" + "\t.byte 0\n" /* augmentation length */ + "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */ +#if LJ_64 + "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */ + "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */ + "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */ + "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */ +#else + "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/ + "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */ + "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */ + "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */ +#endif + "\t.align " BSZPTR "\n" + "LEFDE%d:\n\n", + name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i); + } +#if LJ_64 + fprintf(ctx->fp, "\t.subsections_via_symbols\n"); +#else + fprintf(ctx->fp, + "\t.non_lazy_symbol_pointer\n" + "L_lj_err_unwind_dwarf$non_lazy_ptr:\n" + ".indirect_symbol _lj_err_unwind_dwarf\n" + ".long 0\n"); +#endif + } + break; + default: /* Difficult for other modes. */ + break; + } +} + diff --git a/src/luajit2/src/lauxlib.h b/src/luajit2/src/lauxlib.h new file mode 100644 index 0000000000..505a9f5228 --- /dev/null +++ b/src/luajit2/src/lauxlib.h @@ -0,0 +1,159 @@ +/* +** $Id: lauxlib.h,v 1.88.1.1 2007/12/27 13:02:25 roberto Exp $ +** Auxiliary functions for building Lua libraries +** See Copyright Notice in lua.h +*/ + + +#ifndef lauxlib_h +#define lauxlib_h + + +#include <stddef.h> +#include <stdio.h> + +#include "lua.h" + + +#define luaL_getn(L,i) ((int)lua_objlen(L, i)) +#define luaL_setn(L,i,j) ((void)0) /* no op! 
*/ + +/* extra error code for `luaL_load' */ +#define LUA_ERRFILE (LUA_ERRERR+1) + +typedef struct luaL_Reg { + const char *name; + lua_CFunction func; +} luaL_Reg; + +LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname, + const luaL_Reg *l, int nup); +LUALIB_API void (luaL_register) (lua_State *L, const char *libname, + const luaL_Reg *l); +LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname); +LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg); +LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg, + size_t *l); +LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg, + const char *def, size_t *l); +LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg); +LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def); + +LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg); +LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg, + lua_Integer def); + +LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg); +LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t); +LUALIB_API void (luaL_checkany) (lua_State *L, int narg); + +LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname); +LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname); + +LUALIB_API void (luaL_where) (lua_State *L, int lvl); +LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...); + +LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def, + const char *const lst[]); + +LUALIB_API int (luaL_ref) (lua_State *L, int t); +LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref); + +LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename); +LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz, + const char *name); +LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s); + +LUALIB_API lua_State *(luaL_newstate) (void); + + +LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p, + const char *r); + +LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx, + const char *fname, int szhint); + + + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define luaL_argcheck(L, cond,numarg,extramsg) \ + ((void)((cond) || luaL_argerror(L, (numarg), (extramsg)))) +#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL)) +#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL)) +#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n))) +#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d))) +#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n))) +#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d))) + +#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i))) + +#define luaL_dofile(L, fn) \ + (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0)) + +#define luaL_dostring(L, s) \ + (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0)) + +#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n))) + +#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? 
(d) : f(L,(n))) + +/* +** {====================================================== +** Generic Buffer manipulation +** ======================================================= +*/ + + + +typedef struct luaL_Buffer { + char *p; /* current position in buffer */ + int lvl; /* number of strings in the stack (level) */ + lua_State *L; + char buffer[LUAL_BUFFERSIZE]; +} luaL_Buffer; + +#define luaL_addchar(B,c) \ + ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \ + (*(B)->p++ = (char)(c))) + +/* compatibility only */ +#define luaL_putchar(B,c) luaL_addchar(B,c) + +#define luaL_addsize(B,n) ((B)->p += (n)) + +LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B); +LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B); +LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l); +LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s); +LUALIB_API void (luaL_addvalue) (luaL_Buffer *B); +LUALIB_API void (luaL_pushresult) (luaL_Buffer *B); + + +/* }====================================================== */ + + +/* compatibility with ref system */ + +/* pre-defined references */ +#define LUA_NOREF (-2) +#define LUA_REFNIL (-1) + +#define lua_ref(L,lock) ((lock) ? luaL_ref(L, LUA_REGISTRYINDEX) : \ + (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0)) + +#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref)) + +#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref)) + + +#define luaL_reg luaL_Reg + +#endif diff --git a/src/luajit2/src/lib_aux.c b/src/luajit2/src/lib_aux.c new file mode 100644 index 0000000000..ac2a5e9080 --- /dev/null +++ b/src/luajit2/src/lib_aux.c @@ -0,0 +1,377 @@ +/* +** Auxiliary library for the Lua/C API. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major parts taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#include <errno.h> +#include <stdarg.h> +#include <stdio.h> + +#define lib_aux_c +#define LUA_LIB + +#include "physfs.h" + +#include "lua.h" +#include "lauxlib.h" + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_state.h" +#include "lj_lib.h" + +/* -- Module registration ------------------------------------------------- */ + +LUALIB_API const char *luaL_findtable(lua_State *L, int idx, + const char *fname, int szhint) +{ + const char *e; + lua_pushvalue(L, idx); + do { + e = strchr(fname, '.'); + if (e == NULL) e = fname + strlen(fname); + lua_pushlstring(L, fname, (size_t)(e - fname)); + lua_rawget(L, -2); + if (lua_isnil(L, -1)) { /* no such field? */ + lua_pop(L, 1); /* remove this nil */ + lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */ + lua_pushlstring(L, fname, (size_t)(e - fname)); + lua_pushvalue(L, -2); + lua_settable(L, -4); /* set new table into field */ + } else if (!lua_istable(L, -1)) { /* field has a non-table value? 
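/* Editor's illustration (not part of the patched LuaJIT sources): a minimal
** sketch of how the luaL_Reg / luaL_register API declared in lauxlib.h above
** is typically used by an embedder. The module name "mylib" and the function
** my_greet are hypothetical.
**
**   static int my_greet(lua_State *L)
**   {
**     lua_pushliteral(L, "hello");          (one result left on the stack)
**     return 1;
**   }
**
**   static const luaL_Reg mylib_funcs[] = {
**     { "greet", my_greet },
**     { NULL, NULL }                        (sentinel terminates the list)
**   };
**
**   int luaopen_mylib(lua_State *L)
**   {
**     luaL_register(L, "mylib", mylib_funcs);   (creates/extends _G.mylib)
**     return 1;
**   }
*/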
*/ + lua_pop(L, 2); /* remove table and value */ + return fname; /* return problematic part of the name */ + } + lua_remove(L, -2); /* remove previous table */ + fname = e + 1; + } while (*e == '.'); + return NULL; +} + +static int libsize(const luaL_Reg *l) +{ + int size = 0; + for (; l->name; l++) size++; + return size; +} + +LUALIB_API void luaL_openlib(lua_State *L, const char *libname, + const luaL_Reg *l, int nup) +{ + lj_lib_checkfpu(L); + if (libname) { + int size = libsize(l); + /* check whether lib already exists */ + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); + lua_getfield(L, -1, libname); /* get _LOADED[libname] */ + if (!lua_istable(L, -1)) { /* not found? */ + lua_pop(L, 1); /* remove previous result */ + /* try global variable (and create one if it does not exist) */ + if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL) + lj_err_callerv(L, LJ_ERR_BADMODN, libname); + lua_pushvalue(L, -1); + lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */ + } + lua_remove(L, -2); /* remove _LOADED table */ + lua_insert(L, -(nup+1)); /* move library table to below upvalues */ + } + for (; l->name; l++) { + int i; + for (i = 0; i < nup; i++) /* copy upvalues to the top */ + lua_pushvalue(L, -nup); + lua_pushcclosure(L, l->func, nup); + lua_setfield(L, -(nup+2), l->name); + } + lua_pop(L, nup); /* remove upvalues */ +} + +LUALIB_API void luaL_register(lua_State *L, const char *libname, + const luaL_Reg *l) +{ + luaL_openlib(L, libname, l, 0); +} + +LUALIB_API const char *luaL_gsub(lua_State *L, const char *s, + const char *p, const char *r) +{ + const char *wild; + size_t l = strlen(p); + luaL_Buffer b; + luaL_buffinit(L, &b); + while ((wild = strstr(s, p)) != NULL) { + luaL_addlstring(&b, s, (size_t)(wild - s)); /* push prefix */ + luaL_addstring(&b, r); /* push replacement in place of pattern */ + s = wild + l; /* continue after `p' */ + } + luaL_addstring(&b, s); /* push last suffix */ + luaL_pushresult(&b); + return lua_tostring(L, -1); +} + +/* -- Buffer handling ----------------------------------------------------- */ + +#define bufflen(B) ((size_t)((B)->p - (B)->buffer)) +#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B))) + +static int emptybuffer(luaL_Buffer *B) +{ + size_t l = bufflen(B); + if (l == 0) + return 0; /* put nothing on stack */ + lua_pushlstring(B->L, B->buffer, l); + B->p = B->buffer; + B->lvl++; + return 1; +} + +static void adjuststack(luaL_Buffer *B) +{ + if (B->lvl > 1) { + lua_State *L = B->L; + int toget = 1; /* number of levels to concat */ + size_t toplen = lua_strlen(L, -1); + do { + size_t l = lua_strlen(L, -(toget+1)); + if (!(B->lvl - toget + 1 >= LUA_MINSTACK/2 || toplen > l)) + break; + toplen += l; + toget++; + } while (toget < B->lvl); + lua_concat(L, toget); + B->lvl = B->lvl - toget + 1; + } +} + +LUALIB_API char *luaL_prepbuffer(luaL_Buffer *B) +{ + if (emptybuffer(B)) + adjuststack(B); + return B->buffer; +} + +LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l) +{ + while (l--) + luaL_addchar(B, *s++); +} + +LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s) +{ + luaL_addlstring(B, s, strlen(s)); +} + +LUALIB_API void luaL_pushresult(luaL_Buffer *B) +{ + emptybuffer(B); + lua_concat(B->L, B->lvl); + B->lvl = 1; +} + +LUALIB_API void luaL_addvalue(luaL_Buffer *B) +{ + lua_State *L = B->L; + size_t vl; + const char *s = lua_tolstring(L, -1, &vl); + if (vl <= bufffree(B)) { /* fit into buffer? 
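/* Editor's illustration (not part of the patched LuaJIT sources): the
** luaL_Buffer helpers above are typically used to build a result string
** piecewise from C. A minimal sketch; the function name l_repeat is
** hypothetical.
**
**   static int l_repeat(lua_State *L)
**   {
**     size_t len;
**     const char *s = luaL_checklstring(L, 1, &len);
**     int n = luaL_checkint(L, 2);
**     luaL_Buffer b;
**     luaL_buffinit(L, &b);
**     while (n-- > 0)
**       luaL_addlstring(&b, s, len);
**     luaL_pushresult(&b);                  (pushes the concatenated string)
**     return 1;
**   }
*/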
*/ + memcpy(B->p, s, vl); /* put it there */ + B->p += vl; + lua_pop(L, 1); /* remove from stack */ + } else { + if (emptybuffer(B)) + lua_insert(L, -2); /* put buffer before new value */ + B->lvl++; /* add new value into B stack */ + adjuststack(B); + } +} + +LUALIB_API void luaL_buffinit(lua_State *L, luaL_Buffer *B) +{ + B->L = L; + B->p = B->buffer; + B->lvl = 0; +} + +/* -- Reference management ------------------------------------------------ */ + +#define FREELIST_REF 0 + +/* Convert a stack index to an absolute index. */ +#define abs_index(L, i) \ + ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : lua_gettop(L) + (i) + 1) + +LUALIB_API int luaL_ref(lua_State *L, int t) +{ + int ref; + t = abs_index(L, t); + if (lua_isnil(L, -1)) { + lua_pop(L, 1); /* remove from stack */ + return LUA_REFNIL; /* `nil' has a unique fixed reference */ + } + lua_rawgeti(L, t, FREELIST_REF); /* get first free element */ + ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */ + lua_pop(L, 1); /* remove it from stack */ + if (ref != 0) { /* any free element? */ + lua_rawgeti(L, t, ref); /* remove it from list */ + lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */ + } else { /* no free elements */ + ref = (int)lua_objlen(L, t); + ref++; /* create new reference */ + } + lua_rawseti(L, t, ref); + return ref; +} + +LUALIB_API void luaL_unref(lua_State *L, int t, int ref) +{ + if (ref >= 0) { + t = abs_index(L, t); + lua_rawgeti(L, t, FREELIST_REF); + lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */ + lua_pushinteger(L, ref); + lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */ + } +} + +/* -- Load Lua code ------------------------------------------------------- */ + +typedef struct FileReaderCtx { + PHYSFS_File *fp; + char buf[LUAL_BUFFERSIZE]; +} FileReaderCtx; + +static const char *reader_file(lua_State *L, void *ud, size_t *size) +{ + FileReaderCtx *ctx = (FileReaderCtx *)ud; + UNUSED(L); + if (PHYSFS_eof(ctx->fp)) return NULL; + *size = PHYSFS_read(ctx->fp, ctx->buf, 1, sizeof(ctx->buf)); + return *size > 0 ? ctx->buf : NULL; +} + +LUALIB_API int luaL_loadfile(lua_State *L, const char *filename) +{ + FileReaderCtx ctx; + int status; + const char *chunkname; + if (filename) { + ctx.fp = PHYSFS_openRead(filename); + if (ctx.fp == NULL) { + lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno)); + return LUA_ERRFILE; + } + chunkname = lua_pushfstring(L, "@%s", filename); + } else { +// ctx.fp = stdin; +// chunkname = "=stdin"; + } + status = lua_load(L, reader_file, &ctx, chunkname); +/* if (ferror(ctx.fp)) { + L->top -= filename ? 
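/* Editor's note (not part of the patched LuaJIT sources): unlike stock Lua,
** luaL_loadfile in this file resolves chunk names through PhysFS, so the
** virtual filesystem must be initialised and a search path added before any
** chunk can be loaded. A minimal sketch with hypothetical paths; PHYSFS_init
** and PHYSFS_addToSearchPath are standard PhysFS calls.
**
**   PHYSFS_init(argv[0]);
**   PHYSFS_addToSearchPath("game/modules", 1);   (append to search path)
**   if (luaL_loadfile(L, "mymod/init.lua") == 0)
**     lua_call(L, 0, 0);                         (run the loaded chunk)
**   else
**     fprintf(stderr, "%s\n", lua_tostring(L, -1));
*/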
2 : 1; + lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno)); + if (filename) + fclose(ctx.fp); + return LUA_ERRFILE; + } */ + if (filename) { + L->top--; + copyTV(L, L->top-1, L->top); + PHYSFS_close(ctx.fp); + } + return status; +} + +typedef struct StringReaderCtx { + const char *str; + size_t size; +} StringReaderCtx; + +static const char *reader_string(lua_State *L, void *ud, size_t *size) +{ + StringReaderCtx *ctx = (StringReaderCtx *)ud; + UNUSED(L); + if (ctx->size == 0) return NULL; + *size = ctx->size; + ctx->size = 0; + return ctx->str; +} + +LUALIB_API int luaL_loadbuffer(lua_State *L, const char *buf, size_t size, + const char *name) +{ + StringReaderCtx ctx; + ctx.str = buf; + ctx.size = size; + return lua_load(L, reader_string, &ctx, name); +} + +LUALIB_API int luaL_loadstring(lua_State *L, const char *s) +{ + return luaL_loadbuffer(L, s, strlen(s), s); +} + +/* -- Default allocator and panic function -------------------------------- */ + +static int panic(lua_State *L) +{ + fprintf(stderr, "PANIC: unprotected error in call to Lua API (%s)\n", + lua_tostring(L, -1)); + return 0; +} + +#ifdef LUAJIT_USE_SYSMALLOC + +#if LJ_64 +#error "Must use builtin allocator for 64 bit target" +#endif + +static void *mem_alloc(void *ud, void *ptr, size_t osize, size_t nsize) +{ + (void)ud; + (void)osize; + if (nsize == 0) { + free(ptr); + return NULL; + } else { + return realloc(ptr, nsize); + } +} + +LUALIB_API lua_State *luaL_newstate(void) +{ + lua_State *L = lua_newstate(mem_alloc, NULL); + if (L) G(L)->panic = panic; + return L; +} + +#else + +#include "lj_alloc.h" + +LUALIB_API lua_State *luaL_newstate(void) +{ + lua_State *L; + void *ud = lj_alloc_create(); + if (ud == NULL) return NULL; +#if LJ_64 + L = lj_state_newstate(lj_alloc_f, ud); +#else + L = lua_newstate(lj_alloc_f, ud); +#endif + if (L) G(L)->panic = panic; + return L; +} + +#if LJ_64 +LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud) +{ + UNUSED(f); UNUSED(ud); + fprintf(stderr, "Must use luaL_newstate() for 64 bit target\n"); + return NULL; +} +#endif + +#endif + diff --git a/src/luajit2/src/lib_base.c b/src/luajit2/src/lib_base.c new file mode 100644 index 0000000000..f8975986a4 --- /dev/null +++ b/src/luajit2/src/lib_base.c @@ -0,0 +1,650 @@ +/* +** Base and coroutine library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#include <stdio.h> + +#define lib_base_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_state.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_cconv.h" +#endif +#include "lj_bc.h" +#include "lj_ff.h" +#include "lj_dispatch.h" +#include "lj_char.h" +#include "lj_lib.h" + +/* -- Base library: checks ------------------------------------------------ */ + +#define LJLIB_MODULE_base + +LJLIB_ASM(assert) LJLIB_REC(.) 
+{ + GCstr *s; + lj_lib_checkany(L, 1); + s = lj_lib_optstr(L, 2); + if (s) + lj_err_callermsg(L, strdata(s)); + else + lj_err_caller(L, LJ_ERR_ASSERT); + return FFH_UNREACHABLE; +} + +/* ORDER LJ_T */ +LJLIB_PUSH("nil") +LJLIB_PUSH("boolean") +LJLIB_PUSH(top-1) /* boolean */ +LJLIB_PUSH("userdata") +LJLIB_PUSH("string") +LJLIB_PUSH("upval") +LJLIB_PUSH("thread") +LJLIB_PUSH("proto") +LJLIB_PUSH("function") +LJLIB_PUSH("trace") +LJLIB_PUSH("cdata") +LJLIB_PUSH("table") +LJLIB_PUSH(top-9) /* userdata */ +LJLIB_PUSH("number") +LJLIB_ASM_(type) LJLIB_REC(.) +/* Recycle the lj_lib_checkany(L, 1) from assert. */ + +/* -- Base library: getters and setters ----------------------------------- */ + +LJLIB_ASM_(getmetatable) LJLIB_REC(.) +/* Recycle the lj_lib_checkany(L, 1) from assert. */ + +LJLIB_ASM(setmetatable) LJLIB_REC(.) +{ + GCtab *t = lj_lib_checktab(L, 1); + GCtab *mt = lj_lib_checktabornil(L, 2); + if (!tvisnil(lj_meta_lookup(L, L->base, MM_metatable))) + lj_err_caller(L, LJ_ERR_PROTMT); + setgcref(t->metatable, obj2gco(mt)); + if (mt) { lj_gc_objbarriert(L, t, mt); } + settabV(L, L->base-1, t); + return FFH_RES(1); +} + +LJLIB_CF(getfenv) +{ + GCfunc *fn; + cTValue *o = L->base; + if (!(o < L->top && tvisfunc(o))) { + int level = lj_lib_optint(L, 1, 1); + o = lj_debug_frame(L, level, &level); + if (o == NULL) + lj_err_arg(L, 1, LJ_ERR_INVLVL); + } + fn = &gcval(o)->fn; + settabV(L, L->top++, isluafunc(fn) ? tabref(fn->l.env) : tabref(L->env)); + return 1; +} + +LJLIB_CF(setfenv) +{ + GCfunc *fn; + GCtab *t = lj_lib_checktab(L, 2); + cTValue *o = L->base; + if (!(o < L->top && tvisfunc(o))) { + int level = lj_lib_checkint(L, 1); + if (level == 0) { + /* NOBARRIER: A thread (i.e. L) is never black. */ + setgcref(L->env, obj2gco(t)); + return 0; + } + o = lj_debug_frame(L, level, &level); + if (o == NULL) + lj_err_arg(L, 1, LJ_ERR_INVLVL); + } + fn = &gcval(o)->fn; + if (!isluafunc(fn)) + lj_err_caller(L, LJ_ERR_SETFENV); + setgcref(fn->l.env, obj2gco(t)); + lj_gc_objbarrier(L, obj2gco(fn), t); + setfuncV(L, L->top++, fn); + return 1; +} + +LJLIB_ASM(rawget) LJLIB_REC(.) +{ + lj_lib_checktab(L, 1); + lj_lib_checkany(L, 2); + return FFH_UNREACHABLE; +} + +LJLIB_CF(rawset) LJLIB_REC(.) +{ + lj_lib_checktab(L, 1); + lj_lib_checkany(L, 2); + L->top = 1+lj_lib_checkany(L, 3); + lua_rawset(L, 1); + return 1; +} + +LJLIB_CF(rawequal) LJLIB_REC(.) +{ + cTValue *o1 = lj_lib_checkany(L, 1); + cTValue *o2 = lj_lib_checkany(L, 2); + setboolV(L->top-1, lj_obj_equal(o1, o2)); + return 1; +} + +LJLIB_CF(unpack) +{ + GCtab *t = lj_lib_checktab(L, 1); + int32_t n, i = lj_lib_optint(L, 2, 1); + int32_t e = (L->base+3-1 < L->top && !tvisnil(L->base+3-1)) ? + lj_lib_checkint(L, 3) : (int32_t)lj_tab_len(t); + if (i > e) return 0; + n = e - i + 1; + if (n <= 0 || !lua_checkstack(L, n)) + lj_err_caller(L, LJ_ERR_UNPACK); + do { + cTValue *tv = lj_tab_getint(t, i); + if (tv) { + copyTV(L, L->top++, tv); + } else { + setnilV(L->top++); + } + } while (i++ < e); + return n; +} + +LJLIB_CF(select) LJLIB_REC(.) +{ + int32_t n = (int32_t)(L->top - L->base); + if (n >= 1 && tvisstr(L->base) && *strVdata(L->base) == '#') { + setintV(L->top-1, n-1); + return 1; + } else { + int32_t i = lj_lib_checkint(L, 1); + if (i < 0) i = n + i; else if (i > n) i = n; + if (i < 1) + lj_err_arg(L, 1, LJ_ERR_IDXRNG); + return n - i; + } +} + +/* -- Base library: conversions ------------------------------------------- */ + +LJLIB_ASM(tonumber) LJLIB_REC(.) 
+{ + int32_t base = lj_lib_optint(L, 2, 10); + if (base == 10) { + TValue *o = lj_lib_checkany(L, 1); + if (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o))) { + copyTV(L, L->base-1, o); + return FFH_RES(1); + } +#if LJ_HASFFI + if (tviscdata(o)) { + CTState *cts = ctype_cts(L); + CType *ct = lj_ctype_rawref(cts, cdataV(o)->typeid); + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) { + if (LJ_DUALNUM && ctype_isinteger_or_bool(ct->info) && + ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) { + int32_t i; + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0); + setintV(L->base-1, i); + return FFH_RES(1); + } + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE), + (uint8_t *)&(L->base-1)->n, o, 0); + return FFH_RES(1); + } + } +#endif + } else { + const char *p = strdata(lj_lib_checkstr(L, 1)); + char *ep; + unsigned long ul; + if (base < 2 || base > 36) + lj_err_arg(L, 2, LJ_ERR_BASERNG); + ul = strtoul(p, &ep, base); + if (p != ep) { + while (lj_char_isspace((unsigned char)(*ep))) ep++; + if (*ep == '\0') { + if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u)) + setintV(L->base-1, (int32_t)ul); + else + setnumV(L->base-1, (lua_Number)ul); + return FFH_RES(1); + } + } + } + setnilV(L->base-1); + return FFH_RES(1); +} + +LJLIB_PUSH("nil") +LJLIB_PUSH("false") +LJLIB_PUSH("true") +LJLIB_ASM(tostring) LJLIB_REC(.) +{ + TValue *o = lj_lib_checkany(L, 1); + cTValue *mo; + L->top = o+1; /* Only keep one argument. */ + if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { + copyTV(L, L->base-1, mo); /* Replace callable. */ + return FFH_TAILCALL; + } else { + GCstr *s; + if (tvisnumber(o)) { + s = lj_str_fromnumber(L, o); + } else if (tvispri(o)) { + s = strV(lj_lib_upvalue(L, -(int32_t)itype(o))); + } else { + if (tvisfunc(o) && isffunc(funcV(o))) + lua_pushfstring(L, "function: fast#%d", funcV(o)->c.ffid); + else + lua_pushfstring(L, "%s: %p", typename(o), lua_topointer(L, 1)); + /* Note: lua_pushfstring calls the GC which may invalidate o. */ + s = strV(L->top-1); + } + setstrV(L, L->base-1, s); + return FFH_RES(1); + } +} + +/* -- Base library: iterators --------------------------------------------- */ + +/* This solves a circular dependency problem -- change FF_next_N as needed. */ +LJ_STATIC_ASSERT((int)FF_next == FF_next_N); + +LJLIB_ASM(next) +{ + lj_lib_checktab(L, 1); + return FFH_UNREACHABLE; +} + +#ifdef LUAJIT_ENABLE_LUA52COMPAT +static int ffh_pairs(lua_State *L, MMS mm) +{ + TValue *o = lj_lib_checkany(L, 1); + cTValue *mo = lj_meta_lookup(L, o, mm); + if (!tvisnil(mo)) { + L->top = o+1; /* Only keep one argument. */ + copyTV(L, L->base-1, mo); /* Replace callable. */ + return FFH_TAILCALL; + } else { + if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE); + setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1))); + if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0); + return FFH_RES(3); + } +} +#else +#define ffh_pairs(L, mm) (lj_lib_checktab(L, 1), FFH_UNREACHABLE) +#endif + +LJLIB_PUSH(lastcl) +LJLIB_ASM(pairs) +{ + return ffh_pairs(L, MM_pairs); +} + +LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.) +{ + lj_lib_checktab(L, 1); + lj_lib_checkint(L, 2); + return FFH_UNREACHABLE; +} + +LJLIB_PUSH(lastcl) +LJLIB_ASM(ipairs) LJLIB_REC(.) 
+{ + return ffh_pairs(L, MM_ipairs); +} + +/* -- Base library: throw and catch errors -------------------------------- */ + +LJLIB_CF(error) +{ + int32_t level = lj_lib_optint(L, 2, 1); + lua_settop(L, 1); + if (lua_isstring(L, 1) && level > 0) { + luaL_where(L, level); + lua_pushvalue(L, 1); + lua_concat(L, 2); + } + return lua_error(L); +} + +LJLIB_ASM(pcall) LJLIB_REC(.) +{ + lj_lib_checkany(L, 1); + lj_lib_checkfunc(L, 2); /* For xpcall only. */ + return FFH_UNREACHABLE; +} +LJLIB_ASM_(xpcall) LJLIB_REC(.) + +/* -- Base library: load Lua code ----------------------------------------- */ + +static int load_aux(lua_State *L, int status) +{ + if (status == 0) + return 1; + copyTV(L, L->top, L->top-1); + setnilV(L->top-1); + L->top++; + return 2; +} + +LJLIB_CF(loadstring) +{ + GCstr *s = lj_lib_checkstr(L, 1); + GCstr *name = lj_lib_optstr(L, 2); + return load_aux(L, + luaL_loadbuffer(L, strdata(s), s->len, strdata(name ? name : s))); +} + +LJLIB_CF(loadfile) +{ + GCstr *fname = lj_lib_optstr(L, 1); + return load_aux(L, luaL_loadfile(L, fname ? strdata(fname) : NULL)); +} + +static const char *reader_func(lua_State *L, void *ud, size_t *size) +{ + UNUSED(ud); + luaL_checkstack(L, 2, "too many nested functions"); + copyTV(L, L->top++, L->base); + lua_call(L, 0, 1); /* Call user-supplied function. */ + L->top--; + if (tvisnil(L->top)) { + *size = 0; + return NULL; + } else if (tvisstr(L->top) || tvisnumber(L->top)) { + copyTV(L, L->base+2, L->top); /* Anchor string in reserved stack slot. */ + return lua_tolstring(L, 3, size); + } else { + lj_err_caller(L, LJ_ERR_RDRSTR); + return NULL; + } +} + +LJLIB_CF(load) +{ + GCstr *name; + if (L->base < L->top && (tvisstr(L->base) || tvisnumber(L->base))) + return lj_cf_loadstring(L); + lj_lib_checkfunc(L, 1); + name = lj_lib_optstr(L, 2); + lua_settop(L, 3); /* Reserve a slot for the string from the reader. */ + return load_aux(L, + lua_load(L, reader_func, NULL, name ? strdata(name) : "=(load)")); +} + +LJLIB_CF(dofile) +{ + GCstr *fname = lj_lib_optstr(L, 1); + setnilV(L->top); + L->top = L->base+1; + if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != 0) + lua_error(L); + lua_call(L, 0, LUA_MULTRET); + return (int)(L->top - L->base) - 1; +} + +/* -- Base library: GC control -------------------------------------------- */ + +LJLIB_CF(gcinfo) +{ + setintV(L->top++, (G(L)->gc.total >> 10)); + return 1; +} + +LJLIB_CF(collectgarbage) +{ + int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */ + "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul"); + int32_t data = lj_lib_optint(L, 2, 0); + if (opt == LUA_GCCOUNT) { + setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0); + } else { + int res = lua_gc(L, opt, data); + if (opt == LUA_GCSTEP) + setboolV(L->top, res); + else + setintV(L->top, res); + } + L->top++; + return 1; +} + +/* -- Base library: miscellaneous functions ------------------------------- */ + +LJLIB_PUSH(top-2) /* Upvalue holds weak table. */ +LJLIB_CF(newproxy) +{ + lua_settop(L, 1); + lua_newuserdata(L, 0); + if (lua_toboolean(L, 1) == 0) { /* newproxy(): without metatable. */ + return 1; + } else if (lua_isboolean(L, 1)) { /* newproxy(true): with metatable. */ + lua_newtable(L); + lua_pushvalue(L, -1); + lua_pushboolean(L, 1); + lua_rawset(L, lua_upvalueindex(1)); /* Remember mt in weak table. */ + } else { /* newproxy(proxy): inherit metatable. 
*/ + int validproxy = 0; + if (lua_getmetatable(L, 1)) { + lua_rawget(L, lua_upvalueindex(1)); + validproxy = lua_toboolean(L, -1); + lua_pop(L, 1); + } + if (!validproxy) + lj_err_arg(L, 1, LJ_ERR_NOPROXY); + lua_getmetatable(L, 1); + } + lua_setmetatable(L, 2); + return 1; +} + +LJLIB_PUSH("tostring") +LJLIB_CF(print) +{ + ptrdiff_t i, nargs = L->top - L->base; + cTValue *tv = lj_tab_getstr(tabref(L->env), strV(lj_lib_upvalue(L, 1))); + int shortcut; + if (tv && !tvisnil(tv)) { + copyTV(L, L->top++, tv); + } else { + setstrV(L, L->top++, strV(lj_lib_upvalue(L, 1))); + lua_gettable(L, LUA_GLOBALSINDEX); + tv = L->top-1; + } + shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring); + for (i = 0; i < nargs; i++) { + const char *str; + size_t size; + cTValue *o = &L->base[i]; + if (shortcut && tvisstr(o)) { + str = strVdata(o); + size = strV(o)->len; + } else if (shortcut && tvisint(o)) { + char buf[LJ_STR_INTBUF]; + char *p = lj_str_bufint(buf, intV(o)); + size = (size_t)(buf+LJ_STR_INTBUF-p); + str = p; + } else if (shortcut && tvisnum(o)) { + char buf[LJ_STR_NUMBUF]; + size = lj_str_bufnum(buf, o); + str = buf; + } else { + copyTV(L, L->top+1, o); + copyTV(L, L->top, L->top-1); + L->top += 2; + lua_call(L, 1, 1); + str = lua_tolstring(L, -1, &size); + if (!str) + lj_err_caller(L, LJ_ERR_PRTOSTR); + L->top--; + } + if (i) + putchar('\t'); + fwrite(str, 1, size, stdout); + } + putchar('\n'); + return 0; +} + +LJLIB_PUSH(top-3) +LJLIB_SET(_VERSION) + +#include "lj_libdef.h" + +/* -- Coroutine library --------------------------------------------------- */ + +#define LJLIB_MODULE_coroutine + +LJLIB_CF(coroutine_status) +{ + const char *s; + lua_State *co; + if (!(L->top > L->base && tvisthread(L->base))) + lj_err_arg(L, 1, LJ_ERR_NOCORO); + co = threadV(L->base); + if (co == L) s = "running"; + else if (co->status == LUA_YIELD) s = "suspended"; + else if (co->status != 0) s = "dead"; + else if (co->base > tvref(co->stack)+1) s = "normal"; + else if (co->top == co->base) s = "dead"; + else s = "suspended"; + lua_pushstring(L, s); + return 1; +} + +LJLIB_CF(coroutine_running) +{ +#ifdef LUAJIT_ENABLE_LUA52COMPAT + int ismain = lua_pushthread(L); + setboolV(L->top++, ismain); + return 2; +#else + if (lua_pushthread(L)) + setnilV(L->top++); + return 1; +#endif +} + +LJLIB_CF(coroutine_create) +{ + lua_State *L1 = lua_newthread(L); + if (!(L->base < L->top && tvisfunc(L->base))) + lj_err_argt(L, 1, LUA_TFUNCTION); + setfuncV(L, L1->top++, funcV(L->base)); + return 1; +} + +LJLIB_ASM(coroutine_yield) +{ + lj_err_caller(L, LJ_ERR_CYIELD); + return FFH_UNREACHABLE; +} + +static int ffh_resume(lua_State *L, lua_State *co, int wrap) +{ + if (co->cframe != NULL || co->status > LUA_YIELD || + (co->status == 0 && co->top == co->base)) { + ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD; + if (wrap) lj_err_caller(L, em); + setboolV(L->base-1, 0); + setstrV(L, L->base, lj_err_str(L, em)); + return FFH_RES(2); + } + lj_state_growstack(co, (MSize)(L->top - L->base)); + return FFH_RETRY; +} + +LJLIB_ASM(coroutine_resume) +{ + if (!(L->top > L->base && tvisthread(L->base))) + lj_err_arg(L, 1, LJ_ERR_NOCORO); + return ffh_resume(L, threadV(L->base), 0); +} + +LJLIB_NOREG LJLIB_ASM(coroutine_wrap_aux) +{ + return ffh_resume(L, threadV(lj_lib_upvalue(L, 1)), 1); +} + +/* Inline declarations. */ +LJ_ASMF void lj_ff_coroutine_wrap_aux(void); +LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, + lua_State *co); + +/* Error handler, called from assembler VM. 
*/ +void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co) +{ + co->top--; copyTV(L, L->top, co->top); L->top++; + if (tvisstr(L->top-1)) + lj_err_callermsg(L, strVdata(L->top-1)); + else + lj_err_run(L); +} + +/* Forward declaration. */ +static void setpc_wrap_aux(lua_State *L, GCfunc *fn); + +LJLIB_CF(coroutine_wrap) +{ + lj_cf_coroutine_create(L); + lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1); + setpc_wrap_aux(L, funcV(L->top-1)); + return 1; +} + +#include "lj_libdef.h" + +/* Fix the PC of wrap_aux. Really ugly workaround. */ +static void setpc_wrap_aux(lua_State *L, GCfunc *fn) +{ + setmref(fn->c.pc, &L2GG(L)->bcff[lj_lib_init_coroutine[1]+2]); +} + +/* ------------------------------------------------------------------------ */ + +static void newproxy_weaktable(lua_State *L) +{ + /* NOBARRIER: The table is new (marked white). */ + GCtab *t = lj_tab_new(L, 0, 1); + settabV(L, L->top++, t); + setgcref(t->metatable, obj2gco(t)); + setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")), + lj_str_newlit(L, "kv")); + t->nomm = (uint8_t)(~(1u<<MM_mode)); +} + +LUALIB_API int luaopen_base(lua_State *L) +{ + /* NOBARRIER: Table and value are the same. */ + GCtab *env = tabref(L->env); + settabV(L, lj_tab_setstr(L, env, lj_str_newlit(L, "_G")), env); + lua_pushliteral(L, LUA_VERSION); /* top-3. */ + newproxy_weaktable(L); /* top-2. */ + LJ_LIB_REG(L, "_G", base); + LJ_LIB_REG(L, LUA_COLIBNAME, coroutine); + return 2; +} + diff --git a/src/luajit2/src/lib_bit.c b/src/luajit2/src/lib_bit.c new file mode 100644 index 0000000000..c6c5c6fef8 --- /dev/null +++ b/src/luajit2/src/lib_bit.c @@ -0,0 +1,74 @@ +/* +** Bit manipulation library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lib_bit_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_lib.h" + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_bit + +LJLIB_ASM(bit_tobit) LJLIB_REC(bit_unary IR_TOBIT) +{ + lj_lib_checknumber(L, 1); + return FFH_RETRY; +} +LJLIB_ASM_(bit_bnot) LJLIB_REC(bit_unary IR_BNOT) +LJLIB_ASM_(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP) + +LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL) +{ + lj_lib_checknumber(L, 1); + lj_lib_checkbit(L, 2); + return FFH_RETRY; +} +LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR) +LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR) +LJLIB_ASM_(bit_rol) LJLIB_REC(bit_shift IR_BROL) +LJLIB_ASM_(bit_ror) LJLIB_REC(bit_shift IR_BROR) + +LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND) +{ + int i = 0; + do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top); + return FFH_RETRY; +} +LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR) +LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR) + +/* ------------------------------------------------------------------------ */ + +LJLIB_CF(bit_tohex) +{ + uint32_t b = (uint32_t)lj_lib_checkbit(L, 1); + int32_t i, n = L->base+1 >= L->top ? 
8 : lj_lib_checkbit(L, 2); + const char *hexdigits = "0123456789abcdef"; + char buf[8]; + if (n < 0) { n = -n; hexdigits = "0123456789ABCDEF"; } + if (n > 8) n = 8; + for (i = n; --i >= 0; ) { buf[i] = hexdigits[b & 15]; b >>= 4; } + lua_pushlstring(L, buf, (size_t)n); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_bit(lua_State *L) +{ + LJ_LIB_REG(L, LUA_BITLIBNAME, bit); + return 1; +} + diff --git a/src/luajit2/src/lib_debug.c b/src/luajit2/src/lib_debug.c new file mode 100644 index 0000000000..3f1cb8c66b --- /dev/null +++ b/src/luajit2/src/lib_debug.c @@ -0,0 +1,366 @@ +/* +** Debug library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lib_debug_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_lib.h" + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_debug + +LJLIB_CF(debug_getregistry) +{ + copyTV(L, L->top++, registry(L)); + return 1; +} + +LJLIB_CF(debug_getmetatable) +{ + lj_lib_checkany(L, 1); + if (!lua_getmetatable(L, 1)) { + setnilV(L->top-1); + } + return 1; +} + +LJLIB_CF(debug_setmetatable) +{ + lj_lib_checktabornil(L, 2); + L->top = L->base+2; + lua_setmetatable(L, 1); + setboolV(L->top-1, 1); + return 1; +} + +LJLIB_CF(debug_getfenv) +{ + lj_lib_checkany(L, 1); + lua_getfenv(L, 1); + return 1; +} + +LJLIB_CF(debug_setfenv) +{ + lj_lib_checktab(L, 2); + L->top = L->base+2; + if (!lua_setfenv(L, 1)) + lj_err_caller(L, LJ_ERR_SETFENV); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static void settabss(lua_State *L, const char *i, const char *v) +{ + lua_pushstring(L, v); + lua_setfield(L, -2, i); +} + +static void settabsi(lua_State *L, const char *i, int v) +{ + lua_pushinteger(L, v); + lua_setfield(L, -2, i); +} + +static lua_State *getthread(lua_State *L, int *arg) +{ + if (L->base < L->top && tvisthread(L->base)) { + *arg = 1; + return threadV(L->base); + } else { + *arg = 0; + return L; + } +} + +static void treatstackoption(lua_State *L, lua_State *L1, const char *fname) +{ + if (L == L1) { + lua_pushvalue(L, -2); + lua_remove(L, -3); + } + else + lua_xmove(L1, L, 1); + lua_setfield(L, -2, fname); +} + +LJLIB_CF(debug_getinfo) +{ + lua_Debug ar; + int arg; + lua_State *L1 = getthread(L, &arg); + const char *options = luaL_optstring(L, arg+2, "flnSu"); + if (lua_isnumber(L, arg+1)) { + if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), &ar)) { + setnilV(L->top-1); + return 1; + } + } else if (L->base+arg < L->top && tvisfunc(L->base+arg)) { + options = lua_pushfstring(L, ">%s", options); + setfuncV(L1, L1->top++, funcV(L->base+arg)); + } else { + lj_err_arg(L, arg+1, LJ_ERR_NOFUNCL); + } + if (!lua_getinfo(L1, options, &ar)) + lj_err_arg(L, arg+2, LJ_ERR_INVOPT); + lua_createtable(L, 0, 16); + if (strchr(options, 'S')) { + settabss(L, "source", ar.source); + settabss(L, "short_src", ar.short_src); + settabsi(L, "linedefined", ar.linedefined); + settabsi(L, "lastlinedefined", ar.lastlinedefined); + settabss(L, "what", ar.what); + } + if (strchr(options, 'l')) + settabsi(L, "currentline", ar.currentline); + if (strchr(options, 'u')) + settabsi(L, "nups", ar.nups); + if (strchr(options, 
'n')) { + settabss(L, "name", ar.name); + settabss(L, "namewhat", ar.namewhat); + } + if (strchr(options, 'L')) + treatstackoption(L, L1, "activelines"); + if (strchr(options, 'f')) + treatstackoption(L, L1, "func"); + return 1; /* return table */ +} + +LJLIB_CF(debug_getlocal) +{ + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + const char *name; + if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar)) + lj_err_arg(L, arg+1, LJ_ERR_LVLRNG); + name = lua_getlocal(L1, &ar, lj_lib_checkint(L, arg+2)); + if (name) { + lua_xmove(L1, L, 1); + lua_pushstring(L, name); + lua_pushvalue(L, -2); + return 2; + } else { + setnilV(L->top-1); + return 1; + } +} + +LJLIB_CF(debug_setlocal) +{ + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + TValue *tv; + if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar)) + lj_err_arg(L, arg+1, LJ_ERR_LVLRNG); + tv = lj_lib_checkany(L, arg+3); + copyTV(L1, L1->top++, tv); + lua_pushstring(L, lua_setlocal(L1, &ar, lj_lib_checkint(L, arg+2))); + return 1; +} + +static int debug_getupvalue(lua_State *L, int get) +{ + int32_t n = lj_lib_checkint(L, 2); + if (isluafunc(lj_lib_checkfunc(L, 1))) { + const char *name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n); + if (name) { + lua_pushstring(L, name); + if (!get) return 1; + copyTV(L, L->top, L->top-2); + L->top++; + return 2; + } + } + return 0; +} + +LJLIB_CF(debug_getupvalue) +{ + return debug_getupvalue(L, 1); +} + +LJLIB_CF(debug_setupvalue) +{ + lj_lib_checkany(L, 3); + return debug_getupvalue(L, 0); +} + +/* ------------------------------------------------------------------------ */ + +static const char KEY_HOOK = 'h'; + +static void hookf(lua_State *L, lua_Debug *ar) +{ + static const char *const hooknames[] = + {"call", "return", "line", "count", "tail return"}; + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_rawget(L, LUA_REGISTRYINDEX); + if (lua_isfunction(L, -1)) { + lua_pushstring(L, hooknames[(int)ar->event]); + if (ar->currentline >= 0) + lua_pushinteger(L, ar->currentline); + else lua_pushnil(L); + lua_call(L, 2, 0); + } +} + +static int makemask(const char *smask, int count) +{ + int mask = 0; + if (strchr(smask, 'c')) mask |= LUA_MASKCALL; + if (strchr(smask, 'r')) mask |= LUA_MASKRET; + if (strchr(smask, 'l')) mask |= LUA_MASKLINE; + if (count > 0) mask |= LUA_MASKCOUNT; + return mask; +} + +static char *unmakemask(int mask, char *smask) +{ + int i = 0; + if (mask & LUA_MASKCALL) smask[i++] = 'c'; + if (mask & LUA_MASKRET) smask[i++] = 'r'; + if (mask & LUA_MASKLINE) smask[i++] = 'l'; + smask[i] = '\0'; + return smask; +} + +LJLIB_CF(debug_sethook) +{ + int arg, mask, count; + lua_Hook func; + (void)getthread(L, &arg); + if (lua_isnoneornil(L, arg+1)) { + lua_settop(L, arg+1); + func = NULL; mask = 0; count = 0; /* turn off hooks */ + } else { + const char *smask = luaL_checkstring(L, arg+2); + luaL_checktype(L, arg+1, LUA_TFUNCTION); + count = luaL_optint(L, arg+3, 0); + func = hookf; mask = makemask(smask, count); + } + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_pushvalue(L, arg+1); + lua_rawset(L, LUA_REGISTRYINDEX); + lua_sethook(L, func, mask, count); + return 0; +} + +LJLIB_CF(debug_gethook) +{ + char buff[5]; + int mask = lua_gethookmask(L); + lua_Hook hook = lua_gethook(L); + if (hook != NULL && hook != hookf) { /* external hook? 
*/ + lua_pushliteral(L, "external hook"); + } else { + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */ + } + lua_pushstring(L, unmakemask(mask, buff)); + lua_pushinteger(L, lua_gethookcount(L)); + return 3; +} + +/* ------------------------------------------------------------------------ */ + +LJLIB_CF(debug_debug) +{ + for (;;) { + char buffer[250]; + fputs("lua_debug> ", stderr); + if (fgets(buffer, sizeof(buffer), stdin) == 0 || + strcmp(buffer, "cont\n") == 0) + return 0; + if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") || + lua_pcall(L, 0, 0, 0)) { + fputs(lua_tostring(L, -1), stderr); + fputs("\n", stderr); + } + lua_settop(L, 0); /* remove eventual returns */ + } +} + +/* ------------------------------------------------------------------------ */ + +#define LEVELS1 12 /* size of the first part of the stack */ +#define LEVELS2 10 /* size of the second part of the stack */ + +LJLIB_CF(debug_traceback) +{ + int level; + int firstpart = 1; /* still before eventual `...' */ + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + if (lua_isnumber(L, arg+2)) { + level = (int)lua_tointeger(L, arg+2); + lua_pop(L, 1); + } + else + level = (L == L1) ? 1 : 0; /* level 0 may be this own function */ + if (lua_gettop(L) == arg) + lua_pushliteral(L, ""); + else if (!lua_isstring(L, arg+1)) return 1; /* message is not a string */ + else lua_pushliteral(L, "\n"); + lua_pushliteral(L, "stack traceback:"); + while (lua_getstack(L1, level++, &ar)) { + if (level > LEVELS1 && firstpart) { + /* no more than `LEVELS2' more levels? */ + if (!lua_getstack(L1, level+LEVELS2, &ar)) { + level--; /* keep going */ + } else { + lua_pushliteral(L, "\n\t..."); /* too many levels */ + /* This only works with LuaJIT 2.x. Avoids O(n^2) behaviour. */ + lua_getstack(L1, -10, &ar); + level = ar.i_ci - LEVELS2; + } + firstpart = 0; + continue; + } + lua_pushliteral(L, "\n\t"); + lua_getinfo(L1, "Snl", &ar); + lua_pushfstring(L, "%s:", ar.short_src); + if (ar.currentline > 0) + lua_pushfstring(L, "%d:", ar.currentline); + if (*ar.namewhat != '\0') { /* is there a name? */ + lua_pushfstring(L, " in function " LUA_QS, ar.name); + } else { + if (*ar.what == 'm') /* main? */ + lua_pushfstring(L, " in main chunk"); + else if (*ar.what == 'C' || *ar.what == 't') + lua_pushliteral(L, " ?"); /* C function or tail call */ + else + lua_pushfstring(L, " in function <%s:%d>", + ar.short_src, ar.linedefined); + } + lua_concat(L, lua_gettop(L) - arg); + } + lua_concat(L, lua_gettop(L) - arg); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_debug(lua_State *L) +{ + LJ_LIB_REG(L, LUA_DBLIBNAME, debug); + return 1; +} + diff --git a/src/luajit2/src/lib_ffi.c b/src/luajit2/src/lib_ffi.c new file mode 100644 index 0000000000..582e9bf706 --- /dev/null +++ b/src/luajit2/src/lib_ffi.c @@ -0,0 +1,757 @@ +/* +** FFI library. +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lib_ffi_c +#define LUA_LIB + +#include <errno.h> + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_ctype.h" +#include "lj_cparse.h" +#include "lj_cdata.h" +#include "lj_cconv.h" +#include "lj_carith.h" +#include "lj_ccall.h" +#include "lj_clib.h" +#include "lj_ff.h" +#include "lj_lib.h" + +/* -- C type checks ------------------------------------------------------- */ + +/* Check first argument for a C type and returns its ID. */ +static CTypeID ffi_checkctype(lua_State *L, CTState *cts) +{ + TValue *o = L->base; + if (!(o < L->top)) { + err_argtype: + lj_err_argtype(L, 1, "C type"); + } + if (tvisstr(o)) { /* Parse an abstract C type declaration. */ + GCstr *s = strV(o); + CPState cp; + int errcode; + cp.L = L; + cp.cts = cts; + cp.srcname = strdata(s); + cp.p = strdata(s); + cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT; + errcode = lj_cparse(&cp); + if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */ + return cp.val.id; + } else { + GCcdata *cd; + if (!tviscdata(o)) goto err_argtype; + cd = cdataV(o); + return cd->typeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : cd->typeid; + } +} + +/* Check argument for C data and return it. */ +static GCcdata *ffi_checkcdata(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && tviscdata(o))) + lj_err_argt(L, narg, LUA_TCDATA); + return cdataV(o); +} + +/* Convert argument to C pointer. */ +static void *ffi_checkptr(lua_State *L, int narg, CTypeID id) +{ + CTState *cts = ctype_cts(L); + TValue *o = L->base + narg-1; + void *p; + if (o >= L->top) + lj_err_arg(L, narg, LJ_ERR_NOVAL); + lj_cconv_ct_tv(cts, ctype_get(cts, id), (uint8_t *)&p, o, CCF_ARG(narg)); + return p; +} + +/* Convert argument to int32_t. */ +static int32_t ffi_checkint(lua_State *L, int narg) +{ + CTState *cts = ctype_cts(L); + TValue *o = L->base + narg-1; + int32_t i; + if (o >= L->top) + lj_err_arg(L, narg, LJ_ERR_NOVAL); + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, + CCF_ARG(narg)); + return i; +} + +/* -- C type metamethods -------------------------------------------------- */ + +#define LJLIB_MODULE_ffi_meta + +/* Handle ctype __index/__newindex metamethods. */ +static int ffi_index_meta(lua_State *L, CTState *cts, CType *ct, MMS mm) +{ + CTypeID id = ctype_typeid(cts, ct); + cTValue *tv = lj_ctype_meta(cts, id, mm); + TValue *base = L->base; + if (!tv) { + const char *s; + err_index: + s = strdata(lj_ctype_repr(L, id, NULL)); + if (tvisstr(L->base+1)) + lj_err_callerv(L, LJ_ERR_FFI_BADMEMBER, s, strVdata(L->base+1)); + else + lj_err_callerv(L, LJ_ERR_FFI_BADIDX, s); + } + if (!tvisfunc(tv)) { + if (mm == MM_index) { + cTValue *o = lj_meta_tget(L, tv, base+1); + if (o) { + if (tvisnil(o)) goto err_index; + copyTV(L, L->top-1, o); + return 1; + } + } else { + TValue *o = lj_meta_tset(L, tv, base+1); + if (o) { + copyTV(L, o, base+2); + return 0; + } + } + tv = L->top-1; + } + return lj_meta_tailcall(L, tv); +} + +LJLIB_CF(ffi_meta___index) LJLIB_REC(cdata_index 0) +{ + CTState *cts = ctype_cts(L); + CTInfo qual = 0; + CType *ct; + uint8_t *p; + TValue *o = L->base; + if (!(o+1 < L->top && tviscdata(o))) /* Also checks for presence of key. 
*/ + lj_err_argt(L, 1, LUA_TCDATA); + ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual); + if ((qual & 1)) + return ffi_index_meta(L, cts, ct, MM_index); + if (lj_cdata_get(cts, ct, L->top-1, p)) + lj_gc_check(L); + return 1; +} + +LJLIB_CF(ffi_meta___newindex) LJLIB_REC(cdata_index 1) +{ + CTState *cts = ctype_cts(L); + CTInfo qual = 0; + CType *ct; + uint8_t *p; + TValue *o = L->base; + if (!(o+2 < L->top && tviscdata(o))) /* Also checks for key and value. */ + lj_err_argt(L, 1, LUA_TCDATA); + ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual); + if ((qual & 1)) { + if ((qual & CTF_CONST)) + lj_err_caller(L, LJ_ERR_FFI_WRCONST); + return ffi_index_meta(L, cts, ct, MM_newindex); + } + lj_cdata_set(cts, ct, p, o+2, qual); + return 0; +} + +/* Common handler for cdata arithmetic. */ +static int ffi_arith(lua_State *L) +{ + MMS mm = (MMS)(curr_func(L)->c.ffid - (int)FF_ffi_meta___eq + (int)MM_eq); + return lj_carith_op(L, mm); +} + +/* The following functions must be in contiguous ORDER MM. */ +LJLIB_CF(ffi_meta___eq) LJLIB_REC(cdata_arith MM_eq) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___len) LJLIB_REC(cdata_arith MM_len) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___lt) LJLIB_REC(cdata_arith MM_lt) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___le) LJLIB_REC(cdata_arith MM_le) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___concat) LJLIB_REC(cdata_arith MM_concat) +{ + return ffi_arith(L); +} + +/* Handle ctype __call metamethod. */ +static int ffi_call_meta(lua_State *L, CTypeID id) +{ + CTState *cts = ctype_cts(L); + cTValue *tv = lj_ctype_meta(cts, id, MM_call); + if (!tv) + lj_err_callerv(L, LJ_ERR_FFI_BADCALL, strdata(lj_ctype_repr(L, id, NULL))); + return lj_meta_tailcall(L, tv); +} + +/* Forward declaration. */ +static int lj_cf_ffi_new(lua_State *L); + +LJLIB_CF(ffi_meta___call) LJLIB_REC(cdata_call) +{ + GCcdata *cd = ffi_checkcdata(L, 1); + int ret; + if (cd->typeid == CTID_CTYPEID) + return lj_cf_ffi_new(L); + if ((ret = lj_ccall_func(L, cd)) < 0) + return ffi_call_meta(L, cd->typeid); + return ret; +} + +LJLIB_CF(ffi_meta___add) LJLIB_REC(cdata_arith MM_add) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___sub) LJLIB_REC(cdata_arith MM_sub) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___mul) LJLIB_REC(cdata_arith MM_mul) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___div) LJLIB_REC(cdata_arith MM_div) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___mod) LJLIB_REC(cdata_arith MM_mod) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___pow) LJLIB_REC(cdata_arith MM_pow) +{ + return ffi_arith(L); +} + +LJLIB_CF(ffi_meta___unm) LJLIB_REC(cdata_arith MM_unm) +{ + return ffi_arith(L); +} +/* End of contiguous ORDER MM. 
*/ + +LJLIB_CF(ffi_meta___tostring) +{ + GCcdata *cd = ffi_checkcdata(L, 1); + const char *msg = "cdata<%s>: %p"; + CTypeID id = cd->typeid; + void *p = cdataptr(cd); + if (id == CTID_CTYPEID) { + msg = "ctype<%s>"; + id = *(CTypeID *)p; + } else { + CTState *cts = ctype_cts(L); + CType *ct = ctype_raw(cts, id); + if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct); + if (ctype_iscomplex(ct->info)) { + setstrV(L, L->top-1, lj_ctype_repr_complex(L, cdataptr(cd), ct->size)); + goto checkgc; + } else if (ct->size == 8 && ctype_isinteger(ct->info)) { + setstrV(L, L->top-1, lj_ctype_repr_int64(L, *(uint64_t *)cdataptr(cd), + (ct->info & CTF_UNSIGNED))); + goto checkgc; + } else if (ctype_isfunc(ct->info)) { + p = *(void **)p; + } else { + if (ctype_isptr(ct->info)) { + p = cdata_getptr(p, ct->size); + ct = ctype_rawchild(cts, ct); + } + if (ctype_isstruct(ct->info) || ctype_isvector(ct->info)) { + /* Handle ctype __tostring metamethod. */ + cTValue *tv = lj_ctype_meta(cts, ctype_typeid(cts, ct), MM_tostring); + if (tv) + return lj_meta_tailcall(L, tv); + } + } + } + lj_str_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p); +checkgc: + lj_gc_check(L); + return 1; +} + +LJLIB_PUSH("ffi") LJLIB_SET(__metatable) + +#include "lj_libdef.h" + +/* -- C library metamethods ----------------------------------------------- */ + +#define LJLIB_MODULE_ffi_clib + +/* Index C library by a name. */ +static TValue *ffi_clib_index(lua_State *L) +{ + TValue *o = L->base; + CLibrary *cl; + if (!(o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB)) + lj_err_argt(L, 1, LUA_TUSERDATA); + cl = (CLibrary *)uddata(udataV(o)); + if (!(o+1 < L->top && tvisstr(o+1))) + lj_err_argt(L, 2, LUA_TSTRING); + return lj_clib_index(L, cl, strV(o+1)); +} + +LJLIB_CF(ffi_clib___index) LJLIB_REC(clib_index) +{ + TValue *tv = ffi_clib_index(L); + if (tviscdata(tv)) { + CTState *cts = ctype_cts(L); + GCcdata *cd = cdataV(tv); + CType *s = ctype_get(cts, cd->typeid); + if (ctype_isextern(s->info)) { + CTypeID sid = ctype_cid(s->info); + void *sp = *(void **)cdataptr(cd); + if (lj_cconv_tv_ct(cts, ctype_raw(cts, sid), sid, L->top-1, sp)) + lj_gc_check(L); + return 1; + } + } + copyTV(L, L->top-1, tv); + return 1; +} + +LJLIB_CF(ffi_clib___newindex) +{ + TValue *tv = ffi_clib_index(L); + TValue *o = L->base+2; + if (o < L->top && tviscdata(tv)) { + CTState *cts = ctype_cts(L); + GCcdata *cd = cdataV(tv); + CType *d = ctype_get(cts, cd->typeid); + if (ctype_isextern(d->info)) { + CTInfo qual = 0; + for (;;) { /* Skip attributes and collect qualifiers. */ + d = ctype_child(cts, d); + if (!ctype_isattrib(d->info)) break; + if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size; + } + if (!((d->info|qual) & CTF_CONST)) { + lj_cconv_ct_tv(cts, d, *(void **)cdataptr(cd), o, 0); + return 0; + } + } + } + lj_err_caller(L, LJ_ERR_FFI_WRCONST); + return 0; /* unreachable */ +} + +LJLIB_CF(ffi_clib___gc) +{ + TValue *o = L->base; + if (o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB) + lj_clib_unload((CLibrary *)uddata(udataV(o))); + return 0; +} + +#include "lj_libdef.h" + +/* -- FFI library functions ----------------------------------------------- */ + +#define LJLIB_MODULE_ffi + +LJLIB_CF(ffi_cdef) +{ + GCstr *s = lj_lib_checkstr(L, 1); + CPState cp; + int errcode; + cp.L = L; + cp.cts = ctype_cts(L); + cp.srcname = strdata(s); + cp.p = strdata(s); + cp.mode = CPARSE_MODE_MULTI|CPARSE_MODE_DIRECT; + errcode = lj_cparse(&cp); + if (errcode) lj_err_throw(L, errcode); /* Propagate errors. 
*/ + lj_gc_check(L); + return 0; +} + +LJLIB_CF(ffi_new) LJLIB_REC(.) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts); + CType *ct = ctype_raw(cts, id); + CTSize sz; + CTInfo info = lj_ctype_info(cts, id, &sz); + TValue *o = L->base+1; + GCcdata *cd; + if ((info & CTF_VLA)) { + o++; + sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2)); + } + if (sz == CTSIZE_INVALID) + lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE); + if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN) + cd = lj_cdata_new(cts, id, sz); + else + cd = lj_cdata_newv(cts, id, sz, ctype_align(info)); + setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */ + lj_cconv_ct_init(cts, ct, sz, cdataptr(cd), + o, (MSize)(L->top - o)); /* Initialize cdata. */ + if (ctype_isstruct(ct->info)) { + /* Handle ctype __gc metamethod. Use the fast lookup here. */ + cTValue *tv = lj_tab_getint(cts->metatype, (int32_t)id); + if (tv && tvistab(tv) && (tv = lj_meta_fast(L, tabV(tv), MM_gc))) { + GCtab *t = cts->finalizer; + if (gcref(t->metatable)) { + /* Add to finalizer table, if still enabled. */ + copyTV(L, lj_tab_set(L, t, o-1), tv); + lj_gc_anybarriert(L, t); + cd->marked |= LJ_GC_CDATA_FIN; + } + } + } + L->top = o; /* Only return the cdata itself. */ + lj_gc_check(L); + return 1; +} + +LJLIB_CF(ffi_cast) LJLIB_REC(ffi_new) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts); + CType *d = ctype_raw(cts, id); + TValue *o = lj_lib_checkany(L, 2); + L->top = o+1; /* Make sure this is the last item on the stack. */ + if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || ctype_isenum(d->info))) + lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); + if (!(tviscdata(o) && cdataV(o)->typeid == id)) { + GCcdata *cd = lj_cdata_new(cts, id, d->size); + lj_cconv_ct_tv(cts, d, cdataptr(cd), o, CCF_CAST); + setcdataV(L, o, cd); + lj_gc_check(L); + } + return 1; +} + +LJLIB_CF(ffi_typeof) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts); + GCcdata *cd = lj_cdata_new(cts, CTID_CTYPEID, 4); + *(CTypeID *)cdataptr(cd) = id; + setcdataV(L, L->top-1, cd); + lj_gc_check(L); + return 1; +} + +LJLIB_CF(ffi_istype) LJLIB_REC(ffi_istype) +{ + CTState *cts = ctype_cts(L); + CTypeID id1 = ffi_checkctype(L, cts); + TValue *o = lj_lib_checkany(L, 2); + int b = 0; + if (tviscdata(o)) { + GCcdata *cd = cdataV(o); + CTypeID id2 = cd->typeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : + cd->typeid; + CType *ct1 = lj_ctype_rawref(cts, id1); + CType *ct2 = lj_ctype_rawref(cts, id2); + if (ct1 == ct2) { + b = 1; + } else if (ctype_type(ct1->info) == ctype_type(ct2->info) && + ct1->size == ct2->size) { + if (ctype_ispointer(ct1->info)) + b = lj_cconv_compatptr(cts, ct1, ct2, CCF_IGNQUAL); + else if (ctype_isnum(ct1->info) || ctype_isvoid(ct1->info)) + b = (((ct1->info ^ ct2->info) & ~CTF_QUAL) == 0); + } else if (ctype_isstruct(ct1->info) && ctype_isptr(ct2->info) && + ct1 == ctype_rawchild(cts, ct2)) { + b = 1; + } + } + setboolV(L->top-1, b); + setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */ + return 1; +} + +LJLIB_CF(ffi_sizeof) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts); + CTSize sz; + if (LJ_UNLIKELY(tviscdata(L->base) && cdataisv(cdataV(L->base)))) { + sz = cdatavlen(cdataV(L->base)); + } else { + CType *ct = lj_ctype_rawref(cts, id); + if (ctype_isvltype(ct->info)) + sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2)); + else + sz = ctype_hassize(ct->info) ? 
ct->size : CTSIZE_INVALID; + if (LJ_UNLIKELY(sz == CTSIZE_INVALID)) { + setnilV(L->top-1); + return 1; + } + } + setintV(L->top-1, (int32_t)sz); + return 1; +} + +LJLIB_CF(ffi_alignof) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts); + CTSize sz = 0; + CTInfo info = lj_ctype_info(cts, id, &sz); + setintV(L->top-1, 1 << ctype_align(info)); + return 1; +} + +LJLIB_CF(ffi_offsetof) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts); + GCstr *name = lj_lib_checkstr(L, 2); + CType *ct = lj_ctype_rawref(cts, id); + CTSize ofs; + if (ctype_isstruct(ct->info) && ct->size != CTSIZE_INVALID) { + CType *fct = lj_ctype_getfield(cts, ct, name, &ofs); + if (fct) { + setintV(L->top-1, ofs); + if (ctype_isfield(fct->info)) { + return 1; + } else if (ctype_isbitfield(fct->info)) { + setintV(L->top++, ctype_bitpos(fct->info)); + setintV(L->top++, ctype_bitbsz(fct->info)); + return 3; + } + } + } + return 0; +} + +LJLIB_CF(ffi_errno) +{ + int err = errno; + if (L->top > L->base) + errno = ffi_checkint(L, 1); + setintV(L->top++, err); + return 1; +} + +LJLIB_CF(ffi_string) LJLIB_REC(.) +{ + CTState *cts = ctype_cts(L); + TValue *o = lj_lib_checkany(L, 1); + const char *p; + size_t len; + if (o+1 < L->top) { + len = (size_t)ffi_checkint(L, 2); + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p, o, + CCF_ARG(1)); + } else { + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CCHAR), (uint8_t *)&p, o, + CCF_ARG(1)); + len = strlen(p); + } + L->top = o+1; /* Make sure this is the last item on the stack. */ + setstrV(L, o, lj_str_new(L, p, len)); + lj_gc_check(L); + return 1; +} + +LJLIB_CF(ffi_copy) LJLIB_REC(.) +{ + void *dp = ffi_checkptr(L, 1, CTID_P_VOID); + void *sp = ffi_checkptr(L, 2, CTID_P_CVOID); + TValue *o = L->base+1; + CTSize len; + if (tvisstr(o) && o+1 >= L->top) + len = strV(o)->len+1; /* Copy Lua string including trailing '\0'. */ + else + len = (CTSize)ffi_checkint(L, 3); + memcpy(dp, sp, len); + return 0; +} + +LJLIB_CF(ffi_fill) LJLIB_REC(.) +{ + void *dp = ffi_checkptr(L, 1, CTID_P_VOID); + CTSize len = (CTSize)ffi_checkint(L, 2); + int32_t fill = 0; + if (L->base+2 < L->top && !tvisnil(L->base+2)) fill = ffi_checkint(L, 3); + memset(dp, fill, len); + return 0; +} + +#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be) + +/* Test ABI string. */ +LJLIB_CF(ffi_abi) LJLIB_REC(.) +{ + GCstr *s = lj_lib_checkstr(L, 1); + int b = 0; + switch (s->hash) { +#if LJ_64 + case H_(849858eb,ad35fd06): b = 1; break; /* 64bit */ +#else + case H_(662d3c79,d0e22477): b = 1; break; /* 32bit */ +#endif +#if LJ_ARCH_HASFPU + case H_(e33ee463,e33ee463): b = 1; break; /* fpu */ +#endif +#if LJ_ABI_SOFTFP + case H_(61211a23,c2e8c81c): b = 1; break; /* softfp */ +#else + case H_(539417a8,8ce0812f): b = 1; break; /* hardfp */ +#endif +#if LJ_ABI_EABI + case H_(2182df8f,f2ed1152): b = 1; break; /* eabi */ +#endif +#if LJ_ABI_WIN + case H_(4ab624a8,4ab624a8): b = 1; break; /* win */ +#endif + case H_(3af93066,1f001464): b = 1; break; /* le/be */ + default: + break; + } + setboolV(L->top-1, b); + setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */ + return 1; +} + +#undef H_ + +LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to metatype table. */ + +LJLIB_CF(ffi_metatype) +{ + CTState *cts = ctype_cts(L); + CTypeID id = ffi_checkctype(L, cts); + GCtab *mt = lj_lib_checktab(L, 2); + GCtab *t = cts->metatype; + CType *ct = ctype_get(cts, id); /* Only allow raw types. 
*/ + TValue *tv; + GCcdata *cd; + if (!(ctype_isstruct(ct->info) || ctype_iscomplex(ct->info) || + ctype_isvector(ct->info))) + lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); + tv = lj_tab_setint(L, t, (int32_t)id); + if (!tvisnil(tv)) + lj_err_caller(L, LJ_ERR_PROTMT); + settabV(L, tv, mt); + lj_gc_anybarriert(L, t); + cd = lj_cdata_new(cts, CTID_CTYPEID, 4); + *(CTypeID *)cdataptr(cd) = id; + setcdataV(L, L->top-1, cd); + lj_gc_check(L); + return 1; +} + +LJLIB_PUSH(top-7) LJLIB_SET(!) /* Store reference to finalizer table. */ + +LJLIB_CF(ffi_gc) +{ + GCcdata *cd = ffi_checkcdata(L, 1); + TValue *fin = lj_lib_checkany(L, 2); + CTState *cts = ctype_cts(L); + GCtab *t = cts->finalizer; + CType *ct = ctype_raw(cts, cd->typeid); + if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) || + ctype_isrefarray(ct->info))) + lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); + if (gcref(t->metatable)) { /* Update finalizer table, if still enabled. */ + copyTV(L, lj_tab_set(L, t, L->base), fin); + lj_gc_anybarriert(L, t); + if (!tvisnil(fin)) + cd->marked |= LJ_GC_CDATA_FIN; + else + cd->marked &= ~LJ_GC_CDATA_FIN; + } + L->top = L->base+1; /* Pass through the cdata object. */ + return 1; +} + +LJLIB_PUSH(top-5) LJLIB_SET(!) /* Store clib metatable in func environment. */ + +LJLIB_CF(ffi_load) +{ + GCstr *name = lj_lib_checkstr(L, 1); + int global = (L->base+1 < L->top && tvistruecond(L->base+1)); + lj_clib_load(L, tabref(curr_func(L)->c.env), name, global); + return 1; +} + +LJLIB_PUSH(top-4) LJLIB_SET(C) +LJLIB_PUSH(top-3) LJLIB_SET(os) +LJLIB_PUSH(top-2) LJLIB_SET(arch) + +#include "lj_libdef.h" + +/* ------------------------------------------------------------------------ */ + +/* Create special weak-keyed finalizer table. */ +static GCtab *ffi_finalizer(lua_State *L) +{ + /* NOBARRIER: The table is new (marked white). */ + GCtab *t = lj_tab_new(L, 0, 1); + settabV(L, L->top++, t); + setgcref(t->metatable, obj2gco(t)); + setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")), + lj_str_newlit(L, "K")); + t->nomm = (uint8_t)(~(1u<<MM_mode)); + return t; +} + +/* Register FFI module as loaded. */ +static void ffi_register_module(lua_State *L) +{ + cTValue *tmp = lj_tab_getstr(tabV(registry(L)), lj_str_newlit(L, "_LOADED")); + if (tmp && tvistab(tmp)) { + GCtab *t = tabV(tmp); + copyTV(L, lj_tab_setstr(L, t, lj_str_newlit(L, LUA_FFILIBNAME)), L->top-1); + lj_gc_anybarriert(L, t); + } +} + +LUALIB_API int luaopen_ffi(lua_State *L) +{ + CTState *cts = lj_ctype_init(L); + settabV(L, L->top++, (cts->metatype = lj_tab_new(L, 0, 0))); + cts->finalizer = ffi_finalizer(L); + LJ_LIB_REG(L, NULL, ffi_meta); + /* NOBARRIER: basemt is a GC root. */ + setgcref(basemt_it(G(L), LJ_TCDATA), obj2gco(tabV(L->top-1))); + LJ_LIB_REG(L, NULL, ffi_clib); + lj_clib_default(L, tabV(L->top-1)); /* Create ffi.C default namespace. */ + lua_pushliteral(L, LJ_OS_NAME); + lua_pushliteral(L, LJ_ARCH_NAME); + LJ_LIB_REG(L, NULL, ffi); /* Note: no global "ffi" created! */ + ffi_register_module(L); + return 1; +} + +#endif diff --git a/src/luajit2/src/lib_init.c b/src/luajit2/src/lib_init.c new file mode 100644 index 0000000000..8501e21de1 --- /dev/null +++ b/src/luajit2/src/lib_init.c @@ -0,0 +1,53 @@ +/* +** Library initialization. +** Major parts taken verbatim from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h +*/ + +#define lib_init_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_arch.h" + +static const luaL_Reg lj_lib_load[] = { + { "", luaopen_base }, + { LUA_LOADLIBNAME, luaopen_package }, + { LUA_TABLIBNAME, luaopen_table }, + { LUA_IOLIBNAME, luaopen_io }, + { LUA_OSLIBNAME, luaopen_os }, + { LUA_STRLIBNAME, luaopen_string }, + { LUA_MATHLIBNAME, luaopen_math }, + { LUA_DBLIBNAME, luaopen_debug }, + { LUA_BITLIBNAME, luaopen_bit }, + { LUA_JITLIBNAME, luaopen_jit }, + { NULL, NULL } +}; + +static const luaL_Reg lj_lib_preload[] = { +#if LJ_HASFFI + { LUA_FFILIBNAME, luaopen_ffi }, +#endif + { NULL, NULL } +}; + +LUALIB_API void luaL_openlibs(lua_State *L) +{ + const luaL_Reg *lib; + for (lib = lj_lib_load; lib->func; lib++) { + lua_pushcfunction(L, lib->func); + lua_pushstring(L, lib->name); + lua_call(L, 1, 0); + } + luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", + sizeof(lj_lib_preload)/sizeof(lj_lib_preload[0])-1); + for (lib = lj_lib_preload; lib->func; lib++) { + lua_pushcfunction(L, lib->func); + lua_setfield(L, -2, lib->name); + } + lua_pop(L, 1); +} + diff --git a/src/luajit2/src/lib_io.c b/src/luajit2/src/lib_io.c new file mode 100644 index 0000000000..7a59cc4ff6 --- /dev/null +++ b/src/luajit2/src/lib_io.c @@ -0,0 +1,533 @@ +/* +** I/O library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#include <errno.h> +#include <stdio.h> + +#define lib_io_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_ff.h" +#include "lj_trace.h" +#include "lj_lib.h" + +/* Userdata payload for I/O file. */ +typedef struct IOFileUD { + FILE *fp; /* File handle. */ + uint32_t type; /* File type. */ +} IOFileUD; + +#define IOFILE_TYPE_FILE 0 /* Regular file. */ +#define IOFILE_TYPE_PIPE 1 /* Pipe. */ +#define IOFILE_TYPE_STDF 2 /* Standard file handle. */ +#define IOFILE_TYPE_MASK 3 + +#define IOFILE_FLAG_CLOSE 4 /* Close after io.lines() iterator. */ + +#define IOSTDF_UD(L, id) (&gcref(G(L)->gcroot[(id)])->ud) +#define IOSTDF_IOF(L, id) ((IOFileUD *)uddata(IOSTDF_UD(L, (id)))) + +/* -- Error handling ------------------------------------------------------ */ + +static int io_pushresult(lua_State *L, int ok, const char *fname) +{ + if (ok) { + setboolV(L->top++, 1); + return 1; + } else { + int en = errno; /* Lua API calls may change this value. 
*/ + setnilV(L->top++); + if (fname) + lua_pushfstring(L, "%s: %s", fname, strerror(en)); + else + lua_pushfstring(L, "%s", strerror(en)); + setintV(L->top++, en); + lj_trace_abort(G(L)); + return 3; + } +} + +/* -- Open/close helpers -------------------------------------------------- */ + +static IOFileUD *io_tofilep(lua_State *L) +{ + if (!(L->base < L->top && tvisudata(L->base) && + udataV(L->base)->udtype == UDTYPE_IO_FILE)) + lj_err_argtype(L, 1, "FILE*"); + return (IOFileUD *)uddata(udataV(L->base)); +} + +static IOFileUD *io_tofile(lua_State *L) +{ + IOFileUD *iof = io_tofilep(L); + if (iof->fp == NULL) + lj_err_caller(L, LJ_ERR_IOCLFL); + return iof; +} + +static FILE *io_stdfile(lua_State *L, ptrdiff_t id) +{ + IOFileUD *iof = IOSTDF_IOF(L, id); + if (iof->fp == NULL) + lj_err_caller(L, LJ_ERR_IOSTDCL); + return iof->fp; +} + +static IOFileUD *io_file_new(lua_State *L) +{ + IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD)); + GCudata *ud = udataV(L->top-1); + ud->udtype = UDTYPE_IO_FILE; + /* NOBARRIER: The GCudata is new (marked white). */ + setgcrefr(ud->metatable, curr_func(L)->c.env); + iof->fp = NULL; + iof->type = IOFILE_TYPE_FILE; + return iof; +} + +static IOFileUD *io_file_open(lua_State *L, const char *mode) +{ + const char *fname = strdata(lj_lib_checkstr(L, 1)); + IOFileUD *iof = io_file_new(L); + iof->fp = fopen(fname, mode); + if (iof->fp == NULL) + luaL_argerror(L, 1, lj_str_pushf(L, "%s: %s", fname, strerror(errno))); + return iof; +} + +static int io_file_close(lua_State *L, IOFileUD *iof) +{ + int ok; + if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_FILE) { + ok = (fclose(iof->fp) == 0); + } else if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_PIPE) { +#if LJ_TARGET_POSIX + ok = (pclose(iof->fp) != -1); +#elif LJ_TARGET_WINDOWS + ok = (_pclose(iof->fp) != -1); +#else + ok = 0; +#endif + } else { + lua_assert((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF); + setnilV(L->top++); + lua_pushliteral(L, "cannot close standard file"); + return 2; + } + iof->fp = NULL; + return io_pushresult(L, ok, NULL); +} + +/* -- Read/write helpers -------------------------------------------------- */ + +static int io_file_readnum(lua_State *L, FILE *fp) +{ + lua_Number d; + if (fscanf(fp, LUA_NUMBER_SCAN, &d) == 1) { + if (LJ_DUALNUM) { + int32_t i = lj_num2int(d); + if (d == (lua_Number)i && !tvismzero((cTValue *)&d)) { + setintV(L->top++, i); + return 1; + } + } + setnumV(L->top++, d); + return 1; + } else { + setnilV(L->top++); + return 0; + } +} + +static int io_file_testeof(lua_State *L, FILE *fp) +{ + int c = getc(fp); + ungetc(c, fp); + lua_pushlstring(L, NULL, 0); + return (c != EOF); +} + +static int io_file_readline(lua_State *L, FILE *fp, size_t chop) +{ + luaL_Buffer b; + luaL_buffinit(L, &b); + for (;;) { + size_t len; + char *p = luaL_prepbuffer(&b); + if (fgets(p, LUAL_BUFFERSIZE, fp) == NULL) { /* EOF? */ + luaL_pushresult(&b); + return (strV(L->top-1)->len > 0); /* Anything read? */ + } + len = strlen(p); + if (len == 0 || p[len-1] != '\n') { /* Partial line? */ + luaL_addsize(&b, len); + } else { + luaL_addsize(&b, len - chop); /* Keep or remove EOL. */ + luaL_pushresult(&b); + return 1; /* Got at least an EOL. 
*/ + } + } +} + +static int io_file_readchars(lua_State *L, FILE *fp, size_t n) +{ + size_t rlen; /* how much to read */ + size_t nr; /* number of chars actually read */ + luaL_Buffer b; + luaL_buffinit(L, &b); + rlen = LUAL_BUFFERSIZE; /* try to read that much each time */ + do { + char *p = luaL_prepbuffer(&b); + if (rlen > n) rlen = n; /* cannot read more than asked */ + nr = fread(p, 1, rlen, fp); + luaL_addsize(&b, nr); + n -= nr; /* still have to read `n' chars */ + } while (n > 0 && nr == rlen); /* until end of count or eof */ + luaL_pushresult(&b); /* close buffer */ + return (n == 0 || strV(L->top-1)->len > 0); +} + +static int io_file_read(lua_State *L, FILE *fp, int start) +{ + int ok, n, nargs = (int)(L->top - L->base) - start; + clearerr(fp); + if (nargs == 0) { + ok = io_file_readline(L, fp, 1); + n = start+1; /* Return 1 result. */ + } else { + /* The results plus the buffers go on top of the args. */ + luaL_checkstack(L, nargs+LUA_MINSTACK, "too many arguments"); + ok = 1; + for (n = start; nargs-- && ok; n++) { + if (tvisstr(L->base+n)) { + const char *p = strVdata(L->base+n); + if (p[0] != '*') + lj_err_arg(L, n+1, LJ_ERR_INVOPT); + if (p[1] == 'n') + ok = io_file_readnum(L, fp); + else if ((p[1] & ~0x20) == 'L') + ok = io_file_readline(L, fp, (p[1] == 'l')); + else if (p[1] == 'a') + io_file_readchars(L, fp, ~((size_t)0)); + else + lj_err_arg(L, n+1, LJ_ERR_INVFMT); + } else if (tvisnumber(L->base+n)) { + size_t len = (size_t)lj_lib_checkint(L, n+1); + ok = len ? io_file_readchars(L, fp, len) : io_file_testeof(L, fp); + } else { + lj_err_arg(L, n+1, LJ_ERR_INVOPT); + } + } + } + if (ferror(fp)) + return io_pushresult(L, 0, NULL); + if (!ok) + setnilV(L->top-1); /* Replace last result with nil. */ + return n - start; +} + +static int io_file_write(lua_State *L, FILE *fp, int start) +{ + cTValue *tv; + int status = 1; + for (tv = L->base+start; tv < L->top; tv++) { + if (tvisstr(tv)) { + MSize len = strV(tv)->len; + status = status && (fwrite(strVdata(tv), 1, len, fp) == len); + } else if (tvisint(tv)) { + char buf[LJ_STR_INTBUF]; + char *p = lj_str_bufint(buf, intV(tv)); + size_t len = (size_t)(buf+LJ_STR_INTBUF-p); + status = status && (fwrite(p, 1, len, fp) == len); + } else if (tvisnum(tv)) { + status = status && (fprintf(fp, LUA_NUMBER_FMT, numV(tv)) > 0); + } else { + lj_err_argt(L, (int)(tv - L->base) + 1, LUA_TSTRING); + } + } + return io_pushresult(L, status, NULL); +} + +/* -- I/O file methods ---------------------------------------------------- */ + +#define LJLIB_MODULE_io_method + +LJLIB_CF(io_method_close) +{ + IOFileUD *iof = L->base < L->top ? 
io_tofile(L) : + IOSTDF_IOF(L, GCROOT_IO_OUTPUT); + return io_file_close(L, iof); +} + +LJLIB_CF(io_method_read) +{ + return io_file_read(L, io_tofile(L)->fp, 1); +} + +LJLIB_CF(io_method_write) LJLIB_REC(io_write 0) +{ + return io_file_write(L, io_tofile(L)->fp, 1); +} + +LJLIB_CF(io_method_flush) LJLIB_REC(io_flush 0) +{ + return io_pushresult(L, fflush(io_tofile(L)->fp) == 0, NULL); +} + +LJLIB_CF(io_method_seek) +{ + FILE *fp = io_tofile(L)->fp; + int opt = lj_lib_checkopt(L, 2, 1, "\3set\3cur\3end"); + int64_t ofs = 0; + cTValue *o; + int res; + if (opt == 0) opt = SEEK_SET; + else if (opt == 1) opt = SEEK_CUR; + else if (opt == 2) opt = SEEK_END; + o = L->base+2; + if (o < L->top) { + if (tvisint(o)) + ofs = (int64_t)intV(o); + else if (tvisnum(o)) + ofs = (int64_t)numV(o); + else if (!tvisnil(o)) + lj_err_argt(L, 3, LUA_TNUMBER); + } +#if LJ_TARGET_POSIX + res = fseeko(fp, ofs, opt); +#elif _MSC_VER >= 1400 + res = _fseeki64(fp, ofs, opt); +#elif defined(__MINGW32__) + res = fseeko64(fp, ofs, opt); +#else + res = fseek(fp, (long)ofs, opt); +#endif + if (res) + return io_pushresult(L, 0, NULL); +#if LJ_TARGET_POSIX + ofs = ftello(fp); +#elif _MSC_VER >= 1400 + ofs = _ftelli64(fp); +#elif defined(__MINGW32__) + ofs = ftello64(fp); +#else + ofs = (int64_t)ftell(fp); +#endif + setint64V(L->top-1, ofs); + return 1; +} + +LJLIB_CF(io_method_setvbuf) +{ + FILE *fp = io_tofile(L)->fp; + int opt = lj_lib_checkopt(L, 2, -1, "\4full\4line\2no"); + size_t sz = (size_t)lj_lib_optint(L, 3, LUAL_BUFFERSIZE); + if (opt == 0) opt = _IOFBF; + else if (opt == 1) opt = _IOLBF; + else if (opt == 2) opt = _IONBF; + return io_pushresult(L, setvbuf(fp, NULL, opt, sz) == 0, NULL); +} + +LJLIB_PUSH(top-2) /* io_lines_iter */ +LJLIB_CF(io_method_lines) +{ + io_tofile(L); + setfuncV(L, L->top, funcV(lj_lib_upvalue(L, 1))); + setudataV(L, L->top+1, udataV(L->base)); + L->top += 2; + return 2; +} + +LJLIB_CF(io_method___gc) +{ + IOFileUD *iof = io_tofilep(L); + if (iof->fp != NULL && (iof->type & IOFILE_TYPE_MASK) != IOFILE_TYPE_STDF) + io_file_close(L, iof); + return 0; +} + +LJLIB_CF(io_method___tostring) +{ + IOFileUD *iof = io_tofilep(L); + if (iof->fp != NULL) + lua_pushfstring(L, "file (%p)", iof->fp); + else + lua_pushliteral(L, "file (closed)"); + return 1; +} + +LJLIB_PUSH(top-1) LJLIB_SET(__index) + +#include "lj_libdef.h" + +/* -- I/O library functions ----------------------------------------------- */ + +#define LJLIB_MODULE_io + +LJLIB_PUSH(top-2) LJLIB_SET(!) /* Set environment. */ + +LJLIB_CF(io_open) +{ + const char *fname = strdata(lj_lib_checkstr(L, 1)); + GCstr *s = lj_lib_optstr(L, 2); + const char *mode = s ? strdata(s) : "r"; + IOFileUD *iof = io_file_new(L); + iof->fp = fopen(fname, mode); + return iof->fp != NULL ? 1 : io_pushresult(L, 0, fname); +} + +LJLIB_CF(io_popen) +{ +#if LJ_TARGET_POSIX || LJ_TARGET_WINDOWS + const char *fname = strdata(lj_lib_checkstr(L, 1)); + GCstr *s = lj_lib_optstr(L, 2); + const char *mode = s ? strdata(s) : "r"; + IOFileUD *iof = io_file_new(L); + iof->type = IOFILE_TYPE_PIPE; +#if LJ_TARGET_POSIX + fflush(NULL); + iof->fp = popen(fname, mode); +#else + iof->fp = _popen(fname, mode); +#endif + return iof->fp != NULL ? 1 : io_pushresult(L, 0, fname); +#else + luaL_error(L, LUA_QL("popen") " not supported"); +#endif +} + +LJLIB_CF(io_tmpfile) +{ + IOFileUD *iof = io_file_new(L); + iof->fp = tmpfile(); + return iof->fp != NULL ? 
1 : io_pushresult(L, 0, NULL); +} + +LJLIB_CF(io_close) +{ + return lj_cf_io_method_close(L); +} + +LJLIB_CF(io_read) +{ + return io_file_read(L, io_stdfile(L, GCROOT_IO_INPUT), 0); +} + +LJLIB_CF(io_write) LJLIB_REC(io_write GCROOT_IO_OUTPUT) +{ + return io_file_write(L, io_stdfile(L, GCROOT_IO_OUTPUT), 0); +} + +LJLIB_CF(io_flush) LJLIB_REC(io_flush GCROOT_IO_OUTPUT) +{ + return io_pushresult(L, fflush(io_stdfile(L, GCROOT_IO_OUTPUT)) == 0, NULL); +} + +static int io_std_getset(lua_State *L, ptrdiff_t id, const char *mode) +{ + if (L->base < L->top && !tvisnil(L->base)) { + if (tvisudata(L->base)) { + io_tofile(L); + L->top = L->base+1; + } else { + io_file_open(L, mode); + } + /* NOBARRIER: The standard I/O handles are GC roots. */ + setgcref(G(L)->gcroot[id], gcV(L->top-1)); + } else { + setudataV(L, L->top++, IOSTDF_UD(L, id)); + } + return 1; +} + +LJLIB_CF(io_input) +{ + return io_std_getset(L, GCROOT_IO_INPUT, "r"); +} + +LJLIB_CF(io_output) +{ + return io_std_getset(L, GCROOT_IO_OUTPUT, "w"); +} + +LJLIB_NOREG LJLIB_CF(io_lines_iter) +{ + IOFileUD *iof = io_tofile(L); + int ok = io_file_readline(L, iof->fp, 1); + if (ferror(iof->fp)) + lj_err_callermsg(L, strerror(errno)); + if (!ok && (iof->type & IOFILE_FLAG_CLOSE)) + io_file_close(L, iof); /* Return values are ignored (ok is 0). */ + return ok; +} + +LJLIB_PUSH(top-3) /* io_lines_iter */ +LJLIB_CF(io_lines) +{ + if (L->base < L->top && !tvisnil(L->base)) { /* io.lines(fname) */ + IOFileUD *iof = io_file_open(L, "r"); + iof->type = IOFILE_TYPE_FILE|IOFILE_FLAG_CLOSE; + setfuncV(L, L->top-2, funcV(lj_lib_upvalue(L, 1))); + } else { /* io.lines() iterates over stdin. */ + setfuncV(L, L->top, funcV(lj_lib_upvalue(L, 1))); + setudataV(L, L->top+1, IOSTDF_UD(L, GCROOT_IO_INPUT)); + L->top += 2; + } + return 2; +} + +LJLIB_CF(io_type) +{ + cTValue *o = lj_lib_checkany(L, 1); + if (!(tvisudata(o) && udataV(o)->udtype == UDTYPE_IO_FILE)) + setnilV(L->top++); + else if (((IOFileUD *)uddata(udataV(o)))->fp != NULL) + lua_pushliteral(L, "file"); + else + lua_pushliteral(L, "closed file"); + return 1; +} + +#include "lj_libdef.h" + +/* ------------------------------------------------------------------------ */ + +static GCobj *io_std_new(lua_State *L, FILE *fp, const char *name) +{ + IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD)); + GCudata *ud = udataV(L->top-1); + ud->udtype = UDTYPE_IO_FILE; + /* NOBARRIER: The GCudata is new (marked white). */ + setgcref(ud->metatable, gcV(L->top-3)); + iof->fp = fp; + iof->type = IOFILE_TYPE_STDF; + lua_setfield(L, -2, name); + return obj2gco(ud); +} + +LUALIB_API int luaopen_io(lua_State *L) +{ + lj_lib_pushcf(L, lj_cf_io_lines_iter, FF_io_lines_iter); + LJ_LIB_REG(L, NULL, io_method); + copyTV(L, L->top, L->top-1); L->top++; + lua_setfield(L, LUA_REGISTRYINDEX, LUA_FILEHANDLE); + LJ_LIB_REG(L, LUA_IOLIBNAME, io); + setgcref(G(L)->gcroot[GCROOT_IO_INPUT], io_std_new(L, stdin, "stdin")); + setgcref(G(L)->gcroot[GCROOT_IO_OUTPUT], io_std_new(L, stdout, "stdout")); + io_std_new(L, stderr, "stderr"); + return 1; +} + diff --git a/src/luajit2/src/lib_jit.c b/src/luajit2/src/lib_jit.c new file mode 100644 index 0000000000..66b3856aee --- /dev/null +++ b/src/luajit2/src/lib_jit.c @@ -0,0 +1,635 @@ +/* +** JIT library. +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lib_jit_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_arch.h" +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_bc.h" +#if LJ_HASJIT +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_target.h" +#endif +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_vmevent.h" +#include "lj_lib.h" + +#include "luajit.h" + +/* -- jit.* functions ----------------------------------------------------- */ + +#define LJLIB_MODULE_jit + +static int setjitmode(lua_State *L, int mode) +{ + int idx = 0; + if (L->base == L->top || tvisnil(L->base)) { /* jit.on/off/flush([nil]) */ + mode |= LUAJIT_MODE_ENGINE; + } else { + /* jit.on/off/flush(func|proto, nil|true|false) */ + if (tvisfunc(L->base) || tvisproto(L->base)) + idx = 1; + else if (!tvistrue(L->base)) /* jit.on/off/flush(true, nil|true|false) */ + goto err; + if (L->base+1 < L->top && tvisbool(L->base+1)) + mode |= boolV(L->base+1) ? LUAJIT_MODE_ALLFUNC : LUAJIT_MODE_ALLSUBFUNC; + else + mode |= LUAJIT_MODE_FUNC; + } + if (luaJIT_setmode(L, idx, mode) != 1) { + if ((mode & LUAJIT_MODE_MASK) == LUAJIT_MODE_ENGINE) + lj_err_caller(L, LJ_ERR_NOJIT); + err: + lj_err_argt(L, 1, LUA_TFUNCTION); + } + return 0; +} + +LJLIB_CF(jit_on) +{ + return setjitmode(L, LUAJIT_MODE_ON); +} + +LJLIB_CF(jit_off) +{ + return setjitmode(L, LUAJIT_MODE_OFF); +} + +LJLIB_CF(jit_flush) +{ +#if LJ_HASJIT + if (L->base < L->top && !tvisnil(L->base)) { + int traceno = lj_lib_checkint(L, 1); + luaJIT_setmode(L, traceno, LUAJIT_MODE_FLUSH|LUAJIT_MODE_TRACE); + return 0; + } +#endif + return setjitmode(L, LUAJIT_MODE_FLUSH); +} + +#if LJ_HASJIT +/* Push a string for every flag bit that is set. */ +static void flagbits_to_strings(lua_State *L, uint32_t flags, uint32_t base, + const char *str) +{ + for (; *str; base <<= 1, str += 1+*str) + if (flags & base) + setstrV(L, L->top++, lj_str_new(L, str+1, *(uint8_t *)str)); +} +#endif + +LJLIB_CF(jit_status) +{ +#if LJ_HASJIT + jit_State *J = L2J(L); + L->top = L->base; + setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0); + flagbits_to_strings(L, J->flags, JIT_F_CPU_FIRST, JIT_F_CPUSTRING); + flagbits_to_strings(L, J->flags, JIT_F_OPT_FIRST, JIT_F_OPTSTRING); + return (int)(L->top - L->base); +#else + setboolV(L->top++, 0); + return 1; +#endif +} + +LJLIB_CF(jit_attach) +{ +#ifdef LUAJIT_DISABLE_VMEVENT + luaL_error(L, "vmevent API disabled"); +#else + GCfunc *fn = lj_lib_checkfunc(L, 1); + GCstr *s = lj_lib_optstr(L, 2); + luaL_findtable(L, LUA_REGISTRYINDEX, LJ_VMEVENTS_REGKEY, LJ_VMEVENTS_HSIZE); + if (s) { /* Attach to given event. */ + const uint8_t *p = (const uint8_t *)strdata(s); + uint32_t h = s->len; + while (*p) h = h ^ (lj_rol(h, 6) + *p++); + lua_pushvalue(L, 1); + lua_rawseti(L, -2, VMEVENT_HASHIDX(h)); + G(L)->vmevmask = VMEVENT_NOCACHE; /* Invalidate cache. */ + } else { /* Detach if no event given. 
*/ + setnilV(L->top++); + while (lua_next(L, -2)) { + L->top--; + if (tvisfunc(L->top) && funcV(L->top) == fn) { + setnilV(lj_tab_set(L, tabV(L->top-2), L->top-1)); + } + } + } +#endif + return 0; +} + +LJLIB_PUSH(top-5) LJLIB_SET(os) +LJLIB_PUSH(top-4) LJLIB_SET(arch) +LJLIB_PUSH(top-3) LJLIB_SET(version_num) +LJLIB_PUSH(top-2) LJLIB_SET(version) + +#include "lj_libdef.h" + +/* -- jit.util.* functions ------------------------------------------------ */ + +#define LJLIB_MODULE_jit_util + +/* -- Reflection API for Lua functions ------------------------------------ */ + +/* Return prototype of first argument (Lua function or prototype object) */ +static GCproto *check_Lproto(lua_State *L, int nolua) +{ + TValue *o = L->base; + if (L->top > o) { + if (tvisproto(o)) { + return protoV(o); + } else if (tvisfunc(o)) { + if (isluafunc(funcV(o))) + return funcproto(funcV(o)); + else if (nolua) + return NULL; + } + } + lj_err_argt(L, 1, LUA_TFUNCTION); + return NULL; /* unreachable */ +} + +static void setintfield(lua_State *L, GCtab *t, const char *name, int32_t val) +{ + setintV(lj_tab_setstr(L, t, lj_str_newz(L, name)), val); +} + +/* local info = jit.util.funcinfo(func [,pc]) */ +LJLIB_CF(jit_util_funcinfo) +{ + GCproto *pt = check_Lproto(L, 1); + if (pt) { + BCPos pc = (BCPos)lj_lib_optint(L, 2, 0); + GCtab *t; + lua_createtable(L, 0, 16); /* Increment hash size if fields are added. */ + t = tabV(L->top-1); + setintfield(L, t, "linedefined", pt->firstline); + setintfield(L, t, "lastlinedefined", pt->firstline + pt->numline); + setintfield(L, t, "stackslots", pt->framesize); + setintfield(L, t, "params", pt->numparams); + setintfield(L, t, "bytecodes", (int32_t)pt->sizebc); + setintfield(L, t, "gcconsts", (int32_t)pt->sizekgc); + setintfield(L, t, "nconsts", (int32_t)pt->sizekn); + setintfield(L, t, "upvalues", (int32_t)pt->sizeuv); + if (pc < pt->sizebc) + setintfield(L, t, "currentline", lj_debug_line(pt, pc)); + lua_pushboolean(L, (pt->flags & PROTO_VARARG)); + lua_setfield(L, -2, "isvararg"); + lua_pushboolean(L, (pt->flags & PROTO_CHILD)); + lua_setfield(L, -2, "children"); + setstrV(L, L->top++, proto_chunkname(pt)); + lua_setfield(L, -2, "source"); + lj_debug_pushloc(L, pt, pc); + lua_setfield(L, -2, "loc"); + } else { + GCfunc *fn = funcV(L->base); + GCtab *t; + lua_createtable(L, 0, 4); /* Increment hash size if fields are added. 
*/ + t = tabV(L->top-1); + if (!iscfunc(fn)) + setintfield(L, t, "ffid", fn->c.ffid); + setintptrV(lj_tab_setstr(L, t, lj_str_newlit(L, "addr")), + (intptr_t)(void *)fn->c.f); + setintfield(L, t, "upvalues", fn->c.nupvalues); + } + return 1; +} + +/* local ins, m = jit.util.funcbc(func, pc) */ +LJLIB_CF(jit_util_funcbc) +{ + GCproto *pt = check_Lproto(L, 0); + BCPos pc = (BCPos)lj_lib_checkint(L, 2); + if (pc < pt->sizebc) { + BCIns ins = proto_bc(pt)[pc]; + BCOp op = bc_op(ins); + lua_assert(op < BC__MAX); + setintV(L->top, ins); + setintV(L->top+1, lj_bc_mode[op]); + L->top += 2; + return 2; + } + return 0; +} + +/* local k = jit.util.funck(func, idx) */ +LJLIB_CF(jit_util_funck) +{ + GCproto *pt = check_Lproto(L, 0); + ptrdiff_t idx = (ptrdiff_t)lj_lib_checkint(L, 2); + if (idx >= 0) { + if (idx < (ptrdiff_t)pt->sizekn) { + copyTV(L, L->top-1, proto_knumtv(pt, idx)); + return 1; + } + } else { + if (~idx < (ptrdiff_t)pt->sizekgc) { + GCobj *gc = proto_kgc(pt, idx); + setgcV(L, L->top-1, gc, ~gc->gch.gct); + return 1; + } + } + return 0; +} + +/* local name = jit.util.funcuvname(func, idx) */ +LJLIB_CF(jit_util_funcuvname) +{ + GCproto *pt = check_Lproto(L, 0); + uint32_t idx = (uint32_t)lj_lib_checkint(L, 2); + if (idx < pt->sizeuv) { + setstrV(L, L->top-1, lj_str_newz(L, lj_debug_uvname(pt, idx))); + return 1; + } + return 0; +} + +/* -- Reflection API for traces ------------------------------------------- */ + +#if LJ_HASJIT + +/* Check trace argument. Must not throw for non-existent trace numbers. */ +static GCtrace *jit_checktrace(lua_State *L) +{ + TraceNo tr = (TraceNo)lj_lib_checkint(L, 1); + jit_State *J = L2J(L); + if (tr > 0 && tr < J->sizetrace) + return traceref(J, tr); + return NULL; +} + +/* local info = jit.util.traceinfo(tr) */ +LJLIB_CF(jit_util_traceinfo) +{ + GCtrace *T = jit_checktrace(L); + if (T) { + GCtab *t; + lua_createtable(L, 0, 4); /* Increment hash size if fields are added. */ + t = tabV(L->top-1); + setintfield(L, t, "nins", (int32_t)T->nins - REF_BIAS - 1); + setintfield(L, t, "nk", REF_BIAS - (int32_t)T->nk); + setintfield(L, t, "link", T->link); + setintfield(L, t, "nexit", T->nsnap); + /* There are many more fields. Add them only when needed. */ + return 1; + } + return 0; +} + +/* local m, ot, op1, op2, prev = jit.util.traceir(tr, idx) */ +LJLIB_CF(jit_util_traceir) +{ + GCtrace *T = jit_checktrace(L); + IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS; + if (T && ref >= REF_BIAS && ref < T->nins) { + IRIns *ir = &T->ir[ref]; + int32_t m = lj_ir_mode[ir->o]; + setintV(L->top-2, m); + setintV(L->top-1, ir->ot); + setintV(L->top++, (int32_t)ir->op1 - (irm_op1(m)==IRMref ? REF_BIAS : 0)); + setintV(L->top++, (int32_t)ir->op2 - (irm_op2(m)==IRMref ? 
REF_BIAS : 0)); + setintV(L->top++, ir->prev); + return 5; + } + return 0; +} + +/* local k, t [, slot] = jit.util.tracek(tr, idx) */ +LJLIB_CF(jit_util_tracek) +{ + GCtrace *T = jit_checktrace(L); + IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS; + if (T && ref >= T->nk && ref < REF_BIAS) { + IRIns *ir = &T->ir[ref]; + int32_t slot = -1; + if (ir->o == IR_KSLOT) { + slot = ir->op2; + ir = &T->ir[ir->op1]; + } + lj_ir_kvalue(L, L->top-2, ir); + setintV(L->top-1, (int32_t)irt_type(ir->t)); + if (slot == -1) + return 2; + setintV(L->top++, slot); + return 3; + } + return 0; +} + +/* local snap = jit.util.tracesnap(tr, sn) */ +LJLIB_CF(jit_util_tracesnap) +{ + GCtrace *T = jit_checktrace(L); + SnapNo sn = (SnapNo)lj_lib_checkint(L, 2); + if (T && sn < T->nsnap) { + SnapShot *snap = &T->snap[sn]; + SnapEntry *map = &T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + GCtab *t; + lua_createtable(L, nent+2, 0); + t = tabV(L->top-1); + setintV(lj_tab_setint(L, t, 0), (int32_t)snap->ref - REF_BIAS); + setintV(lj_tab_setint(L, t, 1), (int32_t)snap->nslots); + for (n = 0; n < nent; n++) + setintV(lj_tab_setint(L, t, (int32_t)(n+2)), (int32_t)map[n]); + setintV(lj_tab_setint(L, t, (int32_t)(nent+2)), (int32_t)SNAP(255, 0, 0)); + return 1; + } + return 0; +} + +/* local mcode, addr, loop = jit.util.tracemc(tr) */ +LJLIB_CF(jit_util_tracemc) +{ + GCtrace *T = jit_checktrace(L); + if (T && T->mcode != NULL) { + setstrV(L, L->top-1, lj_str_new(L, (const char *)T->mcode, T->szmcode)); + setintptrV(L->top++, (intptr_t)(void *)T->mcode); + setintV(L->top++, T->mcloop); + return 3; + } + return 0; +} + +/* local addr = jit.util.traceexitstub(idx) */ +LJLIB_CF(jit_util_traceexitstub) +{ + ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1); + jit_State *J = L2J(L); + if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) { + setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno)); + return 1; + } + return 0; +} + +/* local addr = jit.util.ircalladdr(idx) */ +LJLIB_CF(jit_util_ircalladdr) +{ + uint32_t idx = (uint32_t)lj_lib_checkint(L, 1); + if (idx < IRCALL__MAX) { + setintptrV(L->top-1, (intptr_t)(void *)lj_ir_callinfo[idx].func); + return 1; + } + return 0; +} + +#else + +static int trace_nojit(lua_State *L) +{ + UNUSED(L); + return 0; +} +#define lj_cf_jit_util_traceinfo trace_nojit +#define lj_cf_jit_util_traceir trace_nojit +#define lj_cf_jit_util_tracek trace_nojit +#define lj_cf_jit_util_tracesnap trace_nojit +#define lj_cf_jit_util_tracemc trace_nojit +#define lj_cf_jit_util_traceexitstub trace_nojit +#define lj_cf_jit_util_ircalladdr trace_nojit + +#endif + +#include "lj_libdef.h" + +/* -- jit.opt module ------------------------------------------------------ */ + +#define LJLIB_MODULE_jit_opt + +#if LJ_HASJIT +/* Parse optimization level. */ +static int jitopt_level(jit_State *J, const char *str) +{ + if (str[0] >= '0' && str[0] <= '9' && str[1] == '\0') { + uint32_t flags; + if (str[0] == '0') flags = JIT_F_OPT_0; + else if (str[0] == '1') flags = JIT_F_OPT_1; + else if (str[0] == '2') flags = JIT_F_OPT_2; + else flags = JIT_F_OPT_3; + J->flags = (J->flags & ~JIT_F_OPT_MASK) | flags; + return 1; /* Ok. */ + } + return 0; /* No match. */ +} + +/* Parse optimization flag. */ +static int jitopt_flag(jit_State *J, const char *str) +{ + const char *lst = JIT_F_OPTSTRING; + uint32_t opt; + int set = 1; + if (str[0] == '+') { + str++; + } else if (str[0] == '-') { + str++; + set = 0; + } else if (str[0] == 'n' && str[1] == 'o') { + str += str[2] == '-' ? 
3 : 2; + set = 0; + } + for (opt = JIT_F_OPT_FIRST; ; opt <<= 1) { + size_t len = *(const uint8_t *)lst; + if (len == 0) + break; + if (strncmp(str, lst+1, len) == 0 && str[len] == '\0') { + if (set) J->flags |= opt; else J->flags &= ~opt; + return 1; /* Ok. */ + } + lst += 1+len; + } + return 0; /* No match. */ +} + +/* Parse optimization parameter. */ +static int jitopt_param(jit_State *J, const char *str) +{ + const char *lst = JIT_P_STRING; + int i; + for (i = 0; i < JIT_P__MAX; i++) { + size_t len = *(const uint8_t *)lst; + lua_assert(len != 0); + if (strncmp(str, lst+1, len) == 0 && str[len] == '=') { + int32_t n = 0; + const char *p = &str[len+1]; + while (*p >= '0' && *p <= '9') + n = n*10 + (*p++ - '0'); + if (*p) return 0; /* Malformed number. */ + J->param[i] = n; + if (i == JIT_P_hotloop) + lj_dispatch_init_hotcount(J2G(J)); + return 1; /* Ok. */ + } + lst += 1+len; + } + return 0; /* No match. */ +} +#endif + +/* jit.opt.start(flags...) */ +LJLIB_CF(jit_opt_start) +{ +#if LJ_HASJIT + jit_State *J = L2J(L); + int nargs = (int)(L->top - L->base); + if (nargs == 0) { + J->flags = (J->flags & ~JIT_F_OPT_MASK) | JIT_F_OPT_DEFAULT; + } else { + int i; + for (i = 1; i <= nargs; i++) { + const char *str = strdata(lj_lib_checkstr(L, i)); + if (!jitopt_level(J, str) && + !jitopt_flag(J, str) && + !jitopt_param(J, str)) + lj_err_callerv(L, LJ_ERR_JITOPT, str); + } + } +#else + lj_err_caller(L, LJ_ERR_NOJIT); +#endif + return 0; +} + +#include "lj_libdef.h" + +/* -- JIT compiler initialization ----------------------------------------- */ + +#if LJ_HASJIT +/* Default values for JIT parameters. */ +static const int32_t jit_param_default[JIT_P__MAX+1] = { +#define JIT_PARAMINIT(len, name, value) (value), +JIT_PARAMDEF(JIT_PARAMINIT) +#undef JIT_PARAMINIT + 0 +}; +#endif + +#if LJ_TARGET_ARM && LJ_TARGET_LINUX +#include <sys/utsname.h> +#endif + +/* Arch-dependent CPU detection. */ +static uint32_t jit_cpudetect(lua_State *L) +{ + uint32_t flags = 0; +#if LJ_TARGET_X86ORX64 + uint32_t vendor[4]; + uint32_t features[4]; + if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) { +#if !LJ_HASJIT +#define JIT_F_CMOV 1 +#define JIT_F_SSE2 2 +#endif + flags |= ((features[3] >> 15)&1) * JIT_F_CMOV; + flags |= ((features[3] >> 26)&1) * JIT_F_SSE2; +#if LJ_HASJIT + flags |= ((features[2] >> 0)&1) * JIT_F_SSE3; + flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1; + if (vendor[2] == 0x6c65746e) { /* Intel. */ + if ((features[0] & 0x0ff00f00) == 0x00000f00) /* P4. */ + flags |= JIT_F_P4; /* Currently unused. */ + else if ((features[0] & 0x0fff0ff0) == 0x000106c0) /* Atom. */ + flags |= JIT_F_LEA_AGU; + } else if (vendor[2] == 0x444d4163) { /* AMD. */ + uint32_t fam = (features[0] & 0x0ff00f00); + if (fam == 0x00000f00) /* K8. */ + flags |= JIT_F_SPLIT_XMM; + if (fam >= 0x00000f00) /* K8, K10. */ + flags |= JIT_F_PREFER_IMUL; + } +#endif + } + /* Check for required instruction set support on x86 (unnecessary on x64). */ +#if LJ_TARGET_X86 +#if !defined(LUAJIT_CPU_NOCMOV) + if (!(flags & JIT_F_CMOV)) + luaL_error(L, "Ancient CPU lacks CMOV support (recompile with -DLUAJIT_CPU_NOCMOV)"); +#endif +#if defined(LUAJIT_CPU_SSE2) + if (!(flags & JIT_F_SSE2)) + luaL_error(L, "CPU does not support SSE2 (recompile without -DLUAJIT_CPU_SSE2)"); +#endif +#endif +#elif LJ_TARGET_ARM + /* Compile-time ARM CPU detection. 
*/ +#if __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ + flags |= JIT_F_ARMV6|JIT_F_ARMV6T2|JIT_F_ARMV7; +#elif __ARM_ARCH_6T2__ + flags |= JIT_F_ARMV6|JIT_F_ARMV6T2; +#elif __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6Z__ || __ARM_ARCH_6ZK__ + flags |= JIT_F_ARMV6; +#endif + /* Runtime ARM CPU detection. */ +#if LJ_TARGET_LINUX + if (!(flags & JIT_F_ARMV7)) { + struct utsname ut; + uname(&ut); + if (strncmp(ut.machine, "armv", 4) == 0) { + if (ut.machine[4] >= '7') + flags |= JIT_F_ARMV6|JIT_F_ARMV6T2|JIT_F_ARMV7; + else if (ut.machine[4] == '6') + flags |= JIT_F_ARMV6; + } + } +#endif +#elif LJ_TARGET_PPC + /* Nothing to do. */ +#else +#error "Missing CPU detection for this architecture" +#endif + UNUSED(L); + return flags; +} + +/* Initialize JIT compiler. */ +static void jit_init(lua_State *L) +{ + uint32_t flags = jit_cpudetect(L); +#if LJ_HASJIT + jit_State *J = L2J(L); +#if LJ_TARGET_X86 + /* Silently turn off the JIT compiler on CPUs without SSE2. */ + if ((flags & JIT_F_SSE2)) +#endif + J->flags = flags | JIT_F_ON | JIT_F_OPT_DEFAULT; + memcpy(J->param, jit_param_default, sizeof(J->param)); + lj_dispatch_update(G(L)); +#else + UNUSED(flags); +#endif +} + +LUALIB_API int luaopen_jit(lua_State *L) +{ + lua_pushliteral(L, LJ_OS_NAME); + lua_pushliteral(L, LJ_ARCH_NAME); + lua_pushinteger(L, LUAJIT_VERSION_NUM); + lua_pushliteral(L, LUAJIT_VERSION); + LJ_LIB_REG(L, LUA_JITLIBNAME, jit); +#ifndef LUAJIT_DISABLE_JITUTIL + LJ_LIB_REG(L, "jit.util", jit_util); +#endif + LJ_LIB_REG(L, "jit.opt", jit_opt); + L->top -= 2; + jit_init(L); + return 1; +} + diff --git a/src/luajit2/src/lib_math.c b/src/luajit2/src/lib_math.c new file mode 100644 index 0000000000..599f948e98 --- /dev/null +++ b/src/luajit2/src/lib_math.c @@ -0,0 +1,218 @@ +/* +** Math library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include <math.h> + +#define lib_math_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_lib.h" +#include "lj_vm.h" + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_math + +LJLIB_ASM(math_abs) LJLIB_REC(.) +{ + lj_lib_checknumber(L, 1); + return FFH_RETRY; +} +LJLIB_ASM_(math_floor) LJLIB_REC(math_round IRFPM_FLOOR) +LJLIB_ASM_(math_ceil) LJLIB_REC(math_round IRFPM_CEIL) + +LJLIB_ASM(math_sqrt) LJLIB_REC(math_unary IRFPM_SQRT) +{ + lj_lib_checknum(L, 1); + return FFH_RETRY; +} +LJLIB_ASM_(math_log) LJLIB_REC(math_unary IRFPM_LOG) +LJLIB_ASM_(math_log10) LJLIB_REC(math_unary IRFPM_LOG10) +LJLIB_ASM_(math_exp) LJLIB_REC(math_unary IRFPM_EXP) +LJLIB_ASM_(math_sin) LJLIB_REC(math_unary IRFPM_SIN) +LJLIB_ASM_(math_cos) LJLIB_REC(math_unary IRFPM_COS) +LJLIB_ASM_(math_tan) LJLIB_REC(math_unary IRFPM_TAN) +LJLIB_ASM_(math_asin) LJLIB_REC(math_atrig FF_math_asin) +LJLIB_ASM_(math_acos) LJLIB_REC(math_atrig FF_math_acos) +LJLIB_ASM_(math_atan) LJLIB_REC(math_atrig FF_math_atan) +LJLIB_ASM_(math_sinh) LJLIB_REC(math_htrig IRCALL_sinh) +LJLIB_ASM_(math_cosh) LJLIB_REC(math_htrig IRCALL_cosh) +LJLIB_ASM_(math_tanh) LJLIB_REC(math_htrig IRCALL_tanh) +LJLIB_ASM_(math_frexp) +LJLIB_ASM_(math_modf) LJLIB_REC(.) + +LJLIB_PUSH(57.29577951308232) +LJLIB_ASM_(math_deg) LJLIB_REC(math_degrad) + +LJLIB_PUSH(0.017453292519943295) +LJLIB_ASM_(math_rad) LJLIB_REC(math_degrad) + +LJLIB_ASM(math_atan2) LJLIB_REC(.) +{ + lj_lib_checknum(L, 1); + lj_lib_checknum(L, 2); + return FFH_RETRY; +} +LJLIB_ASM_(math_pow) LJLIB_REC(.) 
+LJLIB_ASM_(math_fmod) + +LJLIB_ASM(math_ldexp) LJLIB_REC(.) +{ + lj_lib_checknum(L, 1); +#if LJ_DUALNUM && !LJ_TARGET_X86ORX64 + lj_lib_checkint(L, 2); +#else + lj_lib_checknum(L, 2); +#endif + return FFH_RETRY; +} + +LJLIB_ASM(math_min) LJLIB_REC(math_minmax IR_MIN) +{ + int i = 0; + do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top); + return FFH_RETRY; +} +LJLIB_ASM_(math_max) LJLIB_REC(math_minmax IR_MAX) + +LJLIB_PUSH(3.14159265358979323846) LJLIB_SET(pi) +LJLIB_PUSH(1e310) LJLIB_SET(huge) + +/* ------------------------------------------------------------------------ */ + +/* This implements a Tausworthe PRNG with period 2^223. Based on: +** Tables of maximally-equidistributed combined LFSR generators, +** Pierre L'Ecuyer, 1991, table 3, 1st entry. +** Full-period ME-CF generator with L=64, J=4, k=223, N1=49. +*/ + +/* PRNG state. */ +struct RandomState { + uint64_t gen[4]; /* State of the 4 LFSR generators. */ + int valid; /* State is valid. */ +}; + +/* Union needed for bit-pattern conversion between uint64_t and double. */ +typedef union { uint64_t u64; double d; } U64double; + +/* Update generator i and compute a running xor of all states. */ +#define TW223_GEN(i, k, q, s) \ + z = rs->gen[i]; \ + z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \ + r ^= z; rs->gen[i] = z; + +/* PRNG step function. Returns a double in the range 1.0 <= d < 2.0. */ +LJ_NOINLINE uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs) +{ + uint64_t z, r = 0; + TW223_GEN(0, 63, 31, 18) + TW223_GEN(1, 58, 19, 28) + TW223_GEN(2, 55, 24, 7) + TW223_GEN(3, 47, 21, 8) + return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000); +} + +/* PRNG initialization function. */ +static void random_init(RandomState *rs, double d) +{ + uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */ + int i; + for (i = 0; i < 4; i++) { + U64double u; + uint32_t m = 1u << (r&255); + r >>= 8; + u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354; + if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of gen[i] are non-zero. */ + rs->gen[i] = u.u64; + } + rs->valid = 1; + for (i = 0; i < 10; i++) + lj_math_random_step(rs); +} + +/* PRNG extract function. */ +LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */ +LJLIB_CF(math_random) LJLIB_REC(.) +{ + int n = (int)(L->top - L->base); + RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1)))); + U64double u; + double d; + if (LJ_UNLIKELY(!rs->valid)) random_init(rs, 0.0); + u.u64 = lj_math_random_step(rs); + d = u.d - 1.0; + if (n > 0) { +#if LJ_DUALNUM + int isint = 1; + double r1; + lj_lib_checknumber(L, 1); + if (tvisint(L->base)) { + r1 = (lua_Number)intV(L->base); + } else { + isint = 0; + r1 = numV(L->base); + } +#else + double r1 = lj_lib_checknum(L, 1); +#endif + if (n == 1) { + d = lj_vm_floor(d*r1) + 1.0; /* d is an int in range [1, r1] */ + } else { +#if LJ_DUALNUM + double r2; + lj_lib_checknumber(L, 2); + if (tvisint(L->base+1)) { + r2 = (lua_Number)intV(L->base+1); + } else { + isint = 0; + r2 = numV(L->base+1); + } +#else + double r2 = lj_lib_checknum(L, 2); +#endif + d = lj_vm_floor(d*(r2-r1+1.0)) + r1; /* d is an int in range [r1, r2] */ + } +#if LJ_DUALNUM + if (isint) { + setintV(L->top-1, lj_num2int(d)); + return 1; + } +#endif + } /* else: d is a double in range [0, 1] */ + setnumV(L->top++, d); + return 1; +} + +/* PRNG seed function. */ +LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. 
*/ +LJLIB_CF(math_randomseed) +{ + RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1)))); + random_init(rs, lj_lib_checknum(L, 1)); + return 0; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_math(lua_State *L) +{ + RandomState *rs; + rs = (RandomState *)lua_newuserdata(L, sizeof(RandomState)); + rs->valid = 0; /* Use lazy initialization to save some time on startup. */ + LJ_LIB_REG(L, LUA_MATHLIBNAME, math); +#if defined(LUA_COMPAT_MOD) + lua_getfield(L, -1, "fmod"); + lua_setfield(L, -2, "mod"); +#endif + return 1; +} + diff --git a/src/luajit2/src/lib_os.c b/src/luajit2/src/lib_os.c new file mode 100644 index 0000000000..729294086a --- /dev/null +++ b/src/luajit2/src/lib_os.c @@ -0,0 +1,256 @@ +/* +** OS library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#include <errno.h> +#include <locale.h> +#include <time.h> + +#define lib_os_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_lib.h" + +#if LJ_TARGET_POSIX +#include <unistd.h> +#else +#include <stdio.h> +#endif + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_os + +static int os_pushresult(lua_State *L, int i, const char *filename) +{ + int en = errno; /* calls to Lua API may change this value */ + if (i) { + setboolV(L->top-1, 1); + return 1; + } else { + setnilV(L->top-1); + lua_pushfstring(L, "%s: %s", filename, strerror(en)); + lua_pushinteger(L, en); + return 3; + } +} + +LJLIB_CF(os_execute) +{ + lua_pushinteger(L, system(luaL_optstring(L, 1, NULL))); + return 1; +} + +LJLIB_CF(os_remove) +{ + const char *filename = luaL_checkstring(L, 1); + return os_pushresult(L, remove(filename) == 0, filename); +} + +LJLIB_CF(os_rename) +{ + const char *fromname = luaL_checkstring(L, 1); + const char *toname = luaL_checkstring(L, 2); + return os_pushresult(L, rename(fromname, toname) == 0, fromname); +} + +LJLIB_CF(os_tmpname) +{ +#if LJ_TARGET_POSIX + char buf[15+1]; + int fp; + strcpy(buf, "/tmp/lua_XXXXXX"); + fp = mkstemp(buf); + if (fp != -1) + close(fp); + else + lj_err_caller(L, LJ_ERR_OSUNIQF); +#else + char buf[L_tmpnam]; + if (tmpnam(buf) == NULL) + lj_err_caller(L, LJ_ERR_OSUNIQF); +#endif + lua_pushstring(L, buf); + return 1; +} + +LJLIB_CF(os_getenv) +{ + lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */ + return 1; +} + +LJLIB_CF(os_exit) +{ + int status; + if (L->base < L->top && tvisbool(L->base)) + status = boolV(L->base) ? EXIT_SUCCESS : EXIT_FAILURE; + else + status = lj_lib_optint(L, 1, EXIT_SUCCESS); + if (L->base+1 < L->top && tvistruecond(L->base+1)) + lua_close(L); + exit(status); + return 0; /* Unreachable. */ +} + +LJLIB_CF(os_clock) +{ + setnumV(L->top++, ((lua_Number)clock())*(1.0/(lua_Number)CLOCKS_PER_SEC)); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static void setfield(lua_State *L, const char *key, int value) +{ + lua_pushinteger(L, value); + lua_setfield(L, -2, key); +} + +static void setboolfield(lua_State *L, const char *key, int value) +{ + if (value < 0) /* undefined? 
*/ + return; /* does not set field */ + lua_pushboolean(L, value); + lua_setfield(L, -2, key); +} + +static int getboolfield(lua_State *L, const char *key) +{ + int res; + lua_getfield(L, -1, key); + res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1); + lua_pop(L, 1); + return res; +} + +static int getfield(lua_State *L, const char *key, int d) +{ + int res; + lua_getfield(L, -1, key); + if (lua_isnumber(L, -1)) { + res = (int)lua_tointeger(L, -1); + } else { + if (d < 0) + lj_err_callerv(L, LJ_ERR_OSDATEF, key); + res = d; + } + lua_pop(L, 1); + return res; +} + +LJLIB_CF(os_date) +{ + const char *s = luaL_optstring(L, 1, "%c"); + time_t t = luaL_opt(L, (time_t)luaL_checknumber, 2, time(NULL)); + struct tm *stm; + if (*s == '!') { /* UTC? */ + stm = gmtime(&t); + s++; /* skip `!' */ + } else { + stm = localtime(&t); + } + if (stm == NULL) { /* invalid date? */ + setnilV(L->top-1); + } else if (strcmp(s, "*t") == 0) { + lua_createtable(L, 0, 9); /* 9 = number of fields */ + setfield(L, "sec", stm->tm_sec); + setfield(L, "min", stm->tm_min); + setfield(L, "hour", stm->tm_hour); + setfield(L, "day", stm->tm_mday); + setfield(L, "month", stm->tm_mon+1); + setfield(L, "year", stm->tm_year+1900); + setfield(L, "wday", stm->tm_wday+1); + setfield(L, "yday", stm->tm_yday+1); + setboolfield(L, "isdst", stm->tm_isdst); + } else { + char cc[3]; + luaL_Buffer b; + cc[0] = '%'; cc[2] = '\0'; + luaL_buffinit(L, &b); + for (; *s; s++) { + if (*s != '%' || *(s + 1) == '\0') { /* no conversion specifier? */ + luaL_addchar(&b, *s); + } else { + size_t reslen; + char buff[200]; /* should be big enough for any conversion result */ + cc[1] = *(++s); + reslen = strftime(buff, sizeof(buff), cc, stm); + luaL_addlstring(&b, buff, reslen); + } + } + luaL_pushresult(&b); + } + return 1; +} + +LJLIB_CF(os_time) +{ + time_t t; + if (lua_isnoneornil(L, 1)) { /* called without args? */ + t = time(NULL); /* get current time */ + } else { + struct tm ts; + luaL_checktype(L, 1, LUA_TTABLE); + lua_settop(L, 1); /* make sure table is at the top */ + ts.tm_sec = getfield(L, "sec", 0); + ts.tm_min = getfield(L, "min", 0); + ts.tm_hour = getfield(L, "hour", 12); + ts.tm_mday = getfield(L, "day", -1); + ts.tm_mon = getfield(L, "month", -1) - 1; + ts.tm_year = getfield(L, "year", -1) - 1900; + ts.tm_isdst = getboolfield(L, "isdst"); + t = mktime(&ts); + } + if (t == (time_t)(-1)) + lua_pushnil(L); + else + lua_pushnumber(L, (lua_Number)t); + return 1; +} + +LJLIB_CF(os_difftime) +{ + lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)), + (time_t)(luaL_optnumber(L, 2, (lua_Number)0)))); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +LJLIB_CF(os_setlocale) +{ + GCstr *s = lj_lib_optstr(L, 1); + const char *str = s ? 
strdata(s) : NULL; + int opt = lj_lib_checkopt(L, 2, 6, + "\5ctype\7numeric\4time\7collate\10monetary\1\377\3all"); + if (opt == 0) opt = LC_CTYPE; + else if (opt == 1) opt = LC_NUMERIC; + else if (opt == 2) opt = LC_TIME; + else if (opt == 3) opt = LC_COLLATE; + else if (opt == 4) opt = LC_MONETARY; + else if (opt == 6) opt = LC_ALL; + lua_pushstring(L, setlocale(opt, str)); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_os(lua_State *L) +{ + LJ_LIB_REG(L, LUA_OSLIBNAME, os); + return 1; +} + diff --git a/src/luajit2/src/lib_package.c b/src/luajit2/src/lib_package.c new file mode 100644 index 0000000000..29aa7d8259 --- /dev/null +++ b/src/luajit2/src/lib_package.c @@ -0,0 +1,527 @@ +/* +** Package library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lib_package_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_lib.h" + +#include "physfs.h" + +/* ------------------------------------------------------------------------ */ + +/* Error codes for ll_loadfunc. */ +#define PACKAGE_ERR_LIB 1 +#define PACKAGE_ERR_FUNC 2 + +/* Redefined in platform specific part. */ +#define PACKAGE_LIB_FAIL "open" +#define setprogdir(L) ((void)0) + +#if LJ_TARGET_DLOPEN + +#include <dlfcn.h> + +static void ll_unloadlib(void *lib) +{ + dlclose(lib); +} + +static void *ll_load(lua_State *L, const char *path) +{ + void *lib = dlopen(path, RTLD_NOW); + if (lib == NULL) lua_pushstring(L, dlerror()); + return lib; +} + +static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) +{ + lua_CFunction f = (lua_CFunction)dlsym(lib, sym); + if (f == NULL) lua_pushstring(L, dlerror()); + return f; +} + +#elif LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#include <windows.h> + +#undef setprogdir + +static void setprogdir(lua_State *L) +{ + char buff[MAX_PATH + 1]; + char *lb; + DWORD nsize = sizeof(buff); + DWORD n = GetModuleFileNameA(NULL, buff, nsize); + if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) { + luaL_error(L, "unable to get ModuleFileName"); + } else { + *lb = '\0'; + luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff); + lua_remove(L, -2); /* remove original string */ + } +} + +static void pusherror(lua_State *L) +{ + DWORD error = GetLastError(); + char buffer[128]; + if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM, + NULL, error, 0, buffer, sizeof(buffer), NULL)) + lua_pushstring(L, buffer); + else + lua_pushfstring(L, "system error %d\n", error); +} + +static void ll_unloadlib(void *lib) +{ + FreeLibrary((HINSTANCE)lib); +} + +static void *ll_load(lua_State *L, const char *path) +{ + HINSTANCE lib = LoadLibraryA(path); + if (lib == NULL) pusherror(L); + return lib; +} + +static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) +{ + lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym); + if (f == NULL) pusherror(L); + return f; +} + +#else + +#undef PACKAGE_LIB_FAIL +#define PACKAGE_LIB_FAIL "absent" + +#define DLMSG "dynamic libraries not enabled; no support for target OS" + +static void ll_unloadlib(void *lib) +{ + (void)lib; +} + +static void *ll_load(lua_State *L, const char *path) +{ + (void)path; + lua_pushliteral(L, DLMSG); + return 
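Not part of the patch, just to spell out the lib_os.c behaviour above: os.time{...} fills a struct tm from the argument table (getfield() defaults sec/min to 0 and hour to 12, isdst comes from getboolfield() and is -1 when absent) and lets mktime() normalize it; os.date("*t") is the inverse via localtime()/gmtime(). A minimal sketch with made-up field values:

/* Sketch of the struct tm round trip behind os.time{...} and os.date("*t"). */
#include <stdio.h>
#include <time.h>

int main(void)
{
  struct tm ts = {0};          /* sec/min default to 0, like getfield(L, "sec", 0) */
  time_t t;
  ts.tm_year = 2011 - 1900;    /* as if os.time{year=2011, month=6, day=25} */
  ts.tm_mon  = 6 - 1;
  ts.tm_mday = 25;
  ts.tm_hour = 12;             /* getfield(L, "hour", 12) default */
  ts.tm_isdst = -1;            /* "unknown", like getboolfield() returning -1 */
  t = mktime(&ts);             /* mktime() also normalizes out-of-range fields */
  if (t != (time_t)-1) {
    char buf[64];
    struct tm *stm = localtime(&t);
    if (stm && strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", stm))
      printf("%s\n", buf);     /* what os.date("%Y-%m-%d %H:%M:%S", t) would format */
  }
  return 0;
}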
NULL; +} + +static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym) +{ + (void)lib; (void)sym; + lua_pushliteral(L, DLMSG); + return NULL; +} +#endif + +/* ------------------------------------------------------------------------ */ + +static void **ll_register(lua_State *L, const char *path) +{ + void **plib; + lua_pushfstring(L, "LOADLIB: %s", path); + lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */ + if (!lua_isnil(L, -1)) { /* is there an entry? */ + plib = (void **)lua_touserdata(L, -1); + } else { /* no entry yet; create one */ + lua_pop(L, 1); + plib = (void **)lua_newuserdata(L, sizeof(void *)); + *plib = NULL; + luaL_getmetatable(L, "_LOADLIB"); + lua_setmetatable(L, -2); + lua_pushfstring(L, "LOADLIB: %s", path); + lua_pushvalue(L, -2); + lua_settable(L, LUA_REGISTRYINDEX); + } + return plib; +} + +static int ll_loadfunc(lua_State *L, const char *path, const char *sym) +{ + void **reg = ll_register(L, path); + if (*reg == NULL) *reg = ll_load(L, path); + if (*reg == NULL) { + return PACKAGE_ERR_LIB; /* unable to load library */ + } else { + lua_CFunction f = ll_sym(L, *reg, sym); + if (f == NULL) + return PACKAGE_ERR_FUNC; /* unable to find function */ + lua_pushcfunction(L, f); + return 0; /* return function */ + } +} + +static int lj_cf_package_loadlib(lua_State *L) +{ + const char *path = luaL_checkstring(L, 1); + const char *init = luaL_checkstring(L, 2); + int st = ll_loadfunc(L, path, init); + if (st == 0) { /* no errors? */ + return 1; /* return the loaded function */ + } else { /* error; error message is on stack top */ + lua_pushnil(L); + lua_insert(L, -2); + lua_pushstring(L, (st == PACKAGE_ERR_LIB) ? PACKAGE_LIB_FAIL : "init"); + return 3; /* return nil, error message, and where */ + } +} + +static int lj_cf_package_unloadlib(lua_State *L) +{ + void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB"); + if (*lib) ll_unloadlib(*lib); + *lib = NULL; /* mark library as closed */ + return 0; +} + +/* ------------------------------------------------------------------------ */ + +static int readable(const char *filename) +{ + return PHYSFS_exists(filename); +} + +static const char *pushnexttemplate(lua_State *L, const char *path) +{ + const char *l; + while (*path == *LUA_PATHSEP) path++; /* skip separators */ + if (*path == '\0') return NULL; /* no more templates */ + l = strchr(path, *LUA_PATHSEP); /* find next separator */ + if (l == NULL) l = path + strlen(path); + lua_pushlstring(L, path, (size_t)(l - path)); /* template */ + return l; +} + +static const char *searchpath (lua_State *L, const char *name, + const char *path) +{ + name = luaL_gsub(L, name, ".", LUA_DIRSEP); + lua_pushliteral(L, ""); /* error accumulator */ + while ((path = pushnexttemplate(L, path)) != NULL) { + const char *filename = luaL_gsub(L, lua_tostring(L, -1), + LUA_PATH_MARK, name); + lua_remove(L, -2); /* remove path template */ + if (readable(filename)) /* does file exist and is readable? 
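A reviewer's sketch of what ll_loadfunc() above boils down to on a dlopen() target; the module path and symbol name here are hypothetical, and the lua_CFunction type is replaced by a stand-in so the sketch stays self-contained:

/* Minimal dlopen()/dlsym() sketch of ll_load() + ll_sym() (POSIX only; link with -ldl). */
#include <stdio.h>
#include <dlfcn.h>

typedef int (*luaopen_func)(void *L);    /* stand-in for lua_CFunction */

int main(void)
{
  const char *path = "./foo.so";         /* hypothetical C module */
  const char *sym  = "luaopen_foo";      /* mkfuncname() derives this from the module name */
  void *lib = dlopen(path, RTLD_NOW);    /* ll_load() */
  if (lib == NULL) {
    fprintf(stderr, "%s\n", dlerror());  /* the PACKAGE_ERR_LIB path */
    return 1;
  }
  luaopen_func f = (luaopen_func)dlsym(lib, sym);  /* ll_sym() */
  if (f == NULL) {
    fprintf(stderr, "%s\n", dlerror());  /* the PACKAGE_ERR_FUNC path */
    dlclose(lib);
    return 1;
  }
  printf("found %s in %s\n", sym, path);
  dlclose(lib);                          /* ll_unloadlib(), invoked via the _LOADLIB __gc */
  return 0;
}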
*/ + return filename; /* return that file name */ + lua_pushfstring(L, "\n\tno file " LUA_QS, filename); + lua_remove(L, -2); /* remove file name */ + lua_concat(L, 2); /* add entry to possible error message */ + } + return NULL; /* not found */ +} + +static int lj_cf_package_searchpath(lua_State *L) +{ + const char *f = searchpath(L, luaL_checkstring(L, 1), luaL_checkstring(L, 2)); + if (f != NULL) { + return 1; + } else { /* error message is on top of the stack */ + lua_pushnil(L); + lua_insert(L, -2); + return 2; /* return nil + error message */ + } +} + +static const char *findfile(lua_State *L, const char *name, + const char *pname) +{ + const char *path; + lua_getfield(L, LUA_ENVIRONINDEX, pname); + path = lua_tostring(L, -1); + if (path == NULL) + luaL_error(L, LUA_QL("package.%s") " must be a string", pname); + return searchpath(L, name, path); +} + +static void loaderror(lua_State *L, const char *filename) +{ + luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s", + lua_tostring(L, 1), filename, lua_tostring(L, -1)); +} + +static int lj_cf_package_loader_lua(lua_State *L) +{ + const char *filename; + const char *name = luaL_checkstring(L, 1); + filename = findfile(L, name, "path"); + if (filename == NULL) return 1; /* library not found in this path */ + if (luaL_loadfile(L, filename) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + +static const char *mkfuncname(lua_State *L, const char *modname) +{ + const char *funcname; + const char *mark = strchr(modname, *LUA_IGMARK); + if (mark) modname = mark + 1; + funcname = luaL_gsub(L, modname, ".", "_"); + funcname = lua_pushfstring(L, "luaopen_%s", funcname); + lua_remove(L, -2); /* remove 'gsub' result */ + return funcname; +} + +static int lj_cf_package_loader_c(lua_State *L) +{ + const char *funcname; + const char *name = luaL_checkstring(L, 1); + const char *filename = findfile(L, name, "cpath"); + if (filename == NULL) return 1; /* library not found in this path */ + funcname = mkfuncname(L, name); + if (ll_loadfunc(L, filename, funcname) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + +static int lj_cf_package_loader_croot(lua_State *L) +{ + const char *funcname; + const char *filename; + const char *name = luaL_checkstring(L, 1); + const char *p = strchr(name, '.'); + int st; + if (p == NULL) return 0; /* is root */ + lua_pushlstring(L, name, (size_t)(p - name)); + filename = findfile(L, lua_tostring(L, -1), "cpath"); + if (filename == NULL) return 1; /* root not found */ + funcname = mkfuncname(L, name); + if ((st = ll_loadfunc(L, filename, funcname)) != 0) { + if (st != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */ + lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS, + name, filename); + return 1; /* function not found */ + } + return 1; +} + +static int lj_cf_package_loader_preload(lua_State *L) +{ + const char *name = luaL_checkstring(L, 1); + lua_getfield(L, LUA_ENVIRONINDEX, "preload"); + if (!lua_istable(L, -1)) + luaL_error(L, LUA_QL("package.preload") " must be a table"); + lua_getfield(L, -1, name); + if (lua_isnil(L, -1)) /* not found? 
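Sketch (not part of the patch) of the template expansion searchpath() performs above: dots in the module name become directory separators and every "?" in a path template is replaced by that name; note that in this tree readable() is redirected to PHYSFS_exists(), so candidates are checked against the PhysicsFS mounts rather than the raw OS filesystem. The path string and module name below are invented for illustration:

/* Sketch of searchpath()'s "?"-template expansion. */
#include <stdio.h>
#include <string.h>

static void expand(const char *tmpl, const char *modname)
{
  char name[256], cand[512];
  size_t i;
  for (i = 0; modname[i] && i < sizeof(name)-1; i++)  /* gsub(name, ".", LUA_DIRSEP) */
    name[i] = (modname[i] == '.') ? '/' : modname[i];
  name[i] = '\0';
  while (*tmpl == ';') tmpl++;                        /* pushnexttemplate() skips separators */
  while (*tmpl) {
    const char *sep = strchr(tmpl, ';');              /* LUA_PATHSEP */
    size_t len = sep ? (size_t)(sep - tmpl) : strlen(tmpl);
    size_t o = 0, n = strlen(name);
    for (i = 0; i < len; i++) {
      if (tmpl[i] == '?') {                           /* LUA_PATH_MARK */
        if (o + n < sizeof(cand)) { memcpy(cand + o, name, n); o += n; }
      } else if (o < sizeof(cand)-1) {
        cand[o++] = tmpl[i];
      }
    }
    cand[o] = '\0';
    printf("candidate: %s\n", cand);  /* searchpath() would test this with readable() */
    tmpl += len;
    while (*tmpl == ';') tmpl++;
  }
}

int main(void)
{
  /* Hypothetical path and module name, just to show the expansion. */
  expand("/mod/?.lua;/mod/?/init.lua", "engine.ui.Dialog");
  return 0;
}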
*/ + lua_pushfstring(L, "\n\tno field package.preload['%s']", name); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static const int sentinel_ = 0; +#define sentinel ((void *)&sentinel_) + +static int lj_cf_package_require(lua_State *L) +{ + const char *name = luaL_checkstring(L, 1); + int i; + lua_settop(L, 1); /* _LOADED table will be at index 2 */ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, 2, name); + if (lua_toboolean(L, -1)) { /* is it there? */ + if (lua_touserdata(L, -1) == sentinel) /* check loops */ + luaL_error(L, "loop or previous error loading module " LUA_QS, name); + return 1; /* package is already loaded */ + } + /* else must load it; iterate over available loaders */ + lua_getfield(L, LUA_ENVIRONINDEX, "loaders"); + if (!lua_istable(L, -1)) + luaL_error(L, LUA_QL("package.loaders") " must be a table"); + lua_pushliteral(L, ""); /* error message accumulator */ + for (i = 1; ; i++) { + lua_rawgeti(L, -2, i); /* get a loader */ + if (lua_isnil(L, -1)) + luaL_error(L, "module " LUA_QS " not found:%s", + name, lua_tostring(L, -2)); + lua_pushstring(L, name); + lua_call(L, 1, 1); /* call it */ + if (lua_isfunction(L, -1)) /* did it find module? */ + break; /* module loaded successfully */ + else if (lua_isstring(L, -1)) /* loader returned error message? */ + lua_concat(L, 2); /* accumulate it */ + else + lua_pop(L, 1); + } + lua_pushlightuserdata(L, sentinel); + lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */ + lua_pushstring(L, name); /* pass name as argument to module */ + lua_call(L, 1, 1); /* run loaded module */ + if (!lua_isnil(L, -1)) /* non-nil return? */ + lua_setfield(L, 2, name); /* _LOADED[name] = returned value */ + lua_getfield(L, 2, name); + if (lua_touserdata(L, -1) == sentinel) { /* module did not set a value? */ + lua_pushboolean(L, 1); /* use true as result */ + lua_pushvalue(L, -1); /* extra copy to be returned */ + lua_setfield(L, 2, name); /* _LOADED[name] = true */ + } + lj_lib_checkfpu(L); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static void setfenv(lua_State *L) +{ + lua_Debug ar; + if (lua_getstack(L, 1, &ar) == 0 || + lua_getinfo(L, "f", &ar) == 0 || /* get calling function */ + lua_iscfunction(L, -1)) + luaL_error(L, LUA_QL("module") " not called from a Lua function"); + lua_pushvalue(L, -2); + lua_setfenv(L, -2); + lua_pop(L, 1); +} + +static void dooptions(lua_State *L, int n) +{ + int i; + for (i = 2; i <= n; i++) { + lua_pushvalue(L, i); /* get option (a function) */ + lua_pushvalue(L, -2); /* module */ + lua_call(L, 1, 0); + } +} + +static void modinit(lua_State *L, const char *modname) +{ + const char *dot; + lua_pushvalue(L, -1); + lua_setfield(L, -2, "_M"); /* module._M = module */ + lua_pushstring(L, modname); + lua_setfield(L, -2, "_NAME"); + dot = strrchr(modname, '.'); /* look for last dot in module name */ + if (dot == NULL) dot = modname; else dot++; + /* set _PACKAGE as package name (full module name minus last part) */ + lua_pushlstring(L, modname, (size_t)(dot - modname)); + lua_setfield(L, -2, "_PACKAGE"); +} + +static int lj_cf_package_module(lua_State *L) +{ + const char *modname = luaL_checkstring(L, 1); + int loaded = lua_gettop(L) + 1; /* index of _LOADED table */ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, loaded, modname); /* get _LOADED[modname] */ + if (!lua_istable(L, -1)) { /* not found? 
*/ + lua_pop(L, 1); /* remove previous result */ + /* try global variable (and create one if it does not exist) */ + if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL) + lj_err_callerv(L, LJ_ERR_BADMODN, modname); + lua_pushvalue(L, -1); + lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */ + } + /* check whether table already has a _NAME field */ + lua_getfield(L, -1, "_NAME"); + if (!lua_isnil(L, -1)) { /* is table an initialized module? */ + lua_pop(L, 1); + } else { /* no; initialize it */ + lua_pop(L, 1); + modinit(L, modname); + } + lua_pushvalue(L, -1); + setfenv(L); + dooptions(L, loaded - 1); + return 0; +} + +static int lj_cf_package_seeall(lua_State *L) +{ + luaL_checktype(L, 1, LUA_TTABLE); + if (!lua_getmetatable(L, 1)) { + lua_createtable(L, 0, 1); /* create new metatable */ + lua_pushvalue(L, -1); + lua_setmetatable(L, 1); + } + lua_pushvalue(L, LUA_GLOBALSINDEX); + lua_setfield(L, -2, "__index"); /* mt.__index = _G */ + return 0; +} + +/* ------------------------------------------------------------------------ */ + +#define AUXMARK "\1" + +static void setpath(lua_State *L, const char *fieldname, const char *envname, + const char *def) +{ + const char *path = getenv(envname); + if (path == NULL) { + lua_pushstring(L, def); + } else { + path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP, + LUA_PATHSEP AUXMARK LUA_PATHSEP); + luaL_gsub(L, path, AUXMARK, def); + lua_remove(L, -2); + } + setprogdir(L); + lua_setfield(L, -2, fieldname); +} + +static const luaL_Reg package_lib[] = { + { "loadlib", lj_cf_package_loadlib }, + { "searchpath", lj_cf_package_searchpath }, + { "seeall", lj_cf_package_seeall }, + { NULL, NULL } +}; + +static const luaL_Reg package_global[] = { + { "module", lj_cf_package_module }, + { "require", lj_cf_package_require }, + { NULL, NULL } +}; + +static const lua_CFunction package_loaders[] = +{ + lj_cf_package_loader_preload, + lj_cf_package_loader_lua, + lj_cf_package_loader_c, + lj_cf_package_loader_croot, + NULL +}; + +LUALIB_API int luaopen_package(lua_State *L) +{ + int i; + luaL_newmetatable(L, "_LOADLIB"); + lj_lib_pushcf(L, lj_cf_package_unloadlib, 1); + lua_setfield(L, -2, "__gc"); + luaL_register(L, LUA_LOADLIBNAME, package_lib); + lua_pushvalue(L, -1); + lua_replace(L, LUA_ENVIRONINDEX); + lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0); + for (i = 0; package_loaders[i] != NULL; i++) { + lj_lib_pushcf(L, package_loaders[i], 1); + lua_rawseti(L, -2, i+1); + } + lua_setfield(L, -2, "loaders"); + setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT); + setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT); + lua_pushliteral(L, LUA_PATH_CONFIG); + lua_setfield(L, -2, "config"); + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); + lua_setfield(L, -2, "loaded"); + luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4); + lua_setfield(L, -2, "preload"); + lua_pushvalue(L, LUA_GLOBALSINDEX); + luaL_register(L, NULL, package_global); + lua_pop(L, 1); + return 1; +} + diff --git a/src/luajit2/src/lib_string.c b/src/luajit2/src/lib_string.c new file mode 100644 index 0000000000..5cbe6e4d57 --- /dev/null +++ b/src/luajit2/src/lib_string.c @@ -0,0 +1,855 @@ +/* +** String library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
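One detail of setpath() above that is easy to miss, shown here as an editorial sketch rather than patch code: a doubled separator (";;") in LUA_PATH or LUA_CPATH is a placeholder where the compiled-in default path is spliced in, via the AUXMARK two-step substitution. The environment value and default below are invented:

/* Sketch of setpath(): ";;" marks where the default path gets spliced in. */
#include <stdio.h>

int main(void)
{
  const char *env = "./override/?.lua;;";      /* pretend getenv(LUA_PATH) result */
  const char *def = "./?.lua;./?/init.lua";    /* pretend LUA_PATH_DEFAULT */
  char out[512];
  size_t o = 0;
  const char *p = env;
  while (*p && o < sizeof(out)-1) {
    if (p[0] == ';' && p[1] == ';') {          /* LUA_PATHSEP LUA_PATHSEP */
      o += (size_t)snprintf(out + o, sizeof(out) - o, ";%s;", def);
      p += 2;
    } else {
      out[o++] = *p++;
    }
  }
  out[o < sizeof(out) ? o : sizeof(out)-1] = '\0';
  printf("%s\n", out);  /* ./override/?.lua;./?.lua;./?/init.lua; */
  return 0;
}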
See Copyright Notice in lua.h +*/ + +#include <stdio.h> + +#define lib_string_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_state.h" +#include "lj_ff.h" +#include "lj_bcdump.h" +#include "lj_char.h" +#include "lj_lib.h" + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_string + +LJLIB_ASM(string_len) LJLIB_REC(.) +{ + lj_lib_checkstr(L, 1); + return FFH_RETRY; +} + +LJLIB_ASM(string_byte) LJLIB_REC(string_range 0) +{ + GCstr *s = lj_lib_checkstr(L, 1); + int32_t len = (int32_t)s->len; + int32_t start = lj_lib_optint(L, 2, 1); + int32_t stop = lj_lib_optint(L, 3, start); + int32_t n, i; + const unsigned char *p; + if (stop < 0) stop += len+1; + if (start < 0) start += len+1; + if (start <= 0) start = 1; + if (stop > len) stop = len; + if (start > stop) return FFH_RES(0); /* Empty interval: return no results. */ + start--; + n = stop - start; + if ((uint32_t)n > LUAI_MAXCSTACK) + lj_err_caller(L, LJ_ERR_STRSLC); + lj_state_checkstack(L, (MSize)n); + p = (const unsigned char *)strdata(s) + start; + for (i = 0; i < n; i++) + setintV(L->base + i-1, p[i]); + return FFH_RES(n); +} + +LJLIB_ASM(string_char) +{ + int i, nargs = (int)(L->top - L->base); + char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, (size_t)nargs); + for (i = 1; i <= nargs; i++) { + int32_t k = lj_lib_checkint(L, i); + if (!checku8(k)) + lj_err_arg(L, i, LJ_ERR_BADVAL); + buf[i-1] = (char)k; + } + setstrV(L, L->base-1, lj_str_new(L, buf, (size_t)nargs)); + return FFH_RES(1); +} + +LJLIB_ASM(string_sub) LJLIB_REC(string_range 1) +{ + lj_lib_checkstr(L, 1); + lj_lib_checkint(L, 2); + setintV(L->base+2, lj_lib_optint(L, 3, -1)); + return FFH_RETRY; +} + +LJLIB_ASM(string_rep) +{ + GCstr *s = lj_lib_checkstr(L, 1); + int32_t len = (int32_t)s->len; + int32_t k = lj_lib_checkint(L, 2); + int64_t tlen = (int64_t)k * len; + const char *src; + char *buf; + if (k <= 0) return FFH_RETRY; + if (tlen > LJ_MAX_STR) + lj_err_caller(L, LJ_ERR_STROV); + buf = lj_str_needbuf(L, &G(L)->tmpbuf, (MSize)tlen); + if (len <= 1) return FFH_RETRY; /* ASM code only needed buffer resize. 
*/ + src = strdata(s); + do { + int32_t i = 0; + do { *buf++ = src[i++]; } while (i < len); + } while (--k > 0); + setstrV(L, L->base-1, lj_str_new(L, G(L)->tmpbuf.buf, (size_t)tlen)); + return FFH_RES(1); +} + +LJLIB_ASM(string_reverse) +{ + GCstr *s = lj_lib_checkstr(L, 1); + lj_str_needbuf(L, &G(L)->tmpbuf, s->len); + return FFH_RETRY; +} +LJLIB_ASM_(string_lower) +LJLIB_ASM_(string_upper) + +/* ------------------------------------------------------------------------ */ + +static int writer_buf(lua_State *L, const void *p, size_t size, void *b) +{ + luaL_addlstring((luaL_Buffer *)b, (const char *)p, size); + UNUSED(L); + return 0; +} + +LJLIB_CF(string_dump) +{ + GCfunc *fn = lj_lib_checkfunc(L, 1); + int strip = L->base+1 < L->top && tvistruecond(L->base+1); + luaL_Buffer b; + L->top = L->base+1; + luaL_buffinit(L, &b); + if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, &b, strip)) + lj_err_caller(L, LJ_ERR_STRDUMP); + luaL_pushresult(&b); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +/* macro to `unsign' a character */ +#define uchar(c) ((unsigned char)(c)) + +#define CAP_UNFINISHED (-1) +#define CAP_POSITION (-2) + +typedef struct MatchState { + const char *src_init; /* init of source string */ + const char *src_end; /* end (`\0') of source string */ + lua_State *L; + int level; /* total number of captures (finished or unfinished) */ + struct { + const char *init; + ptrdiff_t len; + } capture[LUA_MAXCAPTURES]; +} MatchState; + +#define L_ESC '%' +#define SPECIALS "^$*+?.([%-" + +static int check_capture(MatchState *ms, int l) +{ + l -= '1'; + if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED) + lj_err_caller(ms->L, LJ_ERR_STRCAPI); + return l; +} + +static int capture_to_close(MatchState *ms) +{ + int level = ms->level; + for (level--; level>=0; level--) + if (ms->capture[level].len == CAP_UNFINISHED) return level; + lj_err_caller(ms->L, LJ_ERR_STRPATC); + return 0; /* unreachable */ +} + +static const char *classend(MatchState *ms, const char *p) +{ + switch (*p++) { + case L_ESC: + if (*p == '\0') + lj_err_caller(ms->L, LJ_ERR_STRPATE); + return p+1; + case '[': + if (*p == '^') p++; + do { /* look for a `]' */ + if (*p == '\0') + lj_err_caller(ms->L, LJ_ERR_STRPATM); + if (*(p++) == L_ESC && *p != '\0') + p++; /* skip escapes (e.g. `%]') */ + } while (*p != ']'); + return p+1; + default: + return p; + } +} + +static const unsigned char match_class_map[32] = { + 0,LJ_CHAR_ALPHA,0,LJ_CHAR_CNTRL,LJ_CHAR_DIGIT,0,0,LJ_CHAR_GRAPH,0,0,0,0, + LJ_CHAR_LOWER,0,0,0,LJ_CHAR_PUNCT,0,0,LJ_CHAR_SPACE,0, + LJ_CHAR_UPPER,0,LJ_CHAR_ALNUM,LJ_CHAR_XDIGIT,0,0,0,0,0,0,0 +}; + +static int match_class(int c, int cl) +{ + if ((cl & 0xc0) == 0x40) { + int t = match_class_map[(cl&0x1f)]; + if (t) { + t = lj_char_isa(c, t); + return (cl & 0x20) ? 
t : !t; + } + if (cl == 'z') return c == 0; + if (cl == 'Z') return c != 0; + } + return (cl == c); +} + +static int matchbracketclass(int c, const char *p, const char *ec) +{ + int sig = 1; + if (*(p+1) == '^') { + sig = 0; + p++; /* skip the `^' */ + } + while (++p < ec) { + if (*p == L_ESC) { + p++; + if (match_class(c, uchar(*p))) + return sig; + } + else if ((*(p+1) == '-') && (p+2 < ec)) { + p+=2; + if (uchar(*(p-2)) <= c && c <= uchar(*p)) + return sig; + } + else if (uchar(*p) == c) return sig; + } + return !sig; +} + +static int singlematch(int c, const char *p, const char *ep) +{ + switch (*p) { + case '.': return 1; /* matches any char */ + case L_ESC: return match_class(c, uchar(*(p+1))); + case '[': return matchbracketclass(c, p, ep-1); + default: return (uchar(*p) == c); + } +} + +static const char *match(MatchState *ms, const char *s, const char *p); + +static const char *matchbalance(MatchState *ms, const char *s, const char *p) +{ + if (*p == 0 || *(p+1) == 0) + lj_err_caller(ms->L, LJ_ERR_STRPATU); + if (*s != *p) { + return NULL; + } else { + int b = *p; + int e = *(p+1); + int cont = 1; + while (++s < ms->src_end) { + if (*s == e) { + if (--cont == 0) return s+1; + } else if (*s == b) { + cont++; + } + } + } + return NULL; /* string ends out of balance */ +} + +static const char *max_expand(MatchState *ms, const char *s, + const char *p, const char *ep) +{ + ptrdiff_t i = 0; /* counts maximum expand for item */ + while ((s+i)<ms->src_end && singlematch(uchar(*(s+i)), p, ep)) + i++; + /* keeps trying to match with the maximum repetitions */ + while (i>=0) { + const char *res = match(ms, (s+i), ep+1); + if (res) return res; + i--; /* else didn't match; reduce 1 repetition to try again */ + } + return NULL; +} + +static const char *min_expand(MatchState *ms, const char *s, + const char *p, const char *ep) +{ + for (;;) { + const char *res = match(ms, s, ep+1); + if (res != NULL) + return res; + else if (s<ms->src_end && singlematch(uchar(*s), p, ep)) + s++; /* try with one more repetition */ + else + return NULL; + } +} + +static const char *start_capture(MatchState *ms, const char *s, + const char *p, int what) +{ + const char *res; + int level = ms->level; + if (level >= LUA_MAXCAPTURES) lj_err_caller(ms->L, LJ_ERR_STRCAPN); + ms->capture[level].init = s; + ms->capture[level].len = what; + ms->level = level+1; + if ((res=match(ms, s, p)) == NULL) /* match failed? */ + ms->level--; /* undo capture */ + return res; +} + +static const char *end_capture(MatchState *ms, const char *s, + const char *p) +{ + int l = capture_to_close(ms); + const char *res; + ms->capture[l].len = s - ms->capture[l].init; /* close capture */ + if ((res = match(ms, s, p)) == NULL) /* match failed? */ + ms->capture[l].len = CAP_UNFINISHED; /* undo capture */ + return res; +} + +static const char *match_capture(MatchState *ms, const char *s, int l) +{ + size_t len; + l = check_capture(ms, l); + len = (size_t)ms->capture[l].len; + if ((size_t)(ms->src_end-s) >= len && + memcmp(ms->capture[l].init, s, len) == 0) + return s+len; + else + return NULL; +} + +static const char *match(MatchState *ms, const char *s, const char *p) +{ + init: /* using goto's to optimize tail recursion */ + switch (*p) { + case '(': /* start capture */ + if (*(p+1) == ')') /* position capture? 
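A standalone sketch of matchbalance() above, which implements the "%bxy" pattern item: the subject must start with x, then a nesting counter tracks x/y pairs and the match ends just past the balancing y. The input string below is made up:

/* Sketch of matchbalance(): find the end of a balanced "%b()"-style run. */
#include <stdio.h>
#include <string.h>

/* Returns a pointer just past the balanced run starting at s, or NULL. */
static const char *balance_end(const char *s, const char *e, int b, int c)
{
  int cont = 1;
  if (s >= e || *s != b) return NULL;  /* must start with the opening char */
  while (++s < e) {
    if (*s == c) {
      if (--cont == 0) return s + 1;
    } else if (*s == b) {
      cont++;
    }
  }
  return NULL;                         /* string ends out of balance */
}

int main(void)
{
  const char *src = "f(a(b)c) = 1";    /* made-up input */
  const char *start = strchr(src, '(');
  if (start) {
    const char *end = balance_end(start, src + strlen(src), '(', ')');
    if (end)
      printf("%%b() matched: %.*s\n", (int)(end - start), start);  /* prints (a(b)c) */
  }
  return 0;
}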
*/ + return start_capture(ms, s, p+2, CAP_POSITION); + else + return start_capture(ms, s, p+1, CAP_UNFINISHED); + case ')': /* end capture */ + return end_capture(ms, s, p+1); + case L_ESC: + switch (*(p+1)) { + case 'b': /* balanced string? */ + s = matchbalance(ms, s, p+2); + if (s == NULL) return NULL; + p+=4; + goto init; /* else return match(ms, s, p+4); */ + case 'f': { /* frontier? */ + const char *ep; char previous; + p += 2; + if (*p != '[') + lj_err_caller(ms->L, LJ_ERR_STRPATB); + ep = classend(ms, p); /* points to what is next */ + previous = (s == ms->src_init) ? '\0' : *(s-1); + if (matchbracketclass(uchar(previous), p, ep-1) || + !matchbracketclass(uchar(*s), p, ep-1)) return NULL; + p=ep; + goto init; /* else return match(ms, s, ep); */ + } + default: + if (lj_char_isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */ + s = match_capture(ms, s, uchar(*(p+1))); + if (s == NULL) return NULL; + p+=2; + goto init; /* else return match(ms, s, p+2) */ + } + goto dflt; /* case default */ + } + case '\0': /* end of pattern */ + return s; /* match succeeded */ + case '$': + if (*(p+1) == '\0') /* is the `$' the last char in pattern? */ + return (s == ms->src_end) ? s : NULL; /* check end of string */ + else + goto dflt; + default: dflt: { /* it is a pattern item */ + const char *ep = classend(ms, p); /* points to what is next */ + int m = s<ms->src_end && singlematch(uchar(*s), p, ep); + switch (*ep) { + case '?': { /* optional */ + const char *res; + if (m && ((res=match(ms, s+1, ep+1)) != NULL)) + return res; + p=ep+1; + goto init; /* else return match(ms, s, ep+1); */ + } + case '*': /* 0 or more repetitions */ + return max_expand(ms, s, p, ep); + case '+': /* 1 or more repetitions */ + return (m ? max_expand(ms, s+1, p, ep) : NULL); + case '-': /* 0 or more repetitions (minimum) */ + return min_expand(ms, s, p, ep); + default: + if (!m) return NULL; + s++; p=ep; + goto init; /* else return match(ms, s+1, ep); */ + } + } + } +} + +static const char *lmemfind(const char *s1, size_t l1, + const char *s2, size_t l2) +{ + if (l2 == 0) { + return s1; /* empty strings are everywhere */ + } else if (l2 > l1) { + return NULL; /* avoids a negative `l1' */ + } else { + const char *init; /* to search for a `*s2' inside `s1' */ + l2--; /* 1st char will be checked by `memchr' */ + l1 = l1-l2; /* `s2' cannot be found after that */ + while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) { + init++; /* 1st char is already checked */ + if (memcmp(init, s2+1, l2) == 0) { + return init-1; + } else { /* correct `l1' and `s1' to try again */ + l1 -= (size_t)(init-s1); + s1 = init; + } + } + return NULL; /* not found */ + } +} + +static void push_onecapture(MatchState *ms, int i, const char *s, const char *e) +{ + if (i >= ms->level) { + if (i == 0) /* ms->level == 0, too */ + lua_pushlstring(ms->L, s, (size_t)(e - s)); /* add whole match */ + else + lj_err_caller(ms->L, LJ_ERR_STRCAPI); + } else { + ptrdiff_t l = ms->capture[i].len; + if (l == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPU); + if (l == CAP_POSITION) + lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1); + else + lua_pushlstring(ms->L, ms->capture[i].init, (size_t)l); + } +} + +static int push_captures(MatchState *ms, const char *s, const char *e) +{ + int i; + int nlevels = (ms->level == 0 && s) ? 
1 : ms->level; + luaL_checkstack(ms->L, nlevels, "too many captures"); + for (i = 0; i < nlevels; i++) + push_onecapture(ms, i, s, e); + return nlevels; /* number of strings pushed */ +} + +static ptrdiff_t posrelat(ptrdiff_t pos, size_t len) +{ + /* relative string position: negative means back from end */ + if (pos < 0) pos += (ptrdiff_t)len + 1; + return (pos >= 0) ? pos : 0; +} + +static int str_find_aux(lua_State *L, int find) +{ + size_t l1, l2; + const char *s = luaL_checklstring(L, 1, &l1); + const char *p = luaL_checklstring(L, 2, &l2); + ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1; + if (init < 0) + init = 0; + else if ((size_t)(init) > l1) + init = (ptrdiff_t)l1; + if (find && (lua_toboolean(L, 4) || /* explicit request? */ + strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */ + /* do a plain search */ + const char *s2 = lmemfind(s+init, l1-(size_t)init, p, l2); + if (s2) { + lua_pushinteger(L, s2-s+1); + lua_pushinteger(L, s2-s+(ptrdiff_t)l2); + return 2; + } + } else { + MatchState ms; + int anchor = (*p == '^') ? (p++, 1) : 0; + const char *s1=s+init; + ms.L = L; + ms.src_init = s; + ms.src_end = s+l1; + do { + const char *res; + ms.level = 0; + if ((res=match(&ms, s1, p)) != NULL) { + if (find) { + lua_pushinteger(L, s1-s+1); /* start */ + lua_pushinteger(L, res-s); /* end */ + return push_captures(&ms, NULL, 0) + 2; + } else { + return push_captures(&ms, s1, res); + } + } + } while (s1++ < ms.src_end && !anchor); + } + lua_pushnil(L); /* not found */ + return 1; +} + +LJLIB_CF(string_find) +{ + return str_find_aux(L, 1); +} + +LJLIB_CF(string_match) +{ + return str_find_aux(L, 0); +} + +LJLIB_NOREG LJLIB_CF(string_gmatch_aux) +{ + const char *p = strVdata(lj_lib_upvalue(L, 2)); + GCstr *str = strV(lj_lib_upvalue(L, 1)); + const char *s = strdata(str); + TValue *tvpos = lj_lib_upvalue(L, 3); + const char *src = s + tvpos->u32.lo; + MatchState ms; + ms.L = L; + ms.src_init = s; + ms.src_end = s + str->len; + for (; src <= ms.src_end; src++) { + const char *e; + ms.level = 0; + if ((e = match(&ms, src, p)) != NULL) { + int32_t pos = (int32_t)(e - s); + if (e == src) pos++; /* Ensure progress for empty match. */ + tvpos->u32.lo = (uint32_t)pos; + return push_captures(&ms, src, e); + } + } + return 0; /* not found */ +} + +LJLIB_CF(string_gmatch) +{ + lj_lib_checkstr(L, 1); + lj_lib_checkstr(L, 2); + L->top = L->base+3; + (L->top-1)->u64 = 0; + lj_lib_pushcc(L, lj_cf_string_gmatch_aux, FF_string_gmatch_aux, 3); + return 1; +} + +static void add_s(MatchState *ms, luaL_Buffer *b, const char *s, const char *e) +{ + size_t l, i; + const char *news = lua_tolstring(ms->L, 3, &l); + for (i = 0; i < l; i++) { + if (news[i] != L_ESC) { + luaL_addchar(b, news[i]); + } else { + i++; /* skip ESC */ + if (!lj_char_isdigit(uchar(news[i]))) { + luaL_addchar(b, news[i]); + } else if (news[i] == '0') { + luaL_addlstring(b, s, (size_t)(e - s)); + } else { + push_onecapture(ms, news[i] - '1', s, e); + luaL_addvalue(b); /* add capture to accumulated result */ + } + } + } +} + +static void add_value(MatchState *ms, luaL_Buffer *b, + const char *s, const char *e) +{ + lua_State *L = ms->L; + switch (lua_type(L, 3)) { + case LUA_TNUMBER: + case LUA_TSTRING: { + add_s(ms, b, s, e); + return; + } + case LUA_TFUNCTION: { + int n; + lua_pushvalue(L, 3); + n = push_captures(ms, s, e); + lua_call(L, n, 1); + break; + } + case LUA_TTABLE: { + push_onecapture(ms, 0, s, e); + lua_gettable(L, 3); + break; + } + } + if (!lua_toboolean(L, -1)) { /* nil or false? 
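Two details of str_find_aux() above, restated as a sketch that is not part of the patch: a negative init argument is counted from the end of the subject (posrelat()), and string.find with the plain flag, or with a pattern containing none of the SPECIALS characters, skips the matcher entirely and does a memchr()/memcmp() scan like lmemfind(). The subject string below is invented:

/* Sketch of posrelat() and the plain-search path of string.find(). */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

static ptrdiff_t posrelat(ptrdiff_t pos, size_t len)
{
  if (pos < 0) pos += (ptrdiff_t)len + 1;  /* -1 means the last character */
  return pos >= 0 ? pos : 0;
}

/* Same idea as lmemfind(): memchr() for the first byte, memcmp() for the rest. */
static const char *plainfind(const char *s, size_t l1, const char *p, size_t l2)
{
  if (l2 == 0) return s;
  if (l2 > l1) return NULL;
  l2--; l1 -= l2;
  while (l1 > 0) {
    const char *q = (const char *)memchr(s, *p, l1);
    if (q == NULL) return NULL;
    if (memcmp(q + 1, p + 1, l2) == 0) return q;
    l1 -= (size_t)(q + 1 - s);
    s = q + 1;
  }
  return NULL;
}

int main(void)
{
  const char *s = "hello.world.lua";       /* made-up subject */
  size_t len = strlen(s);
  ptrdiff_t init = posrelat(-4, len) - 1;  /* as in string.find(s, ".", -4, true) */
  const char *hit = plainfind(s + init, len - (size_t)init, ".", 1);
  if (hit)
    printf("found at %d\n", (int)(hit - s) + 1);  /* 1-based index, prints 12 */
  return 0;
}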
*/ + lua_pop(L, 1); + lua_pushlstring(L, s, (size_t)(e - s)); /* keep original text */ + } else if (!lua_isstring(L, -1)) { + lj_err_callerv(L, LJ_ERR_STRGSRV, luaL_typename(L, -1)); + } + luaL_addvalue(b); /* add result to accumulator */ +} + +LJLIB_CF(string_gsub) +{ + size_t srcl; + const char *src = luaL_checklstring(L, 1, &srcl); + const char *p = luaL_checkstring(L, 2); + int tr = lua_type(L, 3); + int max_s = luaL_optint(L, 4, (int)(srcl+1)); + int anchor = (*p == '^') ? (p++, 1) : 0; + int n = 0; + MatchState ms; + luaL_Buffer b; + if (!(tr == LUA_TNUMBER || tr == LUA_TSTRING || + tr == LUA_TFUNCTION || tr == LUA_TTABLE)) + lj_err_arg(L, 3, LJ_ERR_NOSFT); + luaL_buffinit(L, &b); + ms.L = L; + ms.src_init = src; + ms.src_end = src+srcl; + while (n < max_s) { + const char *e; + ms.level = 0; + e = match(&ms, src, p); + if (e) { + n++; + add_value(&ms, &b, src, e); + } + if (e && e>src) /* non empty match? */ + src = e; /* skip it */ + else if (src < ms.src_end) + luaL_addchar(&b, *src++); + else + break; + if (anchor) + break; + } + luaL_addlstring(&b, src, (size_t)(ms.src_end-src)); + luaL_pushresult(&b); + lua_pushinteger(L, n); /* number of substitutions */ + return 2; +} + +/* ------------------------------------------------------------------------ */ + +/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */ +#define MAX_FMTITEM 512 +/* valid flags in a format specification */ +#define FMT_FLAGS "-+ #0" +/* +** maximum size of each format specification (such as '%-099.99d') +** (+10 accounts for %99.99x plus margin of error) +*/ +#define MAX_FMTSPEC (sizeof(FMT_FLAGS) + sizeof(LUA_INTFRMLEN) + 10) + +static void addquoted(lua_State *L, luaL_Buffer *b, int arg) +{ + GCstr *str = lj_lib_checkstr(L, arg); + int32_t len = (int32_t)str->len; + const char *s = strdata(str); + luaL_addchar(b, '"'); + while (len--) { + if (*s == '"' || *s == '\\' || *s == '\n') { + luaL_addchar(b, '\\'); + luaL_addchar(b, *s); + } else if (lj_char_iscntrl(uchar(*s))) { + uint32_t c1, c2, c3; + luaL_addchar(b, '\\'); + c1 = uchar(*s); c3 = c1 % 10; c1 /= 10; c2 = c1 % 10; c1 /= 10; + if (c1 + lj_char_isdigit(uchar(s[1]))) luaL_addchar(b, '0' + c1); + if (c2 + (c1 + lj_char_isdigit(uchar(s[1])))) luaL_addchar(b, '0' + c2); + luaL_addchar(b, '0' + c3); + } else { + luaL_addchar(b, *s); + } + s++; + } + luaL_addchar(b, '"'); +} + +static const char *scanformat(lua_State *L, const char *strfrmt, char *form) +{ + const char *p = strfrmt; + while (*p != '\0' && strchr(FMT_FLAGS, *p) != NULL) p++; /* skip flags */ + if ((size_t)(p - strfrmt) >= sizeof(FMT_FLAGS)) + lj_err_caller(L, LJ_ERR_STRFMTR); + if (lj_char_isdigit(uchar(*p))) p++; /* skip width */ + if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */ + if (*p == '.') { + p++; + if (lj_char_isdigit(uchar(*p))) p++; /* skip precision */ + if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */ + } + if (lj_char_isdigit(uchar(*p))) + lj_err_caller(L, LJ_ERR_STRFMTW); + *(form++) = '%'; + strncpy(form, strfrmt, (size_t)(p - strfrmt + 1)); + form += p - strfrmt + 1; + *form = '\0'; + return p; +} + +static void addintlen(char *form) +{ + size_t l = strlen(form); + char spec = form[l - 1]; + strcpy(form + l - 1, LUA_INTFRMLEN); + form[l + sizeof(LUA_INTFRMLEN) - 2] = spec; + form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0'; +} + +static unsigned LUA_INTFRM_T num2intfrm(lua_State *L, int arg) +{ + if (sizeof(LUA_INTFRM_T) == 4) { + return (LUA_INTFRM_T)lj_lib_checkbit(L, arg); + } else { + cTValue *o; + lj_lib_checknumber(L, arg); 
+ o = L->base+arg-1; + if (tvisint(o)) + return (LUA_INTFRM_T)intV(o); + else + return (LUA_INTFRM_T)numV(o); + } +} + +static unsigned LUA_INTFRM_T num2uintfrm(lua_State *L, int arg) +{ + if (sizeof(LUA_INTFRM_T) == 4) { + return (unsigned LUA_INTFRM_T)lj_lib_checkbit(L, arg); + } else { + cTValue *o; + lj_lib_checknumber(L, arg); + o = L->base+arg-1; + if (tvisint(o)) + return (unsigned LUA_INTFRM_T)intV(o); + else if ((int32_t)o->u32.hi < 0) + return (unsigned LUA_INTFRM_T)(LUA_INTFRM_T)numV(o); + else + return (unsigned LUA_INTFRM_T)numV(o); + } +} + +LJLIB_CF(string_format) +{ + int arg = 1, top = (int)(L->top - L->base); + GCstr *fmt = lj_lib_checkstr(L, arg); + const char *strfrmt = strdata(fmt); + const char *strfrmt_end = strfrmt + fmt->len; + luaL_Buffer b; + luaL_buffinit(L, &b); + while (strfrmt < strfrmt_end) { + if (*strfrmt != L_ESC) { + luaL_addchar(&b, *strfrmt++); + } else if (*++strfrmt == L_ESC) { + luaL_addchar(&b, *strfrmt++); /* %% */ + } else { /* format item */ + char form[MAX_FMTSPEC]; /* to store the format (`%...') */ + char buff[MAX_FMTITEM]; /* to store the formatted item */ + if (++arg > top) + luaL_argerror(L, arg, lj_obj_typename[0]); + strfrmt = scanformat(L, strfrmt, form); + switch (*strfrmt++) { + case 'c': + sprintf(buff, form, lj_lib_checkint(L, arg)); + break; + case 'd': case 'i': + addintlen(form); + sprintf(buff, form, num2intfrm(L, arg)); + break; + case 'o': case 'u': case 'x': case 'X': + addintlen(form); + sprintf(buff, form, num2uintfrm(L, arg)); + break; + case 'e': case 'E': case 'f': case 'g': case 'G': { + TValue tv; + tv.n = lj_lib_checknum(L, arg); + if (LJ_UNLIKELY((tv.u32.hi << 1) >= 0xffe00000)) { + /* Canonicalize output of non-finite values. */ + char *p, nbuf[LJ_STR_NUMBUF]; + size_t len = lj_str_bufnum(nbuf, &tv); + if (strfrmt[-1] == 'E' || strfrmt[-1] == 'G') { + nbuf[len-3] = nbuf[len-3] - 0x20; + nbuf[len-2] = nbuf[len-2] - 0x20; + nbuf[len-1] = nbuf[len-1] - 0x20; + } + nbuf[len] = '\0'; + for (p = form; *p < 'e' && *p != '.'; p++) ; + *p++ = 's'; *p = '\0'; + sprintf(buff, form, nbuf); + break; + } + sprintf(buff, form, (double)tv.n); + break; + } + case 'q': + addquoted(L, &b, arg); + continue; + case 'p': + lj_str_pushf(L, "%p", lua_topointer(L, arg)); + luaL_addvalue(&b); + continue; + case 's': { + GCstr *str = lj_lib_checkstr(L, arg); + if (!strchr(form, '.') && str->len >= 100) { + /* no precision and string is too long to be formatted; + keep original string */ + setstrV(L, L->top++, str); + luaL_addvalue(&b); + continue; + } + sprintf(buff, form, strdata(str)); + break; + } + default: + lj_err_callerv(L, LJ_ERR_STRFMTO, *(strfrmt -1)); + break; + } + luaL_addlstring(&b, buff, strlen(buff)); + } + } + luaL_pushresult(&b); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_string(lua_State *L) +{ + GCtab *mt; + global_State *g; + LJ_LIB_REG(L, LUA_STRLIBNAME, string); +#if defined(LUA_COMPAT_GFIND) + lua_getfield(L, -1, "gmatch"); + lua_setfield(L, -2, "gfind"); +#endif + mt = lj_tab_new(L, 0, 1); + /* NOBARRIER: basemt is a GC root. 
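An editorial sketch (not the patch's code) of the escaping rules addquoted() above applies for string.format("%q", s): quotes, backslashes and newlines get a backslash (the newline itself is kept), and other control characters become a decimal "\ddd" escape with only as many digits as needed, padded to three when the next source character is itself a digit. The helper name and input below are made up; the input is assumed NUL-terminated, as GCstr data is:

/* Sketch of "%q"-style quoting as done by addquoted(). */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

static void quote_str(const char *s, size_t len, FILE *out)
{
  putc('"', out);
  while (len--) {
    unsigned char c = (unsigned char)*s;
    if (c == '"' || c == '\\' || c == '\n') {
      putc('\\', out); putc(c, out);     /* '\n' stays a real newline after the '\' */
    } else if (iscntrl(c)) {
      if (!isdigit((unsigned char)s[1])) /* pad only when a digit follows */
        fprintf(out, "\\%d", c);
      else
        fprintf(out, "\\%03d", c);
    } else {
      putc(c, out);
    }
    s++;
  }
  putc('"', out);
}

int main(void)
{
  const char *src = "say \"hi\"\t1";     /* made-up input */
  quote_str(src, strlen(src), stdout);   /* prints "say \"hi\"\0091" */
  putc('\n', stdout);
  return 0;
}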
*/ + g = G(L); + setgcref(basemt_it(g, LJ_TSTR), obj2gco(mt)); + settabV(L, lj_tab_setstr(L, mt, mmname_str(g, MM_index)), tabV(L->top-1)); + mt->nomm = (uint8_t)(~(1u<<MM_index)); + return 1; +} + diff --git a/src/luajit2/src/lib_table.c b/src/luajit2/src/lib_table.c new file mode 100644 index 0000000000..5ae2edd5c9 --- /dev/null +++ b/src/luajit2/src/lib_table.c @@ -0,0 +1,293 @@ +/* +** Table library. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lib_table_c +#define LUA_LIB + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_lib.h" + +/* ------------------------------------------------------------------------ */ + +#define LJLIB_MODULE_table + +LJLIB_CF(table_foreachi) +{ + GCtab *t = lj_lib_checktab(L, 1); + GCfunc *func = lj_lib_checkfunc(L, 2); + MSize i, n = lj_tab_len(t); + for (i = 1; i <= n; i++) { + cTValue *val; + setfuncV(L, L->top, func); + setintV(L->top+1, i); + val = lj_tab_getint(t, (int32_t)i); + if (val) { copyTV(L, L->top+2, val); } else { setnilV(L->top+2); } + L->top += 3; + lua_call(L, 2, 1); + if (!tvisnil(L->top-1)) + return 1; + L->top--; + } + return 0; +} + +LJLIB_CF(table_foreach) +{ + GCtab *t = lj_lib_checktab(L, 1); + GCfunc *func = lj_lib_checkfunc(L, 2); + L->top = L->base+3; + setnilV(L->top-1); + while (lj_tab_next(L, t, L->top-1)) { + copyTV(L, L->top+2, L->top); + copyTV(L, L->top+1, L->top-1); + setfuncV(L, L->top, func); + L->top += 3; + lua_call(L, 2, 1); + if (!tvisnil(L->top-1)) + return 1; + L->top--; + } + return 0; +} + +LJLIB_ASM(table_getn) LJLIB_REC(.) +{ + lj_lib_checktab(L, 1); + return FFH_UNREACHABLE; +} + +LJLIB_CF(table_maxn) +{ + GCtab *t = lj_lib_checktab(L, 1); + TValue *array = tvref(t->array); + Node *node; + lua_Number m = 0; + ptrdiff_t i; + for (i = (ptrdiff_t)t->asize - 1; i >= 0; i--) + if (!tvisnil(&array[i])) { + m = (lua_Number)(int32_t)i; + break; + } + node = noderef(t->node); + for (i = (ptrdiff_t)t->hmask; i >= 0; i--) + if (!tvisnil(&node[i].val) && tvisnumber(&node[i].key)) { + lua_Number n = numberVnum(&node[i].key); + if (n > m) m = n; + } + setnumV(L->top-1, m); + return 1; +} + +LJLIB_CF(table_insert) LJLIB_REC(.) +{ + GCtab *t = lj_lib_checktab(L, 1); + int32_t n, i = (int32_t)lj_tab_len(t) + 1; + int nargs = (int)((char *)L->top - (char *)L->base); + if (nargs != 2*sizeof(TValue)) { + if (nargs != 3*sizeof(TValue)) + lj_err_caller(L, LJ_ERR_TABINS); + /* NOBARRIER: This just moves existing elements around. */ + for (n = lj_lib_checkint(L, 2); i > n; i--) { + /* The set may invalidate the get pointer, so need to do it first! */ + TValue *dst = lj_tab_setint(L, t, i); + cTValue *src = lj_tab_getint(t, i-1); + if (src) { + copyTV(L, dst, src); + } else { + setnilV(dst); + } + } + i = n; + } + { + TValue *dst = lj_tab_setint(L, t, i); + copyTV(L, dst, L->top-1); /* Set new value. */ + lj_gc_barriert(L, t, dst); + } + return 0; +} + +LJLIB_CF(table_remove) LJLIB_REC(.) +{ + GCtab *t = lj_lib_checktab(L, 1); + int32_t e = (int32_t)lj_tab_len(t); + int32_t pos = lj_lib_optint(L, 2, e); + if (!(1 <= pos && pos <= e)) /* Nothing to remove? */ + return 0; + lua_rawgeti(L, 1, pos); /* Get previous value. */ + /* NOBARRIER: This just moves existing elements around. 
*/ + for (; pos < e; pos++) { + cTValue *src = lj_tab_getint(t, pos+1); + TValue *dst = lj_tab_setint(L, t, pos); + if (src) { + copyTV(L, dst, src); + } else { + setnilV(dst); + } + } + setnilV(lj_tab_setint(L, t, e)); /* Remove (last) value. */ + return 1; /* Return previous value. */ +} + +LJLIB_CF(table_concat) +{ + luaL_Buffer b; + GCtab *t = lj_lib_checktab(L, 1); + GCstr *sep = lj_lib_optstr(L, 2); + MSize seplen = sep ? sep->len : 0; + int32_t i = lj_lib_optint(L, 3, 1); + int32_t e = L->base+3 < L->top ? lj_lib_checkint(L, 4) : + (int32_t)lj_tab_len(t); + luaL_buffinit(L, &b); + if (i <= e) { + for (;;) { + cTValue *o; + lua_rawgeti(L, 1, i); + o = L->top-1; + if (!(tvisstr(o) || tvisnumber(o))) + lj_err_callerv(L, LJ_ERR_TABCAT, typename(o), i); + luaL_addvalue(&b); + if (i++ == e) break; + if (seplen) + luaL_addlstring(&b, strdata(sep), seplen); + } + } + luaL_pushresult(&b); + return 1; +} + +/* ------------------------------------------------------------------------ */ + +static void set2(lua_State *L, int i, int j) +{ + lua_rawseti(L, 1, i); + lua_rawseti(L, 1, j); +} + +static int sort_comp(lua_State *L, int a, int b) +{ + if (lua_isfunction(L, 2)) { /* function? */ + int res; + lua_pushvalue(L, 2); + lua_pushvalue(L, a-1); /* -1 to compensate function */ + lua_pushvalue(L, b-2); /* -2 to compensate function and `a' */ + lua_call(L, 2, 1); + res = lua_toboolean(L, -1); + lua_pop(L, 1); + return res; + } + else if (!lua_isnil(L, 2)) { /* index? */ + int res; + lua_pushvalue(L, 2); + lua_gettable(L, a-1); + + lua_pushvalue(L, 2); + lua_gettable(L, b-2); + res = lua_lessthan(L, -2, -1); + lua_pop(L, 2); + return res; + } + else /* a < b? */ + return lua_lessthan(L, a, b); +} + +static void auxsort(lua_State *L, int l, int u) +{ + while (l < u) { /* for tail recursion */ + int i, j; + /* sort elements a[l], a[(l+u)/2] and a[u] */ + lua_rawgeti(L, 1, l); + lua_rawgeti(L, 1, u); + if (sort_comp(L, -1, -2)) /* a[u] < a[l]? */ + set2(L, l, u); /* swap a[l] - a[u] */ + else + lua_pop(L, 2); + if (u-l == 1) break; /* only 2 elements */ + i = (l+u)/2; + lua_rawgeti(L, 1, i); + lua_rawgeti(L, 1, l); + if (sort_comp(L, -2, -1)) { /* a[i]<a[l]? */ + set2(L, i, l); + } else { + lua_pop(L, 1); /* remove a[l] */ + lua_rawgeti(L, 1, u); + if (sort_comp(L, -1, -2)) /* a[u]<a[i]? 
*/ + set2(L, i, u); + else + lua_pop(L, 2); + } + if (u-l == 2) break; /* only 3 elements */ + lua_rawgeti(L, 1, i); /* Pivot */ + lua_pushvalue(L, -1); + lua_rawgeti(L, 1, u-1); + set2(L, i, u-1); + /* a[l] <= P == a[u-1] <= a[u], only need to sort from l+1 to u-2 */ + i = l; j = u-1; + for (;;) { /* invariant: a[l..i] <= P <= a[j..u] */ + /* repeat ++i until a[i] >= P */ + while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) { + if (i>=u) lj_err_caller(L, LJ_ERR_TABSORT); + lua_pop(L, 1); /* remove a[i] */ + } + /* repeat --j until a[j] <= P */ + while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) { + if (j<=l) lj_err_caller(L, LJ_ERR_TABSORT); + lua_pop(L, 1); /* remove a[j] */ + } + if (j<i) { + lua_pop(L, 3); /* pop pivot, a[i], a[j] */ + break; + } + set2(L, i, j); + } + lua_rawgeti(L, 1, u-1); + lua_rawgeti(L, 1, i); + set2(L, u-1, i); /* swap pivot (a[u-1]) with a[i] */ + /* a[l..i-1] <= a[i] == P <= a[i+1..u] */ + /* adjust so that smaller half is in [j..i] and larger one in [l..u] */ + if (i-l < u-i) { + j=l; i=i-1; l=i+2; + } else { + j=i+1; i=u; u=j-2; + } + auxsort(L, j, i); /* call recursively the smaller one */ + } /* repeat the routine for the larger one */ +} + +LJLIB_CF(table_sort) +{ + GCtab *t = lj_lib_checktab(L, 1); + int32_t n = (int32_t)lj_tab_len(t); + lua_settop(L, 2); +/* if (!tvisnil(L->base+1)) + lj_lib_checkfunc(L, 2);*/ + auxsort(L, 1, n); + return 0; +} + +/* ------------------------------------------------------------------------ */ + +#include "lj_libdef.h" + +LUALIB_API int luaopen_table(lua_State *L) +{ + LJ_LIB_REG(L, LUA_TABLIBNAME, table); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + lua_getglobal(L, "unpack"); + lua_setfield(L, -2, "unpack"); +#endif + return 1; +} + diff --git a/src/luajit2/src/lj.supp b/src/luajit2/src/lj.supp new file mode 100644 index 0000000000..f1126ad767 --- /dev/null +++ b/src/luajit2/src/lj.supp @@ -0,0 +1,16 @@ +# Valgrind suppression file for LuaJIT 2.x. +{ + Optimized string compare + Memcheck:Addr4 + fun:lj_str_cmp +} +{ + Optimized string compare + Memcheck:Addr4 + fun:lj_str_new +} +{ + Optimized string compare + Memcheck:Cond + fun:lj_str_new +} diff --git a/src/luajit2/src/lj_alloc.c b/src/luajit2/src/lj_alloc.c new file mode 100644 index 0000000000..8d4edb5eab --- /dev/null +++ b/src/luajit2/src/lj_alloc.c @@ -0,0 +1,1381 @@ +/* +** Bundled memory allocator. +** +** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc. +** The original bears the following remark: +** +** This is a version (aka dlmalloc) of malloc/free/realloc written by +** Doug Lea and released to the public domain, as explained at +** http://creativecommons.org/licenses/publicdomain. +** +** * Version pre-2.8.4 Wed Mar 29 19:46:29 2006 (dl at gee) +** +** No additional copyright is claimed over the customizations. +** Please do NOT bother the original author about this version here! +** +** If you want to use dlmalloc in another project, you should get +** the original from: ftp://gee.cs.oswego.edu/pub/misc/ +** For thread-safe derivatives, take a look at: +** - ptmalloc: http://www.malloc.de/ +** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/ +*/ + +#define lj_alloc_c +#define LUA_CORE + +/* To get the mremap prototype. Must be defined before any system includes. 
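For review only, not part of the patch: auxsort() above is the stock Lua quicksort, with a median-of-three pivot parked at u-1, a partition that relies on a[l] and the pivot as sentinels, recursion into the smaller half and a tail loop over the larger half so stack depth stays logarithmic. The same shape on a plain int array, without the Lua stack plumbing:

/* Sketch of auxsort()'s shape on an int array. */
#include <stdio.h>

static void swap(int *a, int i, int j) { int t = a[i]; a[i] = a[j]; a[j] = t; }

static void auxsort_ints(int *a, int l, int u)
{
  while (l < u) {
    int i, j;
    if (a[u] < a[l]) swap(a, l, u);      /* sort a[l], a[(l+u)/2], a[u] */
    if (u - l == 1) break;
    i = (l + u) / 2;
    if (a[i] < a[l]) swap(a, i, l);
    else if (a[u] < a[i]) swap(a, i, u);
    if (u - l == 2) break;
    swap(a, i, u - 1);                   /* park the median pivot at u-1 */
    i = l; j = u - 1;
    for (;;) {                           /* invariant: a[l..i] <= P <= a[j..u] */
      while (a[++i] < a[u - 1]) ;        /* a[u-1] == P is the right sentinel */
      while (a[u - 1] < a[--j]) ;        /* a[l] <= P is the left sentinel */
      if (j < i) break;
      swap(a, i, j);
    }
    swap(a, u - 1, i);                   /* pivot into its final slot */
    if (i - l < u - i) {                 /* recurse into the smaller side only */
      auxsort_ints(a, l, i - 1); l = i + 1;
    } else {
      auxsort_ints(a, i + 1, u); u = i - 1;
    }
  }
}

int main(void)
{
  int a[] = {7, 3, 9, 1, 4, 8, 2, 6, 5}, i;
  auxsort_ints(a, 0, 8);
  for (i = 0; i < 9; i++) printf("%d ", a[i]);  /* prints 1 2 3 4 5 6 7 8 9 */
  printf("\n");
  return 0;
}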
*/ +#if defined(__linux__) && !defined(_GNU_SOURCE) +#define _GNU_SOURCE +#endif + +#include "lj_def.h" +#include "lj_arch.h" +#include "lj_alloc.h" + +#ifndef LUAJIT_USE_SYSMALLOC + +#define MAX_SIZE_T (~(size_t)0) +#define MALLOC_ALIGNMENT ((size_t)8U) + +#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U) +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U) +#define MAX_RELEASE_CHECK_RATE 255 + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP support ------------------------------- */ + +#define MFAIL ((void *)(MAX_SIZE_T)) +#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ + +#define IS_DIRECT_BIT (SIZE_T_ONE) + +#if LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#include <windows.h> + +#if LJ_64 + +/* Undocumented, but hey, that's what we all love so much about Windows. */ +typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits, + size_t *size, ULONG alloctype, ULONG prot); +static PNTAVM ntavm; + +/* Number of top bits of the lower 32 bits of an address that must be zero. +** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB. +*/ +#define NTAVM_ZEROBITS 1 + +static void INIT_MMAP(void) +{ + ntavm = (PNTAVM)GetProcAddress(GetModuleHandle("ntdll.dll"), + "NtAllocateVirtualMemory"); +} + +/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */ +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + DWORD olderr = GetLastError(); + void *ptr = NULL; + long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size, + MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + SetLastError(olderr); + return st == 0 ? ptr : MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static LJ_AINLINE void *DIRECT_MMAP(size_t size) +{ + DWORD olderr = GetLastError(); + void *ptr = NULL; + long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size, + MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE); + SetLastError(olderr); + return st == 0 ? ptr : MFAIL; +} + +#else + +#define INIT_MMAP() ((void)0) + +/* Win32 MMAP via VirtualAlloc */ +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + DWORD olderr = GetLastError(); + void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + SetLastError(olderr); + return ptr ? ptr : MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static LJ_AINLINE void *DIRECT_MMAP(size_t size) +{ + DWORD olderr = GetLastError(); + void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_READWRITE); + SetLastError(olderr); + return ptr ? 
ptr : MFAIL; +} + +#endif + +/* This function supports releasing coalesed segments */ +static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size) +{ + DWORD olderr = GetLastError(); + MEMORY_BASIC_INFORMATION minfo; + char *cptr = (char *)ptr; + while (size) { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + SetLastError(olderr); + return 0; +} + +#else + +#include <errno.h> +#include <sys/mman.h> + +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) + +#if LJ_64 +/* 64 bit mode needs special support for allocating memory in the lower 2GB. */ + +#if LJ_TARGET_LINUX + +/* Actually this only gives us max. 1GB in current Linux kernels. */ +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + int olderr = errno; + void *ptr = mmap(NULL, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0); + errno = olderr; + return ptr; +} + +#elif LJ_TARGET_OSX || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + +/* OSX and FreeBSD mmap() use a naive first-fit linear search. +** That's perfect for us. Except that -pagezero_size must be set for OSX, +** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs +** to be reduced to 250MB on FreeBSD. +*/ +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include <sys/resource.h> +#define MMAP_REGION_START ((uintptr_t)0x10000000) +#else +#define MMAP_REGION_START ((uintptr_t)0x10000) +#endif +#define MMAP_REGION_END ((uintptr_t)0x80000000) + +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + int olderr = errno; + /* Hint for next allocation. Doesn't need to be thread-safe. */ + static uintptr_t alloc_hint = MMAP_REGION_START; + int retry = 0; +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + static int rlimit_modified = 0; + if (LJ_UNLIKELY(rlimit_modified == 0)) { + struct rlimit rlim; + rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START; + setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail below. */ + rlimit_modified = 1; + } +#endif + for (;;) { + void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0); + if ((uintptr_t)p >= MMAP_REGION_START && + (uintptr_t)p + size < MMAP_REGION_END) { + alloc_hint = (uintptr_t)p + size; + errno = olderr; + return p; + } + if (p != CMFAIL) munmap(p, size); + if (retry) break; + retry = 1; + alloc_hint = MMAP_REGION_START; + } + errno = olderr; + return CMFAIL; +} + +#else + +#error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS" + +#endif + +#else + +/* 32 bit mode is easy. */ +static LJ_AINLINE void *CALL_MMAP(size_t size) +{ + int olderr = errno; + void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0); + errno = olderr; + return ptr; +} + +#endif + +#define INIT_MMAP() ((void)0) +#define DIRECT_MMAP(s) CALL_MMAP(s) + +static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size) +{ + int olderr = errno; + int ret = munmap(ptr, size); + errno = olderr; + return ret; +} + +#if LJ_TARGET_LINUX +/* Need to define _GNU_SOURCE to get the mremap prototype. 
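A sketch of the idea behind the OSX/FreeBSD branch of CALL_MMAP() above (the 64-bit build wants its memory in the low-2GB window noted in the comments): mmap() is asked for memory at a moving hint address, the result is accepted only if the whole block landed inside the window, and otherwise it is unmapped and the search retries once from the bottom. This is a Linux/POSIX-flavoured illustration with simplified error handling, not the patch's code:

/* Sketch of the hinted-mmap loop used where MAP_32BIT is unavailable. */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

#define REGION_START ((uintptr_t)0x10000)
#define REGION_END   ((uintptr_t)0x80000000)

static void *mmap_low(size_t size)
{
  static uintptr_t hint = REGION_START;  /* next-allocation hint, like alloc_hint */
  int retry = 0;
  for (;;) {
    void *p = mmap((void *)hint, size, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (p != MAP_FAILED &&
        (uintptr_t)p >= REGION_START && (uintptr_t)p + size < REGION_END) {
      hint = (uintptr_t)p + size;        /* start the next search after this block */
      return p;
    }
    if (p != MAP_FAILED) munmap(p, size);  /* got memory, but outside the window */
    if (retry) return NULL;
    retry = 1; hint = REGION_START;      /* one more pass from the bottom */
  }
}

int main(void)
{
  void *p = mmap_low(1 << 16);
  printf("got %p\n", p);
  if (p) munmap(p, 1 << 16);
  return 0;
}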
*/ +static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, + int flags) +{ + int olderr = errno; + ptr = mremap(ptr, osz, nsz, flags); + errno = olderr; + return ptr; +} + +#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv)) +#define CALL_MREMAP_NOMOVE 0 +#define CALL_MREMAP_MAYMOVE 1 +#if LJ_64 +#define CALL_MREMAP_MV CALL_MREMAP_NOMOVE +#else +#define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE +#endif +#endif + +#endif + +#ifndef CALL_MREMAP +#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL) +#endif + +/* ----------------------- Chunk representations ------------------------ */ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk *fd; /* double links -- used only if free. */ + struct malloc_chunk *bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk *mchunkptr; +typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ +typedef size_t bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#define CHUNK_OVERHEAD (SIZE_T_SIZE) + +/* Direct chunks need a second word of overhead ... */ +#define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +/* ... and additional padding for fake next-chunk at foot */ +#define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES) + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. */ +#define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + +/* ------------------ Operations on head and foot fields ----------------- */ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(INUSE_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. 
*/ +#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS))) +#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_direct(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* ---------------------- Overlaid data structures ----------------------- */ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk *fd; + struct malloc_tree_chunk *bk; + + struct malloc_tree_chunk *child[2]; + struct malloc_tree_chunk *parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk *tchunkptr; +typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +struct malloc_segment { + char *base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment *next; /* ptr to next segment */ +}; + +typedef struct malloc_segment msegment; +typedef struct malloc_segment *msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + msegment seg; +}; + +typedef struct malloc_state *mstate; + +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* page-align a size */ +#define page_align(S)\ + (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\ + & ~(DEFAULT_GRANULARITY - SIZE_T_ONE)) + +#if LJ_TARGET_WINDOWS +#define mmap_align(S) granularity_align(S) +#else +#define mmap_align(S) page_align(S) +#endif + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char *addr) +{ + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= 
sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) +{ + msegmentptr sp = &m->seg; + for (;;) { + if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I */ +#define compute_tree_index(S, I)\ +{\ + unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\ + if (X == 0) {\ + I = 0;\ + } else if (X > 0xFFFF) {\ + I = NTREEBINS-1;\ + } else {\ + unsigned int K = lj_fls(X);\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 
0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | (~(x<<1)+1)) + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else\ + F = B->fd;\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + if (F == B) {\ + clear_smallmap(M, I);\ + } else {\ + F->bk = B;\ + B->fd = F;\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + if (B == F) {\ + clear_smallmap(M, I);\ + } else {\ + B->fd = F;\ + F->bk = B;\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr *H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + } else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0) {\ + T = *C;\ + } else {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + } else {\ + tchunkptr F = T->fd;\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + }\ + }\ +} + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = 
X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + F->bk = R;\ + R->fd = F;\ + } else {\ + tchunkptr *RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr *CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + *RP = 0;\ + }\ + }\ + if (XP != 0) {\ + tbinptr *H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + } else {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + if (R != 0) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + if ((C1 = X->child[1]) != 0) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) { insert_small_chunk(M, P, S)\ + } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) { unlink_small_chunk(M, P, S)\ + } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +static void *direct_alloc(size_t nb) +{ + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */ + char *mm = (char *)(DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - DIRECT_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_DIRECT_BIT; + p->head = psize|CINUSE_BIT; + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + return chunk2mem(p); + } + } + return NULL; +} + +static mchunkptr direct_resize(mchunkptr oldp, size_t nb) +{ + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink direct regions below small size */ + return NULL; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (DEFAULT_GRANULARITY << 1)) { + return oldp; + } else { + size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT; + size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD; + size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + char *cp = (char *)CALL_MREMAP((char *)oldp - offset, + oldmmsize, newmmsize, CALL_MREMAP_MV); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - DIRECT_FOOT_PAD; + newp->head = psize|CINUSE_BIT; + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + return newp; + } + } + return NULL; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) +{ + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char *)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) +{ + /* Establish circular links for 
smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; i++) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +/* Allocate chunk and prepend remainder with chunk in successor base. */ +static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) +{ + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (size_t)((char *)oldfirst - (char *)p); + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + } else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + } + + return chunk2mem(p); +} + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char *tbase, size_t tsize) +{ + /* Determine locations and sizes of segment, fenceposts, old top */ + char *old_top = (char *)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char *old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char *asp = rawsp + offset; + char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + if ((char *)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = (size_t)(csp - old_top); + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } +} + +/* -------------------------- System allocation -------------------------- */ + +static void *alloc_sys(mstate m, size_t nb) +{ + char *tbase = CMFAIL; + size_t tsize = 0; + + /* Directly map large chunks */ + if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) { + void *mem = direct_alloc(nb); + if (mem != 0) + return mem; + } + + { + size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; + size_t rsize = granularity_align(req); + if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */ + char *mp = (char *)(CALL_MMAP(rsize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = rsize; + } + } + } + + if (tbase != CMFAIL) { + msegmentptr sp = &m->seg; + /* Try to merge with an existing segment */ + while (sp != 0 && tbase != sp->base + sp->size) + sp = sp->next; + if (sp != 0 && segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, 
m->topsize + tsize); + } else { + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = sp->next; + if (sp != 0) { + char *oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } else { + add_segment(m, tbase, tsize); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + return chunk2mem(p); + } + } + + return NULL; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) +{ + size_t released = 0; + size_t nsegs = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char *base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + nsegs++; + { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + pred = sp; + sp = next; + } + /* Reset check counter */ + m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ? + nsegs : MAX_RELEASE_CHECK_RATE; + return released; +} + +static int alloc_trim(mstate m, size_t pad) +{ + size_t released = 0; + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = DEFAULT_GRANULARITY; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char *)m->top); + + if (sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + + if (released != 0) { + sp->size -= released; + init_top(m, m->top, m->topsize - released); + } + } + + /* Unmap any unused mmapped segments */ + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0 && m->topsize > m->trim_check) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 
1 : 0; +} + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void *tmalloc_large(mstate m, size_t nb) +{ + tchunkptr v = 0; + size_t rsize = ~nb+1; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) + t = *treebin_at(m, lj_ffs(leftbits)); + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return NULL so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + mchunkptr r = chunk_plus_offset(v, nb); + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) { + set_inuse_and_pinuse(m, v, (rsize + nb)); + } else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + return NULL; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void *tmalloc_small(mstate m, size_t nb) +{ + tchunkptr t, v; + mchunkptr r; + size_t rsize; + bindex_t i = lj_ffs(m->treemap); + + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + r = chunk_plus_offset(v, nb); + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) { + set_inuse_and_pinuse(m, v, (rsize + nb)); + } else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); +} + +/* ----------------------------------------------------------------------- */ + +void *lj_alloc_create(void) +{ + size_t tsize = DEFAULT_GRANULARITY; + char *tbase; + INIT_MMAP(); + tbase = (char *)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = tbase; + m->seg.size = tsize; + m->release_checks = MAX_RELEASE_CHECK_RATE; + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); + return m; + } + return NULL; +} + +void lj_alloc_destroy(void *msp) +{ + mstate ms = (mstate)msp; + msegmentptr sp = &ms->seg; + while (sp != 0) { + char *base = sp->base; + size_t size = sp->size; + sp = sp->next; + CALL_MUNMAP(base, size); + } +} + +static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize) +{ + mstate ms = (mstate)msp; + void *mem; + size_t nb; + if (nsize <= 
MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + return mem; + } else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + bindex_t i = lj_ffs(leftbits); + b = smallbin_at(ms, i); + p = b->fd; + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) { + set_inuse_and_pinuse(ms, p, small_index2size(i)); + } else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + return mem; + } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + return mem; + } + } + } else if (nsize >= MAX_REQUEST) { + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + } else { + nb = pad_request(nsize); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + return mem; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + return mem; + } else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + return mem; + } + return alloc_sys(ms, nb); +} + +static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr) +{ + if (ptr != 0) { + mchunkptr p = mem2chunk(ptr); + mstate fm = (mstate)msp; + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_DIRECT_BIT) != 0) { + prevsize &= ~IS_DIRECT_BIT; + psize += prevsize + DIRECT_FOOT_PAD; + CALL_MUNMAP((char *)p - prevsize, psize); + return NULL; + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + return NULL; + } + } + } + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (tsize > fm->trim_check) + alloc_trim(fm, 0); + return NULL; + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + return NULL; + } else { + 
size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + return NULL; + } + } + } else { + set_free_with_pinuse(p, psize, next); + } + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + } else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + } + return NULL; +} + +static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize) +{ + if (nsize >= MAX_REQUEST) { + return NULL; + } else { + mstate m = (mstate)msp; + mchunkptr oldp = mem2chunk(ptr); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + size_t nb = request2size(nsize); + + /* Try to either shrink or extend into top. Else malloc-copy-free */ + if (is_direct(oldp)) { + newp = direct_resize(oldp, nb); /* this may return NULL. */ + } else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr rem = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, rem, rsize); + lj_alloc_free(m, chunk2mem(rem)); + } + } else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + + if (newp != 0) { + return chunk2mem(newp); + } else { + void *newmem = lj_alloc_malloc(m, nsize); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, ptr, oc < nsize ? oc : nsize); + lj_alloc_free(m, ptr); + } + return newmem; + } + } +} + +void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize) +{ + (void)osize; + if (nsize == 0) { + return lj_alloc_free(msp, ptr); + } else if (ptr == NULL) { + return lj_alloc_malloc(msp, nsize); + } else { + return lj_alloc_realloc(msp, ptr, nsize); + } +} + +#endif diff --git a/src/luajit2/src/lj_alloc.h b/src/luajit2/src/lj_alloc.h new file mode 100644 index 0000000000..f87a7cf342 --- /dev/null +++ b/src/luajit2/src/lj_alloc.h @@ -0,0 +1,17 @@ +/* +** Bundled memory allocator. +** Donated to the public domain. +*/ + +#ifndef _LJ_ALLOC_H +#define _LJ_ALLOC_H + +#include "lj_def.h" + +#ifndef LUAJIT_USE_SYSMALLOC +LJ_FUNC void *lj_alloc_create(void); +LJ_FUNC void lj_alloc_destroy(void *msp); +LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize); +#endif + +#endif diff --git a/src/luajit2/src/lj_api.c b/src/luajit2/src/lj_api.c new file mode 100644 index 0000000000..a6fbb1c6aa --- /dev/null +++ b/src/luajit2/src/lj_api.c @@ -0,0 +1,1214 @@ +/* +** Public Lua/C API. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h +*/ + +#define lj_api_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_udata.h" +#include "lj_meta.h" +#include "lj_state.h" +#include "lj_bc.h" +#include "lj_frame.h" +#include "lj_trace.h" +#include "lj_vm.h" +#include "lj_lex.h" +#include "lj_bcdump.h" +#include "lj_parse.h" + +/* -- Common helper functions --------------------------------------------- */ + +#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base)) +#define api_checkvalidindex(L, i) api_check(L, (i) != niltv(L)) + +static TValue *index2adr(lua_State *L, int idx) +{ + if (idx > 0) { + TValue *o = L->base + (idx - 1); + return o < L->top ? o : niltv(L); + } else if (idx > LUA_REGISTRYINDEX) { + api_check(L, idx != 0 && -idx <= L->top - L->base); + return L->top + idx; + } else if (idx == LUA_GLOBALSINDEX) { + TValue *o = &G(L)->tmptv; + settabV(L, o, tabref(L->env)); + return o; + } else if (idx == LUA_REGISTRYINDEX) { + return registry(L); + } else { + GCfunc *fn = curr_func(L); + api_check(L, fn->c.gct == ~LJ_TFUNC && !isluafunc(fn)); + if (idx == LUA_ENVIRONINDEX) { + TValue *o = &G(L)->tmptv; + settabV(L, o, tabref(fn->c.env)); + return o; + } else { + idx = LUA_GLOBALSINDEX - idx; + return idx <= fn->c.nupvalues ? &fn->c.upvalue[idx-1] : niltv(L); + } + } +} + +static TValue *stkindex2adr(lua_State *L, int idx) +{ + if (idx > 0) { + TValue *o = L->base + (idx - 1); + return o < L->top ? o : niltv(L); + } else { + api_check(L, idx != 0 && -idx <= L->top - L->base); + return L->top + idx; + } +} + +static GCtab *getcurrenv(lua_State *L) +{ + GCfunc *fn = curr_func(L); + return fn->c.gct == ~LJ_TFUNC ? tabref(fn->c.env) : tabref(L->env); +} + +/* -- Miscellaneous API functions ----------------------------------------- */ + +LUA_API int lua_status(lua_State *L) +{ + return L->status; +} + +LUA_API int lua_checkstack(lua_State *L, int size) +{ + if (size > LUAI_MAXCSTACK || (L->top - L->base + size) > LUAI_MAXCSTACK) { + return 0; /* Stack overflow. */ + } else if (size > 0) { + lj_state_checkstack(L, (MSize)size); + } + return 1; +} + +LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg) +{ + if (!lua_checkstack(L, size)) + lj_err_callerv(L, LJ_ERR_STKOVM, msg); +} + +LUA_API void lua_xmove(lua_State *from, lua_State *to, int n) +{ + TValue *f, *t; + if (from == to) return; + api_checknelems(from, n); + api_check(from, G(from) == G(to)); + lj_state_checkstack(to, (MSize)n); + f = from->top; + t = to->top = to->top + n; + while (--n >= 0) copyTV(to, --t, --f); + from->top = f; +} + +/* -- Stack manipulation -------------------------------------------------- */ + +LUA_API int lua_gettop(lua_State *L) +{ + return (int)(L->top - L->base); +} + +LUA_API void lua_settop(lua_State *L, int idx) +{ + if (idx >= 0) { + api_check(L, idx <= tvref(L->maxstack) - L->base); + if (L->base + idx > L->top) { + if (L->base + idx >= tvref(L->maxstack)) + lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base)); + do { setnilV(L->top++); } while (L->top < L->base + idx); + } else { + L->top = L->base + idx; + } + } else { + api_check(L, -(idx+1) <= (L->top - L->base)); + L->top += idx+1; /* Shrinks top (idx < 0). 
*/ + } +} + +LUA_API void lua_remove(lua_State *L, int idx) +{ + TValue *p = stkindex2adr(L, idx); + api_checkvalidindex(L, p); + while (++p < L->top) copyTV(L, p-1, p); + L->top--; +} + +LUA_API void lua_insert(lua_State *L, int idx) +{ + TValue *q, *p = stkindex2adr(L, idx); + api_checkvalidindex(L, p); + for (q = L->top; q > p; q--) copyTV(L, q, q-1); + copyTV(L, p, L->top); +} + +LUA_API void lua_replace(lua_State *L, int idx) +{ + api_checknelems(L, 1); + if (idx == LUA_GLOBALSINDEX) { + api_check(L, tvistab(L->top-1)); + /* NOBARRIER: A thread (i.e. L) is never black. */ + setgcref(L->env, obj2gco(tabV(L->top-1))); + } else if (idx == LUA_ENVIRONINDEX) { + GCfunc *fn = curr_func(L); + if (fn->c.gct != ~LJ_TFUNC) + lj_err_msg(L, LJ_ERR_NOENV); + api_check(L, tvistab(L->top-1)); + setgcref(fn->c.env, obj2gco(tabV(L->top-1))); + lj_gc_barrier(L, fn, L->top-1); + } else { + TValue *o = index2adr(L, idx); + api_checkvalidindex(L, o); + copyTV(L, o, L->top-1); + if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */ + lj_gc_barrier(L, curr_func(L), L->top-1); + } + L->top--; +} + +LUA_API void lua_pushvalue(lua_State *L, int idx) +{ + copyTV(L, L->top, index2adr(L, idx)); + incr_top(L); +} + +/* -- Stack getters ------------------------------------------------------- */ + +LUA_API int lua_type(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + if (tvisnumber(o)) { + return LUA_TNUMBER; +#if LJ_64 + } else if (tvislightud(o)) { + return LUA_TLIGHTUSERDATA; +#endif + } else if (o == niltv(L)) { + return LUA_TNONE; + } else { /* Magic internal/external tag conversion. ORDER LJ_T */ + uint32_t t = ~itype(o); +#if LJ_64 + int tt = (int)((U64x(75a06,98042110) >> 4*t) & 15u); +#else + int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u); +#endif + lua_assert(tt != LUA_TNIL || tvisnil(o)); + return tt; + } +} + +LUALIB_API void luaL_checktype(lua_State *L, int idx, int tt) +{ + if (lua_type(L, idx) != tt) + lj_err_argt(L, idx, tt); +} + +LUALIB_API void luaL_checkany(lua_State *L, int idx) +{ + if (index2adr(L, idx) == niltv(L)) + lj_err_arg(L, idx, LJ_ERR_NOVAL); +} + +LUA_API const char *lua_typename(lua_State *L, int t) +{ + UNUSED(L); + return lj_obj_typename[t+1]; +} + +LUA_API int lua_iscfunction(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return tvisfunc(o) && !isluafunc(funcV(o)); +} + +LUA_API int lua_isnumber(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + return (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), &tmp))); +} + +LUA_API int lua_isstring(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return (tvisstr(o) || tvisnumber(o)); +} + +LUA_API int lua_isuserdata(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return (tvisudata(o) || tvislightud(o)); +} + +LUA_API int lua_rawequal(lua_State *L, int idx1, int idx2) +{ + cTValue *o1 = index2adr(L, idx1); + cTValue *o2 = index2adr(L, idx2); + return (o1 == niltv(L) || o2 == niltv(L)) ? 
0 : lj_obj_equal(o1, o2); +} + +LUA_API int lua_equal(lua_State *L, int idx1, int idx2) +{ + cTValue *o1 = index2adr(L, idx1); + cTValue *o2 = index2adr(L, idx2); + if (tvisint(o1) && tvisint(o2)) { + return intV(o1) == intV(o2); + } else if (tvisnumber(o1) && tvisnumber(o2)) { + return numberVnum(o1) == numberVnum(o2); + } else if (itype(o1) != itype(o2)) { + return 0; + } else if (tvispri(o1)) { + return o1 != niltv(L) && o2 != niltv(L); +#if LJ_64 + } else if (tvislightud(o1)) { + return o1->u64 == o2->u64; +#endif + } else if (gcrefeq(o1->gcr, o2->gcr)) { + return 1; + } else if (!tvistabud(o1)) { + return 0; + } else { + TValue *base = lj_meta_equal(L, gcV(o1), gcV(o2), 0); + if ((uintptr_t)base <= 1) { + return (int)(uintptr_t)base; + } else { + L->top = base+2; + lj_vm_call(L, base, 1+1); + L->top -= 2; + return tvistruecond(L->top+1); + } + } +} + +LUA_API int lua_lessthan(lua_State *L, int idx1, int idx2) +{ + cTValue *o1 = index2adr(L, idx1); + cTValue *o2 = index2adr(L, idx2); + if (o1 == niltv(L) || o2 == niltv(L)) { + return 0; + } else if (tvisint(o1) && tvisint(o2)) { + return intV(o1) < intV(o2); + } else if (tvisnumber(o1) && tvisnumber(o2)) { + return numberVnum(o1) < numberVnum(o2); + } else { + TValue *base = lj_meta_comp(L, o1, o2, 0); + if ((uintptr_t)base <= 1) { + return (int)(uintptr_t)base; + } else { + L->top = base+2; + lj_vm_call(L, base, 1+1); + L->top -= 2; + return tvistruecond(L->top+1); + } + } +} + +LUA_API lua_Number lua_tonumber(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + if (LJ_LIKELY(tvisnumber(o))) + return numberVnum(o); + else if (tvisstr(o) && lj_str_tonum(strV(o), &tmp)) + return numV(&tmp); + else + return 0; +} + +LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + if (LJ_LIKELY(tvisnumber(o))) + return numberVnum(o); + else if (!(tvisstr(o) && lj_str_tonum(strV(o), &tmp))) + lj_err_argt(L, idx, LUA_TNUMBER); + return numV(&tmp); +} + +LUALIB_API lua_Number luaL_optnumber(lua_State *L, int idx, lua_Number def) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + if (LJ_LIKELY(tvisnumber(o))) + return numberVnum(o); + else if (tvisnil(o)) + return def; + else if (!(tvisstr(o) && lj_str_tonum(strV(o), &tmp))) + lj_err_argt(L, idx, LUA_TNUMBER); + return numV(&tmp); +} + +LUA_API lua_Integer lua_tointeger(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + lua_Number n; + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } else if (LJ_LIKELY(tvisnum(o))) { + n = numV(o); + } else { + if (!(tvisstr(o) && lj_str_tonumber(strV(o), &tmp))) + return 0; + if (tvisint(&tmp)) + return (lua_Integer)intV(&tmp); + n = numV(&tmp); + } +#if LJ_64 + return (lua_Integer)n; +#else + return lj_num2int(n); +#endif +} + +LUALIB_API lua_Integer luaL_checkinteger(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + lua_Number n; + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } else if (LJ_LIKELY(tvisnum(o))) { + n = numV(o); + } else { + if (!(tvisstr(o) && lj_str_tonumber(strV(o), &tmp))) + lj_err_argt(L, idx, LUA_TNUMBER); + if (tvisint(&tmp)) + return (lua_Integer)intV(&tmp); + n = numV(&tmp); + } +#if LJ_64 + return (lua_Integer)n; +#else + return lj_num2int(n); +#endif +} + +LUALIB_API lua_Integer luaL_optinteger(lua_State *L, int idx, lua_Integer def) +{ + cTValue *o = index2adr(L, idx); + TValue tmp; + lua_Number n; + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } else if (LJ_LIKELY(tvisnum(o))) { + n = numV(o); + 
} else if (tvisnil(o)) { + return def; + } else { + if (!(tvisstr(o) && lj_str_tonumber(strV(o), &tmp))) + lj_err_argt(L, idx, LUA_TNUMBER); + if (tvisint(&tmp)) + return (lua_Integer)intV(&tmp); + n = numV(&tmp); + } +#if LJ_64 + return (lua_Integer)n; +#else + return lj_num2int(n); +#endif +} + +LUA_API int lua_toboolean(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return tvistruecond(o); +} + +LUA_API const char *lua_tolstring(lua_State *L, int idx, size_t *len) +{ + TValue *o = index2adr(L, idx); + GCstr *s; + if (LJ_LIKELY(tvisstr(o))) { + s = strV(o); + } else if (tvisnumber(o)) { + lj_gc_check(L); + o = index2adr(L, idx); /* GC may move the stack. */ + s = lj_str_fromnumber(L, o); + } else { + if (len != NULL) *len = 0; + return NULL; + } + if (len != NULL) *len = s->len; + return strdata(s); +} + +LUALIB_API const char *luaL_checklstring(lua_State *L, int idx, size_t *len) +{ + TValue *o = index2adr(L, idx); + GCstr *s; + if (LJ_LIKELY(tvisstr(o))) { + s = strV(o); + } else if (tvisnumber(o)) { + lj_gc_check(L); + o = index2adr(L, idx); /* GC may move the stack. */ + s = lj_str_fromnumber(L, o); + } else { + lj_err_argt(L, idx, LUA_TSTRING); + } + if (len != NULL) *len = s->len; + return strdata(s); +} + +LUALIB_API const char *luaL_optlstring(lua_State *L, int idx, + const char *def, size_t *len) +{ + TValue *o = index2adr(L, idx); + GCstr *s; + if (LJ_LIKELY(tvisstr(o))) { + s = strV(o); + } else if (tvisnil(o)) { + if (len != NULL) *len = def ? strlen(def) : 0; + return def; + } else if (tvisnumber(o)) { + lj_gc_check(L); + o = index2adr(L, idx); /* GC may move the stack. */ + s = lj_str_fromnumber(L, o); + } else { + lj_err_argt(L, idx, LUA_TSTRING); + } + if (len != NULL) *len = s->len; + return strdata(s); +} + +LUALIB_API int luaL_checkoption(lua_State *L, int idx, const char *def, + const char *const lst[]) +{ + ptrdiff_t i; + const char *s = lua_tolstring(L, idx, NULL); + if (s == NULL && (s = def) == NULL) + lj_err_argt(L, idx, LUA_TSTRING); + for (i = 0; lst[i]; i++) + if (strcmp(lst[i], s) == 0) + return (int)i; + lj_err_argv(L, idx, LJ_ERR_INVOPTM, s); +} + +LUA_API size_t lua_objlen(lua_State *L, int idx) +{ + TValue *o = index2adr(L, idx); + if (tvisstr(o)) + return strV(o)->len; + else if (tvistab(o)) + return (size_t)lj_tab_len(tabV(o)); + else if (tvisudata(o)) + return udataV(o)->len; + else if (tvisnumber(o)) + return lj_str_fromnumber(L, o)->len; + else + return 0; +} + +LUA_API lua_CFunction lua_tocfunction(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + if (tvisfunc(o)) { + BCOp op = bc_op(*mref(funcV(o)->c.pc, BCIns)); + if (op == BC_FUNCC || op == BC_FUNCCW) + return funcV(o)->c.f; + } + return NULL; +} + +LUA_API void *lua_touserdata(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + if (tvisudata(o)) + return uddata(udataV(o)); + else if (tvislightud(o)) + return lightudV(o); + else + return NULL; +} + +LUA_API lua_State *lua_tothread(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + return (!tvisthread(o)) ? 
NULL : threadV(o); +} + +LUA_API const void *lua_topointer(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + if (tvisudata(o)) + return uddata(udataV(o)); + else if (tvislightud(o)) + return lightudV(o); + else if (tviscdata(o)) + return cdataptr(cdataV(o)); + else if (tvisgcv(o)) + return gcV(o); + else + return NULL; +} + +/* -- Stack setters (object creation) ------------------------------------- */ + +LUA_API void lua_pushnil(lua_State *L) +{ + setnilV(L->top); + incr_top(L); +} + +LUA_API void lua_pushnumber(lua_State *L, lua_Number n) +{ + setnumV(L->top, n); + if (LJ_UNLIKELY(tvisnan(L->top))) + setnanV(L->top); /* Canonicalize injected NaNs. */ + incr_top(L); +} + +LUA_API void lua_pushinteger(lua_State *L, lua_Integer n) +{ + setintptrV(L->top, n); + incr_top(L); +} + +LUA_API void lua_pushlstring(lua_State *L, const char *str, size_t len) +{ + GCstr *s; + lj_gc_check(L); + s = lj_str_new(L, str, len); + setstrV(L, L->top, s); + incr_top(L); +} + +LUA_API void lua_pushstring(lua_State *L, const char *str) +{ + if (str == NULL) { + setnilV(L->top); + } else { + GCstr *s; + lj_gc_check(L); + s = lj_str_newz(L, str); + setstrV(L, L->top, s); + } + incr_top(L); +} + +LUA_API const char *lua_pushvfstring(lua_State *L, const char *fmt, + va_list argp) +{ + lj_gc_check(L); + return lj_str_pushvf(L, fmt, argp); +} + +LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...) +{ + const char *ret; + va_list argp; + lj_gc_check(L); + va_start(argp, fmt); + ret = lj_str_pushvf(L, fmt, argp); + va_end(argp); + return ret; +} + +LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n) +{ + GCfunc *fn; + lj_gc_check(L); + api_checknelems(L, n); + fn = lj_func_newC(L, (MSize)n, getcurrenv(L)); + fn->c.f = f; + L->top -= n; + while (n--) + copyTV(L, &fn->c.upvalue[n], L->top+n); + setfuncV(L, L->top, fn); + lua_assert(iswhite(obj2gco(fn))); + incr_top(L); +} + +LUA_API void lua_pushboolean(lua_State *L, int b) +{ + setboolV(L->top, (b != 0)); + incr_top(L); +} + +LUA_API void lua_pushlightuserdata(lua_State *L, void *p) +{ + setlightudV(L->top, checklightudptr(L, p)); + incr_top(L); +} + +LUA_API void lua_createtable(lua_State *L, int narray, int nrec) +{ + GCtab *t; + lj_gc_check(L); + t = lj_tab_new(L, (uint32_t)(narray > 0 ? 
narray+1 : 0), hsize2hbits(nrec)); + settabV(L, L->top, t); + incr_top(L); +} + +LUALIB_API int luaL_newmetatable(lua_State *L, const char *tname) +{ + GCtab *regt = tabV(registry(L)); + TValue *tv = lj_tab_setstr(L, regt, lj_str_newz(L, tname)); + if (tvisnil(tv)) { + GCtab *mt = lj_tab_new(L, 0, 1); + settabV(L, tv, mt); + settabV(L, L->top++, mt); + lj_gc_anybarriert(L, regt); + return 1; + } else { + copyTV(L, L->top++, tv); + return 0; + } +} + +LUA_API int lua_pushthread(lua_State *L) +{ + setthreadV(L, L->top, L); + incr_top(L); + return (mainthread(G(L)) == L); +} + +LUA_API lua_State *lua_newthread(lua_State *L) +{ + lua_State *L1; + lj_gc_check(L); + L1 = lj_state_new(L); + setthreadV(L, L->top, L1); + incr_top(L); + return L1; +} + +LUA_API void *lua_newuserdata(lua_State *L, size_t size) +{ + GCudata *ud; + lj_gc_check(L); + if (size > LJ_MAX_UDATA) + lj_err_msg(L, LJ_ERR_UDATAOV); + ud = lj_udata_new(L, (MSize)size, getcurrenv(L)); + setudataV(L, L->top, ud); + incr_top(L); + return uddata(ud); +} + +LUA_API void lua_concat(lua_State *L, int n) +{ + api_checknelems(L, n); + if (n >= 2) { + n--; + do { + TValue *top = lj_meta_cat(L, L->top-1, n); + if (top == NULL) { + L->top -= n; + break; + } + n -= (int)(L->top - top); + L->top = top+2; + lj_vm_call(L, top, 1+1); + L->top--; + copyTV(L, L->top-1, L->top); + } while (--n > 0); + } else if (n == 0) { /* Push empty string. */ + setstrV(L, L->top, &G(L)->strempty); + incr_top(L); + } + /* else n == 1: nothing to do. */ +} + +/* -- Object getters ------------------------------------------------------ */ + +LUA_API void lua_gettable(lua_State *L, int idx) +{ + cTValue *v, *t = index2adr(L, idx); + api_checkvalidindex(L, t); + v = lj_meta_tget(L, t, L->top-1); + if (v == NULL) { + L->top += 2; + lj_vm_call(L, L->top-2, 1+1); + L->top -= 2; + v = L->top+1; + } + copyTV(L, L->top-1, v); +} + +LUA_API void lua_getfield(lua_State *L, int idx, const char *k) +{ + cTValue *v, *t = index2adr(L, idx); + TValue key; + api_checkvalidindex(L, t); + setstrV(L, &key, lj_str_newz(L, k)); + v = lj_meta_tget(L, t, &key); + if (v == NULL) { + L->top += 2; + lj_vm_call(L, L->top-2, 1+1); + L->top -= 2; + v = L->top+1; + } + copyTV(L, L->top, v); + incr_top(L); +} + +LUA_API void lua_rawget(lua_State *L, int idx) +{ + cTValue *t = index2adr(L, idx); + api_check(L, tvistab(t)); + copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1)); +} + +LUA_API void lua_rawgeti(lua_State *L, int idx, int n) +{ + cTValue *v, *t = index2adr(L, idx); + api_check(L, tvistab(t)); + v = lj_tab_getint(tabV(t), n); + if (v) { + copyTV(L, L->top, v); + } else { + setnilV(L->top); + } + incr_top(L); +} + +LUA_API int lua_getmetatable(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + GCtab *mt = NULL; + if (tvistab(o)) + mt = tabref(tabV(o)->metatable); + else if (tvisudata(o)) + mt = tabref(udataV(o)->metatable); + else + mt = tabref(basemt_obj(G(L), o)); + if (mt == NULL) + return 0; + settabV(L, L->top, mt); + incr_top(L); + return 1; +} + +LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field) +{ + if (lua_getmetatable(L, idx)) { + cTValue *tv = lj_tab_getstr(tabV(L->top-1), lj_str_newz(L, field)); + if (tv && !tvisnil(tv)) { + copyTV(L, L->top-1, tv); + return 1; + } + L->top--; + } + return 0; +} + +LUA_API void lua_getfenv(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + api_checkvalidindex(L, o); + if (tvisfunc(o)) { + settabV(L, L->top, tabref(funcV(o)->c.env)); + } else if (tvisudata(o)) { + settabV(L, L->top, 
tabref(udataV(o)->env)); + } else if (tvisthread(o)) { + settabV(L, L->top, tabref(threadV(o)->env)); + } else { + setnilV(L->top); + } + incr_top(L); +} + +LUA_API int lua_next(lua_State *L, int idx) +{ + cTValue *t = index2adr(L, idx); + int more; + api_check(L, tvistab(t)); + more = lj_tab_next(L, tabV(t), L->top-1); + if (more) { + incr_top(L); /* Return new key and value slot. */ + } else { /* End of traversal. */ + L->top--; /* Remove key slot. */ + } + return more; +} + +LUA_API const char *lua_getupvalue(lua_State *L, int idx, int n) +{ + TValue *val; + const char *name = lj_debug_uvnamev(index2adr(L, idx), (uint32_t)(n-1), &val); + if (name) { + copyTV(L, L->top, val); + incr_top(L); + } + return name; +} + +LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname) +{ + cTValue *o = index2adr(L, idx); + if (tvisudata(o)) { + GCudata *ud = udataV(o); + cTValue *tv = lj_tab_getstr(tabV(registry(L)), lj_str_newz(L, tname)); + if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable)) + return uddata(ud); + } + lj_err_argtype(L, idx, tname); + return NULL; /* unreachable */ +} + +/* -- Object setters ------------------------------------------------------ */ + +LUA_API void lua_settable(lua_State *L, int idx) +{ + TValue *o; + cTValue *t = index2adr(L, idx); + api_checknelems(L, 2); + api_checkvalidindex(L, t); + o = lj_meta_tset(L, t, L->top-2); + if (o) { + /* NOBARRIER: lj_meta_tset ensures the table is not black. */ + copyTV(L, o, L->top-1); + L->top -= 2; + } else { + L->top += 3; + copyTV(L, L->top-1, L->top-6); + lj_vm_call(L, L->top-3, 0+1); + L->top -= 3; + } +} + +LUA_API void lua_setfield(lua_State *L, int idx, const char *k) +{ + TValue *o; + TValue key; + cTValue *t = index2adr(L, idx); + api_checknelems(L, 1); + api_checkvalidindex(L, t); + setstrV(L, &key, lj_str_newz(L, k)); + o = lj_meta_tset(L, t, &key); + if (o) { + L->top--; + /* NOBARRIER: lj_meta_tset ensures the table is not black. */ + copyTV(L, o, L->top); + } else { + L->top += 3; + copyTV(L, L->top-1, L->top-6); + lj_vm_call(L, L->top-3, 0+1); + L->top -= 2; + } +} + +LUA_API void lua_rawset(lua_State *L, int idx) +{ + GCtab *t = tabV(index2adr(L, idx)); + TValue *dst, *key; + api_checknelems(L, 2); + key = L->top-2; + dst = lj_tab_set(L, t, key); + copyTV(L, dst, key+1); + lj_gc_anybarriert(L, t); + L->top = key; +} + +LUA_API void lua_rawseti(lua_State *L, int idx, int n) +{ + GCtab *t = tabV(index2adr(L, idx)); + TValue *dst, *src; + api_checknelems(L, 1); + dst = lj_tab_setint(L, t, n); + src = L->top-1; + copyTV(L, dst, src); + lj_gc_barriert(L, t, dst); + L->top = src; +} + +LUA_API int lua_setmetatable(lua_State *L, int idx) +{ + global_State *g; + GCtab *mt; + cTValue *o = index2adr(L, idx); + api_checknelems(L, 1); + api_checkvalidindex(L, o); + if (tvisnil(L->top-1)) { + mt = NULL; + } else { + api_check(L, tvistab(L->top-1)); + mt = tabV(L->top-1); + } + g = G(L); + if (tvistab(o)) { + setgcref(tabV(o)->metatable, obj2gco(mt)); + if (mt) + lj_gc_objbarriert(L, tabV(o), mt); + } else if (tvisudata(o)) { + setgcref(udataV(o)->metatable, obj2gco(mt)); + if (mt) + lj_gc_objbarrier(L, udataV(o), mt); + } else { + /* Flush cache, since traces specialize to basemt. But not during __gc. */ + if (lj_trace_flushall(L)) + lj_err_caller(L, LJ_ERR_NOGCMM); + if (tvisbool(o)) { + /* NOBARRIER: basemt is a GC root. */ + setgcref(basemt_it(g, LJ_TTRUE), obj2gco(mt)); + setgcref(basemt_it(g, LJ_TFALSE), obj2gco(mt)); + } else { + /* NOBARRIER: basemt is a GC root. 
*/ + setgcref(basemt_obj(g, o), obj2gco(mt)); + } + } + L->top--; + return 1; +} + +LUA_API int lua_setfenv(lua_State *L, int idx) +{ + cTValue *o = index2adr(L, idx); + GCtab *t; + api_checknelems(L, 1); + api_checkvalidindex(L, o); + api_check(L, tvistab(L->top-1)); + t = tabV(L->top-1); + if (tvisfunc(o)) { + setgcref(funcV(o)->c.env, obj2gco(t)); + } else if (tvisudata(o)) { + setgcref(udataV(o)->env, obj2gco(t)); + } else if (tvisthread(o)) { + setgcref(threadV(o)->env, obj2gco(t)); + } else { + L->top--; + return 0; + } + lj_gc_objbarrier(L, gcV(o), t); + L->top--; + return 1; +} + +LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n) +{ + cTValue *f = index2adr(L, idx); + TValue *val; + const char *name; + api_checknelems(L, 1); + name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val); + if (name) { + L->top--; + copyTV(L, val, L->top); + lj_gc_barrier(L, funcV(f), L->top); + } + return name; +} + +/* -- Calls --------------------------------------------------------------- */ + +LUA_API void lua_call(lua_State *L, int nargs, int nresults) +{ + api_check(L, L->status == 0 || L->status == LUA_ERRERR); + api_checknelems(L, nargs+1); + lj_vm_call(L, L->top - nargs, nresults+1); +} + +LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc) +{ + global_State *g = G(L); + uint8_t oldh = hook_save(g); + ptrdiff_t ef; + int status; + api_check(L, L->status == 0 || L->status == LUA_ERRERR); + api_checknelems(L, nargs+1); + if (errfunc == 0) { + ef = 0; + } else { + cTValue *o = stkindex2adr(L, errfunc); + api_checkvalidindex(L, o); + ef = savestack(L, o); + } + status = lj_vm_pcall(L, L->top - nargs, nresults+1, ef); + if (status) hook_restore(g, oldh); + return status; +} + +static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud) +{ + GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L)); + fn->c.f = func; + setfuncV(L, L->top, fn); + setlightudV(L->top+1, checklightudptr(L, ud)); + cframe_nres(L->cframe) = 1+0; /* Zero results. */ + L->top += 2; + return L->top-1; /* Now call the newly allocated C function. */ +} + +LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud) +{ + global_State *g = G(L); + uint8_t oldh = hook_save(g); + int status; + api_check(L, L->status == 0 || L->status == LUA_ERRERR); + status = lj_vm_cpcall(L, func, ud, cpcall); + if (status) hook_restore(g, oldh); + return status; +} + +LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field) +{ + if (luaL_getmetafield(L, idx, field)) { + TValue *base = L->top--; + copyTV(L, base, index2adr(L, idx)); + L->top = base+1; + lj_vm_call(L, base, 1+1); + return 1; + } + return 0; +} + +/* -- Coroutine yield and resume ------------------------------------------ */ + +LUA_API int lua_yield(lua_State *L, int nresults) +{ + void *cf = L->cframe; + global_State *g = G(L); + if (cframe_canyield(cf)) { + cf = cframe_raw(cf); + if (!hook_active(g)) { /* Regular yield: move results down if needed. */ + cTValue *f = L->top - nresults; + if (f > L->base) { + TValue *t = L->base; + while (--nresults >= 0) copyTV(L, t++, f++); + L->top = t; + } + } else { /* Yield from hook: add a pseudo-frame. 
*/ + TValue *top = L->top; + hook_leave(g); + top->u64 = cframe_multres(cf); + setcont(top+1, lj_cont_hook); + setframe_pc(top+1, cframe_pc(cf)-1); + setframe_gc(top+2, obj2gco(L)); + setframe_ftsz(top+2, (int)((char *)(top+3)-(char *)L->base)+FRAME_CONT); + L->top = L->base = top+3; + } +#if LJ_TARGET_X64 + lj_err_throw(L, LUA_YIELD); +#else + L->cframe = NULL; + L->status = LUA_YIELD; + lj_vm_unwind_c(cf, LUA_YIELD); +#endif + } + lj_err_msg(L, LJ_ERR_CYIELD); + return 0; /* unreachable */ +} + +LUA_API int lua_resume(lua_State *L, int nargs) +{ + if (L->cframe == NULL && L->status <= LUA_YIELD) + return lj_vm_resume(L, L->top - nargs, 0, 0); + L->top = L->base; + setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP)); + incr_top(L); + return LUA_ERRRUN; +} + +/* -- Load and dump Lua code ---------------------------------------------- */ + +static TValue *cpparser(lua_State *L, lua_CFunction dummy, void *ud) +{ + LexState *ls = (LexState *)ud; + GCproto *pt; + GCfunc *fn; + UNUSED(dummy); + cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ + pt = lj_lex_setup(L, ls) ? lj_bcread(ls) : lj_parse(ls); + fn = lj_func_newL_empty(L, pt, tabref(L->env)); + /* Don't combine above/below into one statement. */ + setfuncV(L, L->top++, fn); + return NULL; +} + +LUA_API int lua_load(lua_State *L, lua_Reader reader, void *data, + const char *chunkname) +{ + LexState ls; + int status; + ls.rfunc = reader; + ls.rdata = data; + ls.chunkarg = chunkname ? chunkname : "?"; + lj_str_initbuf(&ls.sb); + status = lj_vm_cpcall(L, NULL, &ls, cpparser); + lj_lex_cleanup(L, &ls); + lj_gc_check(L); + return status; +} + +LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data) +{ + cTValue *o = L->top-1; + api_checknelems(L, 1); + if (tvisfunc(o) && isluafunc(funcV(o))) + return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0); + else + return 1; +} + +/* -- GC and memory management -------------------------------------------- */ + +LUA_API int lua_gc(lua_State *L, int what, int data) +{ + global_State *g = G(L); + int res = 0; + switch (what) { + case LUA_GCSTOP: + g->gc.threshold = LJ_MAX_MEM; + break; + case LUA_GCRESTART: + g->gc.threshold = data == -1 ? (g->gc.total/100)*g->gc.pause : g->gc.total; + break; + case LUA_GCCOLLECT: + lj_gc_fullgc(L); + break; + case LUA_GCCOUNT: + res = (int)(g->gc.total >> 10); + break; + case LUA_GCCOUNTB: + res = (int)(g->gc.total & 0x3ff); + break; + case LUA_GCSTEP: { + MSize a = (MSize)data << 10; + g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0; + while (g->gc.total >= g->gc.threshold) + if (lj_gc_step(L)) { + res = 1; + break; + } + break; + } + case LUA_GCSETPAUSE: + res = (int)(g->gc.pause); + g->gc.pause = (MSize)data; + break; + case LUA_GCSETSTEPMUL: + res = (int)(g->gc.stepmul); + g->gc.stepmul = (MSize)data; + break; + default: + res = -1; /* Invalid option. */ + } + return res; +} + +LUA_API lua_Alloc lua_getallocf(lua_State *L, void **ud) +{ + global_State *g = G(L); + if (ud) *ud = g->allocd; + return g->allocf; +} + +LUA_API void lua_setallocf(lua_State *L, lua_Alloc f, void *ud) +{ + global_State *g = G(L); + g->allocd = ud; + g->allocf = f; +} + diff --git a/src/luajit2/src/lj_arch.h b/src/luajit2/src/lj_arch.h new file mode 100644 index 0000000000..ff4628df94 --- /dev/null +++ b/src/luajit2/src/lj_arch.h @@ -0,0 +1,266 @@ +/* +** Target architecture selection. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_ARCH_H +#define _LJ_ARCH_H + +#include "lua.h" + +/* Target endianess. 
*/ +#define LUAJIT_LE 0 +#define LUAJIT_BE 1 + +/* Target architectures. */ +#define LUAJIT_ARCH_X86 1 +#define LUAJIT_ARCH_x86 1 +#define LUAJIT_ARCH_X64 2 +#define LUAJIT_ARCH_x64 2 +#define LUAJIT_ARCH_ARM 3 +#define LUAJIT_ARCH_arm 3 +#define LUAJIT_ARCH_PPC 4 +#define LUAJIT_ARCH_ppc 4 +#define LUAJIT_ARCH_PPCSPE 5 +#define LUAJIT_ARCH_ppcspe 5 + +/* Target OS. */ +#define LUAJIT_OS_OTHER 0 +#define LUAJIT_OS_WINDOWS 1 +#define LUAJIT_OS_LINUX 2 +#define LUAJIT_OS_OSX 3 +#define LUAJIT_OS_BSD 4 +#define LUAJIT_OS_POSIX 5 + +/* Select native target if no target defined. */ +#ifndef LUAJIT_TARGET + +#if defined(__i386) || defined(__i386__) || defined(_M_IX86) +#define LUAJIT_TARGET LUAJIT_ARCH_X86 +#elif defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) +#define LUAJIT_TARGET LUAJIT_ARCH_X64 +#elif defined(__arm__) || defined(__arm) || defined(__ARM__) || defined(__ARM) +#define LUAJIT_TARGET LUAJIT_ARCH_ARM +#elif defined(__ppc__) || defined(__ppc) || defined(__PPC__) || defined(__PPC) || defined(__powerpc__) || defined(__powerpc) || defined(__POWERPC__) || defined(__POWERPC) || defined(_M_PPC) +#ifdef __NO_FPRS__ +#define LUAJIT_TARGET LUAJIT_ARCH_PPCSPE +#else +#define LUAJIT_TARGET LUAJIT_ARCH_PPC +#endif +#else +#error "No support for this architecture (yet)" +#endif + +#endif + +/* Select native OS if no target OS defined. */ +#ifndef LUAJIT_OS + +#if defined(_WIN32) +#define LUAJIT_OS LUAJIT_OS_WINDOWS +#elif defined(__linux__) +#define LUAJIT_OS LUAJIT_OS_LINUX +#elif defined(__MACH__) && defined(__APPLE__) +#define LUAJIT_OS LUAJIT_OS_OSX +#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \ + defined(__NetBSD__) || defined(__OpenBSD__) +#define LUAJIT_OS LUAJIT_OS_BSD +#elif (defined(__sun__) && defined(__svr4__)) || defined(__solaris__) || \ + defined(__CYGWIN__) +#define LUAJIT_OS LUAJIT_OS_POSIX +#else +#define LUAJIT_OS LUAJIT_OS_OTHER +#endif + +#endif + +/* Set target OS properties. */ +#if LUAJIT_OS == LUAJIT_OS_WINDOWS +#define LJ_OS_NAME "Windows" +#elif LUAJIT_OS == LUAJIT_OS_LINUX +#define LJ_OS_NAME "Linux" +#elif LUAJIT_OS == LUAJIT_OS_OSX +#define LJ_OS_NAME "OSX" +#elif LUAJIT_OS == LUAJIT_OS_BSD +#define LJ_OS_NAME "BSD" +#elif LUAJIT_OS == LUAJIT_OS_POSIX +#define LJ_OS_NAME "POSIX" +#else +#define LJ_OS_NAME "Other" +#endif + +#define LJ_TARGET_WINDOWS (LUAJIT_OS == LUAJIT_OS_WINDOWS) +#define LJ_TARGET_LINUX (LUAJIT_OS == LUAJIT_OS_LINUX) +#define LJ_TARGET_OSX (LUAJIT_OS == LUAJIT_OS_OSX) +#define LJ_TARGET_POSIX (LUAJIT_OS > LUAJIT_OS_WINDOWS) +#define LJ_TARGET_DLOPEN LJ_TARGET_POSIX + +/* Set target architecture properties. 
*/ +#if LUAJIT_TARGET == LUAJIT_ARCH_X86 + +#define LJ_ARCH_NAME "x86" +#define LJ_ARCH_BITS 32 +#define LJ_ARCH_ENDIAN LUAJIT_LE +#define LJ_ARCH_HASFPU 1 +#define LJ_ABI_WIN LJ_TARGET_WINDOWS +#define LJ_TARGET_X86 1 +#define LJ_TARGET_X86ORX64 1 +#define LJ_TARGET_EHRETREG 0 +#define LJ_TARGET_MASKSHIFT 1 +#define LJ_TARGET_MASKROT 1 +#define LJ_ARCH_DUALNUM 1 + +#elif LUAJIT_TARGET == LUAJIT_ARCH_X64 + +#define LJ_ARCH_NAME "x64" +#define LJ_ARCH_BITS 64 +#define LJ_ARCH_ENDIAN LUAJIT_LE +#define LJ_ARCH_HASFPU 1 +#define LJ_ABI_WIN LJ_TARGET_WINDOWS +#define LJ_TARGET_X64 1 +#define LJ_TARGET_X86ORX64 1 +#define LJ_TARGET_EHRETREG 0 +#define LJ_TARGET_JUMPRANGE 31 /* +-2^31 = +-2GB */ +#define LJ_TARGET_MASKSHIFT 1 +#define LJ_TARGET_MASKROT 1 +#define LJ_ARCH_DUALNUM 1 + +#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM + +#define LJ_ARCH_NAME "arm" +#define LJ_ARCH_BITS 32 +#define LJ_ARCH_ENDIAN LUAJIT_LE +#define LJ_ARCH_HASFPU 0 +#define LJ_ABI_SOFTFP 1 +#define LJ_ABI_EABI 1 +#define LJ_TARGET_ARM 1 +#define LJ_TARGET_EHRETREG 0 +#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */ +#define LJ_TARGET_MASKSHIFT 0 +#define LJ_TARGET_MASKROT 1 +#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ +#define LJ_ARCH_DUALNUM 2 +#if LJ_TARGET_OSX +/* Runtime code generation is restricted on iOS. Complain to Apple, not me. */ +#define LJ_ARCH_NOJIT 1 +#endif + +#elif LUAJIT_TARGET == LUAJIT_ARCH_PPC + +#error "No support for plain PowerPC CPUs (yet)" + +#elif LUAJIT_TARGET == LUAJIT_ARCH_PPCSPE + +#define LJ_ARCH_NAME "ppcspe" +#define LJ_ARCH_BITS 32 +#define LJ_ARCH_ENDIAN LUAJIT_BE +#define LJ_ARCH_HASFPU 1 +#define LJ_ABI_SOFTFP 1 +#define LJ_ABI_EABI 1 +#define LJ_TARGET_PPC 1 +#define LJ_TARGET_PPCSPE 1 +#define LJ_TARGET_EHRETREG 3 +#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */ +#define LJ_TARGET_MASKSHIFT 0 +#define LJ_TARGET_MASKROT 1 +#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */ +#define LJ_ARCH_DUALNUM 0 +#define LJ_ARCH_NOFFI 1 /* NYI: comparisons, calls. */ +#define LJ_ARCH_NOJIT 1 + +#else +#error "No target architecture defined" +#endif + +#ifndef LJ_PAGESIZE +#define LJ_PAGESIZE 4096 +#endif + +/* Check for minimum required compiler versions. */ +#if defined(__GNUC__) +#if LJ_TARGET_X64 +#if __GNUC__ < 4 +#error "Need at least GCC 4.0 or newer" +#endif +#elif LJ_TARGET_ARM +#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2) +#error "Need at least GCC 4.2 or newer" +#endif +#elif LJ_TARGET_PPC +#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 3) +#error "Need at least GCC 4.3 or newer" +#endif +#else +#if (__GNUC__ < 3) || ((__GNUC__ == 3) && __GNUC_MINOR__ < 4) +#error "Need at least GCC 3.4 or newer" +#endif +#endif +#endif + +/* Check target-specific constraints. */ +#ifndef _BUILDVM_H +#if LJ_TARGET_ARM +#if defined(__ARMEB__) +#error "No support for big-endian ARM" +#endif +#if !(__ARM_EABI__ || LJ_TARGET_OSX) +#error "Only ARM EABI or iOS 3.0+ ABI is supported" +#endif +#elif LJ_TARGET_PPC +#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE) +#error "No support for PowerPC CPUs without double-precision FPU" +#endif +#if defined(_LITTLE_ENDIAN) +#error "No support for little-endian PowerPC" +#endif +#if defined(_LP64) +#error "No support for PowerPC 64 bit mode" +#endif +#endif +#endif + +/* Enable or disable the dual-number VM. */ +#if LJ_ARCH_DUALNUM == 2 || \ + (defined(LUAJIT_ENABLE_DUALNUM) && LJ_ARCH_DUALNUM == 1) +#define LJ_DUALNUM 1 +#else +#define LJ_DUALNUM 0 +#endif + +/* Disable or enable the JIT compiler. 
*/ +#if defined(LUAJIT_DISABLE_JIT) || defined(LJ_ARCH_NOJIT) +#define LJ_HASJIT 0 +#else +#define LJ_HASJIT 1 +#endif + +/* Disable or enable the FFI extension. */ +#if defined(LUAJIT_DISABLE_FFI) || defined(LJ_ARCH_NOFFI) +#define LJ_HASFFI 0 +#else +#define LJ_HASFFI 1 +#endif + +#define LJ_SOFTFP (!LJ_ARCH_HASFPU) + +#if LJ_ARCH_ENDIAN == LUAJIT_BE +#define LJ_LE 0 +#define LJ_BE 1 +#define LJ_ENDIAN_SELECT(le, be) be +#define LJ_ENDIAN_LOHI(lo, hi) hi lo +#else +#define LJ_LE 1 +#define LJ_BE 0 +#define LJ_ENDIAN_SELECT(le, be) le +#define LJ_ENDIAN_LOHI(lo, hi) lo hi +#endif + +#if LJ_ARCH_BITS == 32 +#define LJ_32 1 +#define LJ_64 0 +#else +#define LJ_32 0 +#define LJ_64 1 +#endif + +#endif diff --git a/src/luajit2/src/lj_asm.c b/src/luajit2/src/lj_asm.c new file mode 100644 index 0000000000..b3fa773979 --- /dev/null +++ b/src/luajit2/src/lj_asm.c @@ -0,0 +1,1653 @@ +/* +** IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_asm_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_frame.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_mcode.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_snap.h" +#include "lj_asm.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_target.h" + +/* -- Assembler state and common macros ----------------------------------- */ + +/* Assembler state. */ +typedef struct ASMState { + RegCost cost[RID_MAX]; /* Reference and blended allocation cost for regs. */ + + MCode *mcp; /* Current MCode pointer (grows down). */ + MCode *mclim; /* Lower limit for MCode memory + red zone. */ + + IRIns *ir; /* Copy of pointer to IR instructions/constants. */ + jit_State *J; /* JIT compiler state. */ + +#if LJ_TARGET_X86ORX64 + x86ModRM mrm; /* Fused x86 address operand. */ +#endif + + RegSet freeset; /* Set of free registers. */ + RegSet modset; /* Set of registers modified inside the loop. */ + RegSet weakset; /* Set of weakly referenced registers. */ + RegSet phiset; /* Set of PHI registers. */ + + uint32_t flags; /* Copy of JIT compiler flags. */ + int loopinv; /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */ + + int32_t evenspill; /* Next even spill slot. */ + int32_t oddspill; /* Next odd spill slot (or 0). */ + + IRRef curins; /* Reference of current instruction. */ + IRRef stopins; /* Stop assembly before hitting this instruction. */ + IRRef orignins; /* Original T->nins. */ + + IRRef snapref; /* Current snapshot is active after this reference. */ + IRRef snaprename; /* Rename highwater mark for snapshot check. */ + SnapNo snapno; /* Current snapshot number. */ + SnapNo loopsnapno; /* Loop snapshot number. */ + + IRRef fuseref; /* Fusion limit (loopref, 0 or FUSE_DISABLED). */ + IRRef sectref; /* Section base reference (loopref or 0). */ + IRRef loopref; /* Reference of LOOP instruction (or 0). */ + + BCReg topslot; /* Number of slots for stack check (unless 0). */ + MSize gcsteps; /* Accumulated number of GC steps (per section). */ + + GCtrace *T; /* Trace to assemble. */ + GCtrace *parent; /* Parent trace (or NULL). */ + + MCode *mcbot; /* Bottom of reserved MCode. */ + MCode *mctop; /* Top of generated MCode. */ + MCode *mcloop; /* Pointer to loop MCode (or NULL). */ + MCode *invmcp; /* Points to invertible loop branch (or NULL). 
*/ + MCode *flagmcp; /* Pending opportunity to merge flag setting ins. */ + MCode *realign; /* Realign loop if not NULL. */ + +#ifdef RID_NUM_KREF + int32_t krefk[RID_NUM_KREF]; +#endif + IRRef1 phireg[RID_MAX]; /* PHI register references. */ + uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent slot to RegSP map. */ +#if LJ_SOFTFP + uint16_t parentmaphi[LJ_MAX_JSLOTS]; /* Parent slot to hi RegSP map. */ +#endif +} ASMState; + +#define IR(ref) (&as->ir[(ref)]) + +#define ASMREF_TMP1 REF_TRUE /* Temp. register. */ +#define ASMREF_TMP2 REF_FALSE /* Temp. register. */ +#define ASMREF_L REF_NIL /* Stores register for L. */ + +/* Check for variant to invariant references. */ +#define iscrossref(as, ref) ((ref) < as->sectref) + +/* Inhibit memory op fusion from variant to invariant references. */ +#define FUSE_DISABLED (~(IRRef)0) +#define mayfuse(as, ref) ((ref) > as->fuseref) +#define neverfuse(as) (as->fuseref == FUSE_DISABLED) +#define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t)) +#define opisfusableload(o) \ + ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \ + (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD) + +/* Sparse limit checks using a red zone before the actual limit. */ +#define MCLIM_REDZONE 64 +#define checkmclim(as) \ + if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as) + +static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as) +{ + lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE)); +} + +#ifdef RID_NUM_KREF +#define ra_iskref(ref) ((ref) < RID_NUM_KREF) +#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref))) +#define ra_krefk(as, ref) (as->krefk[(ref)]) + +static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k) +{ + IRRef ref = (IRRef)(r - RID_MIN_KREF); + as->krefk[ref] = k; + as->cost[r] = REGCOST(ref, ref); +} + +#else +#define ra_iskref(ref) 0 +#define ra_krefreg(ref) RID_MIN_GPR +#define ra_krefk(as, ref) 0 +#endif + +/* Arch-specific field offsets. */ +static const uint8_t field_ofs[IRFL__MAX+1] = { +#define FLOFS(name, ofs) (uint8_t)(ofs), +IRFLDEF(FLOFS) +#undef FLOFS + 0 +}; + +/* Define this if you want to run LuaJIT with Valgrind. */ +#ifdef LUAJIT_USE_VALGRIND +#include <valgrind/valgrind.h> +#define VG_INVALIDATE(p, sz) VALGRIND_DISCARD_TRANSLATIONS(p, sz) +#else +#define VG_INVALIDATE(p, sz) ((void)0) +#endif + +/* -- Target-specific instruction emitter --------------------------------- */ + +#if LJ_TARGET_X86ORX64 +#include "lj_emit_x86.h" +#elif LJ_TARGET_ARM +#include "lj_emit_arm.h" +#else +#error "Missing instruction emitter for target CPU" +#endif + +/* -- Register allocator debugging ---------------------------------------- */ + +/* #define LUAJIT_DEBUG_RA */ + +#ifdef LUAJIT_DEBUG_RA + +#include <stdio.h> +#include <stdarg.h> + +#define RIDNAME(name) #name, +static const char *const ra_regname[] = { + GPRDEF(RIDNAME) + FPRDEF(RIDNAME) + VRIDDEF(RIDNAME) + NULL +}; +#undef RIDNAME + +static char ra_dbg_buf[65536]; +static char *ra_dbg_p; +static char *ra_dbg_merge; +static MCode *ra_dbg_mcp; + +static void ra_dstart(void) +{ + ra_dbg_p = ra_dbg_buf; + ra_dbg_merge = NULL; + ra_dbg_mcp = NULL; +} + +static void ra_dflush(void) +{ + fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout); + ra_dstart(); +} + +static void ra_dprintf(ASMState *as, const char *fmt, ...) +{ + char *p; + va_list argp; + va_start(argp, fmt); + p = ra_dbg_mcp == as->mcp ? 
ra_dbg_merge : ra_dbg_p; + ra_dbg_mcp = NULL; + p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS); + for (;;) { + const char *e = strchr(fmt, '$'); + if (e == NULL) break; + memcpy(p, fmt, (size_t)(e-fmt)); + p += e-fmt; + if (e[1] == 'r') { + Reg r = va_arg(argp, Reg) & RID_MASK; + if (r <= RID_MAX) { + const char *q; + for (q = ra_regname[r]; *q; q++) + *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q; + } else { + *p++ = '?'; + lua_assert(0); + } + } else if (e[1] == 'f' || e[1] == 'i') { + IRRef ref; + if (e[1] == 'f') + ref = va_arg(argp, IRRef); + else + ref = va_arg(argp, IRIns *) - as->ir; + if (ref >= REF_BIAS) + p += sprintf(p, "%04d", ref - REF_BIAS); + else + p += sprintf(p, "K%03d", REF_BIAS - ref); + } else if (e[1] == 's') { + uint32_t slot = va_arg(argp, uint32_t); + p += sprintf(p, "[sp+0x%x]", sps_scale(slot)); + } else if (e[1] == 'x') { + p += sprintf(p, "%08x", va_arg(argp, int32_t)); + } else { + lua_assert(0); + } + fmt = e+2; + } + va_end(argp); + while (*fmt) + *p++ = *fmt++; + *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n'; + if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) { + fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout); + p = ra_dbg_buf; + } + ra_dbg_p = p; +} + +#define RA_DBG_START() ra_dstart() +#define RA_DBG_FLUSH() ra_dflush() +#define RA_DBG_REF() \ + do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \ + ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0) +#define RA_DBGX(x) ra_dprintf x + +#else +#define RA_DBG_START() ((void)0) +#define RA_DBG_FLUSH() ((void)0) +#define RA_DBG_REF() ((void)0) +#define RA_DBGX(x) ((void)0) +#endif + +/* -- Register allocator -------------------------------------------------- */ + +#define ra_free(as, r) rset_set(as->freeset, (r)) +#define ra_modified(as, r) rset_set(as->modset, (r)) +#define ra_weak(as, r) rset_set(as->weakset, (r)) +#define ra_noweak(as, r) rset_clear(as->weakset, (r)) + +#define ra_used(ir) (ra_hasreg((ir)->r) || ra_hasspill((ir)->s)) + +/* Setup register allocator. */ +static void ra_setup(ASMState *as) +{ + Reg r; + /* Initially all regs (except the stack pointer) are free for use. */ + as->freeset = RSET_INIT; + as->modset = RSET_EMPTY; + as->weakset = RSET_EMPTY; + as->phiset = RSET_EMPTY; + memset(as->phireg, 0, sizeof(as->phireg)); + for (r = RID_MIN_GPR; r < RID_MAX; r++) + as->cost[r] = REGCOST(~0u, 0u); +} + +/* Rematerialize constants. */ +static Reg ra_rematk(ASMState *as, IRRef ref) +{ + IRIns *ir; + Reg r; + if (ra_iskref(ref)) { + r = ra_krefreg(ref); + lua_assert(!rset_test(as->freeset, r)); + ra_free(as, r); + ra_modified(as, r); + emit_loadi(as, r, ra_krefk(as, ref)); + return r; + } + ir = IR(ref); + r = ir->r; + lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); + ra_free(as, r); + ra_modified(as, r); + ir->r = RID_INIT; /* Do not keep any hint. */ + RA_DBGX((as, "remat $i $r", ir, r)); +#if !LJ_SOFTFP + if (ir->o == IR_KNUM) { + emit_loadn(as, r, ir_knum(ir)); + } else +#endif + if (emit_canremat(REF_BASE) && ir->o == IR_BASE) { + ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */ + emit_getgl(as, r, jit_base); + } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) { + lua_assert(irt_isnil(ir->t)); /* REF_NIL stores ASMREF_L register. 
*/ + emit_getgl(as, r, jit_L); +#if LJ_64 + } else if (ir->o == IR_KINT64) { + emit_loadu64(as, r, ir_kint64(ir)->u64); +#endif + } else { + lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || + ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); + emit_loadi(as, r, ir->i); + } + return r; +} + +/* Force a spill. Allocate a new spill slot if needed. */ +static int32_t ra_spill(ASMState *as, IRIns *ir) +{ + int32_t slot = ir->s; + if (!ra_hasspill(slot)) { + if (irt_is64(ir->t)) { + slot = as->evenspill; + as->evenspill += 2; + } else if (as->oddspill) { + slot = as->oddspill; + as->oddspill = 0; + } else { + slot = as->evenspill; + as->oddspill = slot+1; + as->evenspill += 2; + } + if (as->evenspill > 256) + lj_trace_err(as->J, LJ_TRERR_SPILLOV); + ir->s = (uint8_t)slot; + } + return sps_scale(slot); +} + +/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */ +static Reg ra_releasetmp(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + Reg r = ir->r; + lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); + ra_free(as, r); + ra_modified(as, r); + ir->r = RID_INIT; + return r; +} + +/* Restore a register (marked as free). Rematerialize or force a spill. */ +static Reg ra_restore(ASMState *as, IRRef ref) +{ + if (emit_canremat(ref)) { + return ra_rematk(as, ref); + } else { + IRIns *ir = IR(ref); + int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */ + Reg r = ir->r; + lua_assert(ra_hasreg(r)); + ra_sethint(ir->r, r); /* Keep hint. */ + ra_free(as, r); + if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */ + ra_modified(as, r); + RA_DBGX((as, "restore $i $r", ir, r)); + emit_spload(as, ir, r, ofs); + } + return r; + } +} + +/* Save a register to a spill slot. */ +static void ra_save(ASMState *as, IRIns *ir, Reg r) +{ + RA_DBGX((as, "save $i $r", ir, r)); + emit_spstore(as, ir, r, sps_scale(ir->s)); +} + +#define MINCOST(name) \ + if (rset_test(RSET_ALL, RID_##name) && \ + LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \ + cost = as->cost[RID_##name]; + +/* Evict the register with the lowest cost, forcing a restore. */ +static Reg ra_evict(ASMState *as, RegSet allow) +{ + IRRef ref; + RegCost cost = ~(RegCost)0; + lua_assert(allow != RSET_EMPTY); + if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) { + GPRDEF(MINCOST) + } else { + FPRDEF(MINCOST) + } + ref = regcost_ref(cost); + lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins)); + /* Preferably pick any weak ref instead of a non-weak, non-const ref. */ + if (!irref_isk(ref) && (as->weakset & allow)) { + IRIns *ir = IR(ref); + if (!rset_test(as->weakset, ir->r)) + ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]); + } + return ra_restore(as, ref); +} + +/* Pick any register (marked as free). Evict on-demand. */ +static Reg ra_pick(ASMState *as, RegSet allow) +{ + RegSet pick = as->freeset & allow; + if (!pick) + return ra_evict(as, allow); + else + return rset_picktop(pick); +} + +/* Get a scratch register (marked as free). */ +static Reg ra_scratch(ASMState *as, RegSet allow) +{ + Reg r = ra_pick(as, allow); + ra_modified(as, r); + RA_DBGX((as, "scratch $r", r)); + return r; +} + +/* Evict all registers from a set (if not free). 
*/ +static void ra_evictset(ASMState *as, RegSet drop) +{ + as->modset |= drop; + drop &= ~as->freeset; + while (drop) { + Reg r = rset_pickbot(drop); + ra_restore(as, regcost_ref(as->cost[r])); + rset_clear(drop, r); + checkmclim(as); + } +} + +/* Evict (rematerialize) all registers allocated to constants. */ +static void ra_evictk(ASMState *as) +{ + RegSet work = ~as->freeset & RSET_ALL; + while (work) { + Reg r = rset_pickbot(work); + IRRef ref = regcost_ref(as->cost[r]); + if (irref_isk(ref)) { + ra_rematk(as, ref); + checkmclim(as); + } + rset_clear(work, r); + } +} + +#ifdef RID_NUM_KREF +/* Allocate a register for a constant. */ +static Reg ra_allock(ASMState *as, int32_t k, RegSet allow) +{ + /* First try to find a register which already holds the same constant. */ + RegSet pick, work = ~as->freeset & RSET_GPR; + Reg r; + while (work) { + IRRef ref; + r = rset_pickbot(work); + ref = regcost_ref(as->cost[r]); + if (emit_canremat(ref) && + k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i)) + return r; + rset_clear(work, r); + } + pick = as->freeset & allow; + if (pick) { + /* Constants should preferably get unmodified registers. */ + if ((pick & ~as->modset)) + pick &= ~as->modset; + r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */ + } else { + r = ra_evict(as, allow); + } + RA_DBGX((as, "allock $x $r", k, r)); + ra_setkref(as, r, k); + rset_clear(as->freeset, r); + ra_noweak(as, r); + return r; +} + +/* Allocate a specific register for a constant. */ +static void ra_allockreg(ASMState *as, int32_t k, Reg r) +{ + Reg kr = ra_allock(as, k, RID2RSET(r)); + if (kr != r) { + IRIns irdummy; + irdummy.t.irt = IRT_INT; + ra_scratch(as, RID2RSET(r)); + emit_movrr(as, &irdummy, r, kr); + } +} +#else +#define ra_allockreg(as, k, r) emit_loadi(as, (r), (k)) +#endif + +/* Allocate a register for ref from the allowed set of registers. +** Note: this function assumes the ref does NOT have a register yet! +** Picks an optimal register, sets the cost and marks the register as non-free. +*/ +static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + RegSet pick = as->freeset & allow; + Reg r; + lua_assert(ra_noreg(ir->r)); + if (pick) { + /* First check register hint from propagation or PHI. */ + if (ra_hashint(ir->r)) { + r = ra_gethint(ir->r); + if (rset_test(pick, r)) /* Use hint register if possible. */ + goto found; + /* Rematerialization is cheaper than missing a hint. */ + if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) { + ra_rematk(as, regcost_ref(as->cost[r])); + goto found; + } + RA_DBGX((as, "hintmiss $f $r", ref, r)); + } + /* Invariants should preferably get unmodified registers. */ + if (ref < as->loopref && !irt_isphi(ir->t)) { + if ((pick & ~as->modset)) + pick &= ~as->modset; + r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */ + } else { + /* We've got plenty of regs, so get callee-save regs if possible. */ + if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH)) + pick &= ~RSET_SCRATCH; + r = rset_picktop(pick); + } + } else { + r = ra_evict(as, allow); + } +found: + RA_DBGX((as, "alloc $f $r", ref, r)); + ir->r = (uint8_t)r; + rset_clear(as->freeset, r); + ra_noweak(as, r); + as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t)); + return r; +} + +/* Allocate a register on-demand. */ +static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow) +{ + Reg r = IR(ref)->r; + /* Note: allow is ignored if the register is already allocated. 
*/ + if (ra_noreg(r)) r = ra_allocref(as, ref, allow); + ra_noweak(as, r); + return r; +} + +/* Rename register allocation and emit move. */ +static void ra_rename(ASMState *as, Reg down, Reg up) +{ + IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]); + IRIns *ir = IR(ref); + ir->r = (uint8_t)up; + as->cost[down] = 0; + lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR)); + lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up)); + ra_free(as, down); /* 'down' is free ... */ + ra_modified(as, down); + rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */ + ra_noweak(as, up); + RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up)); + emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */ + if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */ + lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno); + ren = tref_ref(lj_ir_emit(as->J)); + as->ir = as->T->ir; /* The IR may have been reallocated. */ + IR(ren)->r = (uint8_t)down; + IR(ren)->s = SPS_NONE; + } +} + +/* Pick a destination register (marked as free). +** Caveat: allow is ignored if there's already a destination register. +** Use ra_destreg() to get a specific register. +*/ +static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow) +{ + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + } else { + if (ra_hashint(dest) && rset_test(as->freeset, ra_gethint(dest))) { + dest = ra_gethint(dest); + ra_modified(as, dest); + RA_DBGX((as, "dest $r", dest)); + } else { + dest = ra_scratch(as, allow); + } + ir->r = dest; + } + if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest); + return dest; +} + +/* Force a specific destination register (marked as free). */ +static void ra_destreg(ASMState *as, IRIns *ir, Reg r) +{ + Reg dest = ra_dest(as, ir, RID2RSET(r)); + if (dest != r) { + ra_scratch(as, RID2RSET(r)); + emit_movrr(as, ir, dest, r); + } +} + +#if LJ_TARGET_X86ORX64 +/* Propagate dest register to left reference. Emit moves as needed. +** This is a required fixup step for all 2-operand machine instructions. +*/ +static void ra_left(ASMState *as, Reg dest, IRRef lref) +{ + IRIns *ir = IR(lref); + Reg left = ir->r; + if (ra_noreg(left)) { + if (irref_isk(lref)) { + if (ir->o == IR_KNUM) { + cTValue *tv = ir_knum(ir); + /* FP remat needs a load except for +0. Still better than eviction. */ + if (tvispzero(tv) || !(as->freeset & RSET_FPR)) { + emit_loadn(as, dest, tv); + return; + } +#if LJ_64 + } else if (ir->o == IR_KINT64) { + emit_loadu64(as, dest, ir_kint64(ir)->u64); + return; +#endif + } else { + lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || + ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); + emit_loadi(as, dest, ir->i); + return; + } + } + if (!ra_hashint(left) && !iscrossref(as, lref)) + ra_sethint(ir->r, dest); /* Propagate register hint. */ + left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR); + } + ra_noweak(as, left); + /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */ + if (dest != left) { + /* Use register renaming if dest is the PHI reg. */ + if (irt_isphi(ir->t) && as->phireg[dest] == lref) { + ra_modified(as, left); + ra_rename(as, left, dest); + } else { + emit_movrr(as, ir, dest, left); + } + } +} +#endif + +/* -- Snapshot handling --------- ----------------------------------------- */ + +/* Can we rematerialize a KNUM instead of forcing a spill? 
*/ +static int asm_snap_canremat(ASMState *as) +{ + Reg r; + for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++) + if (irref_isk(regcost_ref(as->cost[r]))) + return 1; + return 0; +} + +/* Allocate register or spill slot for a ref that escapes to a snapshot. */ +static void asm_snap_alloc1(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (!ra_used(ir)) { + RegSet allow = (!LJ_SOFTFP && irt_isnum(ir->t)) ? RSET_FPR : RSET_GPR; + /* Get a weak register if we have a free one or can rematerialize. */ + if ((as->freeset & allow) || + (allow == RSET_FPR && asm_snap_canremat(as))) { + Reg r = ra_allocref(as, ref, allow); /* Allocate a register. */ + if (!irt_isphi(ir->t)) + ra_weak(as, r); /* But mark it as weakly referenced. */ + checkmclim(as); + RA_DBGX((as, "snapreg $f $r", ref, ir->r)); + } else { + ra_spill(as, ir); /* Otherwise force a spill slot. */ + RA_DBGX((as, "snapspill $f $s", ref, ir->s)); + } + } +} + +/* Allocate refs escaping to a snapshot. */ +static void asm_snap_alloc(ASMState *as) +{ + SnapShot *snap = &as->T->snap[as->snapno]; + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRRef ref = snap_ref(sn); + if (!irref_isk(ref)) { + asm_snap_alloc1(as, ref); + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) + asm_snap_alloc1(as, ref+1); + } + } +} + +/* All guards for a snapshot use the same exitno. This is currently the +** same as the snapshot number. Since the exact origin of the exit cannot +** be determined, all guards for the same snapshot must exit with the same +** RegSP mapping. +** A renamed ref which has been used in a prior guard for the same snapshot +** would cause an inconsistency. The easy way out is to force a spill slot. +*/ +static int asm_snap_checkrename(ASMState *as, IRRef ren) +{ + SnapShot *snap = &as->T->snap[as->snapno]; + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRRef ref = snap_ref(sn); + if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) { + IRIns *ir = IR(ref); + ra_spill(as, ir); /* Register renamed, so force a spill slot. */ + RA_DBGX((as, "snaprensp $f $s", ref, ir->s)); + return 1; /* Found. */ + } + } + return 0; /* Not found. */ +} + +/* Prepare snapshot for next guard instruction. */ +static void asm_snap_prep(ASMState *as) +{ + if (as->curins < as->snapref) { + do { + lua_assert(as->snapno != 0); + as->snapno--; + as->snapref = as->T->snap[as->snapno].ref; + } while (as->curins < as->snapref); + asm_snap_alloc(as); + as->snaprename = as->T->nins; + } else { + /* Process any renames above the highwater mark. */ + for (; as->snaprename < as->T->nins; as->snaprename++) { + IRIns *ir = IR(as->snaprename); + if (asm_snap_checkrename(as, ir->op1)) + ir->op2 = REF_BIAS-1; /* Kill rename. */ + } + } +} + +/* -- Miscellaneous helpers ----------------------------------------------- */ + +/* Collect arguments from CALL* and CARG instructions. */ +static void asm_collectargs(ASMState *as, IRIns *ir, + const CCallInfo *ci, IRRef *args) +{ + uint32_t n = CCI_NARGS(ci); + lua_assert(n <= CCI_NARGS_MAX); + if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; } + while (n-- > 1) { + ir = IR(ir->op1); + lua_assert(ir->o == IR_CARG); + args[n] = ir->op2 == REF_NIL ? 0 : ir->op2; + } + args[0] = ir->op1 == REF_NIL ? 0 : ir->op1; + lua_assert(IR(ir->op1)->o != IR_CARG); +} + +/* Reconstruct CCallInfo flags for CALLX*. 
*/ +static uint32_t asm_callx_flags(ASMState *as, IRIns *ir) +{ + uint32_t nargs = 0; + if (ir->op1 != REF_NIL) { /* Count number of arguments first. */ + IRIns *ira = IR(ir->op1); + nargs++; + while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); } + } + /* NYI: fastcall etc. */ + return (nargs | (ir->t.irt << CCI_OTSHIFT)); +} + +/* Get extent of the stack for a snapshot. */ +static BCReg asm_stack_extent(ASMState *as, SnapShot *snap, BCReg *ptopslot) +{ + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + BCReg baseslot = 0, topslot = 0; + /* Must check all frames to find topslot (outer can be larger than inner). */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + if ((sn & SNAP_FRAME)) { + IRIns *ir = IR(snap_ref(sn)); + GCfunc *fn = ir_kfunc(ir); + if (isluafunc(fn)) { + BCReg s = snap_slot(sn); + BCReg fs = s + funcproto(fn)->framesize; + if (fs > topslot) topslot = fs; + baseslot = s; + } + } + } + *ptopslot = topslot; + return baseslot; +} + +/* Calculate stack adjustment. */ +static int32_t asm_stack_adjust(ASMState *as) +{ + if (as->evenspill <= SPS_FIXED) + return 0; + return sps_scale(sps_align(as->evenspill)); +} + +/* Must match with hash*() in lj_tab.c. */ +static uint32_t ir_khash(IRIns *ir) +{ + uint32_t lo, hi; + if (irt_isstr(ir->t)) { + return ir_kstr(ir)->hash; + } else if (irt_isnum(ir->t)) { + lo = ir_knum(ir)->u32.lo; + hi = ir_knum(ir)->u32.hi << 1; + } else if (irt_ispri(ir->t)) { + lua_assert(!irt_isnil(ir->t)); + return irt_type(ir->t)-IRT_FALSE; + } else { + lua_assert(irt_isgcv(ir->t)); + lo = u32ptr(ir_kgc(ir)); + hi = lo + HASH_BIAS; + } + return hashrot(lo, hi); +} + +/* Flush instruction cache. */ +static void asm_cache_flush(MCode *start, MCode *end) +{ + VG_INVALIDATE(start, (char *)end-(char *)start); +#if LJ_TARGET_X86ORX64 + UNUSED(start); UNUSED(end); +#else +#if defined(__GNUC__) + __clear_cache(start, end); +#else +#error "Missing builtin to flush instruction cache" +#endif +#endif +} + +/* -- Allocations --------------------------------------------------------- */ + +static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args); +static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci); + +static void asm_snew(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new]; + IRRef args[3]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* const char *str */ + args[2] = ir->op2; /* size_t len */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCstr * */ + asm_gencall(as, ci, args); +} + +static void asm_tnew(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1]; + IRRef args[2]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ASMREF_TMP1; /* uint32_t ahsize */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCtab * */ + asm_gencall(as, ci, args); + ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1)); +} + +static void asm_tdup(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup]; + IRRef args[2]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* const GCtab *kt */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCtab * */ + asm_gencall(as, ci, args); +} + +/* -- PHI and loop handling ----------------------------------------------- */ + +/* Break a PHI cycle by renaming to a free register (evict if needed). 
*/ +static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby, + RegSet allow) +{ + RegSet candidates = blocked & allow; + if (candidates) { /* If this register file has candidates. */ + /* Note: the set for ra_pick cannot be empty, since each register file + ** has some registers never allocated to PHIs. + */ + Reg down, up = ra_pick(as, ~blocked & allow); /* Get a free register. */ + if (candidates & ~blockedby) /* Optimize shifts, else it's a cycle. */ + candidates = candidates & ~blockedby; + down = rset_picktop(candidates); /* Pick candidate PHI register. */ + ra_rename(as, down, up); /* And rename it to the free register. */ + } +} + +/* PHI register shuffling. +** +** The allocator tries hard to preserve PHI register assignments across +** the loop body. Most of the time this loop does nothing, since there +** are no register mismatches. +** +** If a register mismatch is detected and ... +** - the register is currently free: rename it. +** - the register is blocked by an invariant: restore/remat and rename it. +** - Otherwise the register is used by another PHI, so mark it as blocked. +** +** The renames are order-sensitive, so just retry the loop if a register +** is marked as blocked, but has been freed in the meantime. A cycle is +** detected if all of the blocked registers are allocated. To break the +** cycle rename one of them to a free register and retry. +** +** Note that PHI spill slots are kept in sync and don't need to be shuffled. +*/ +static void asm_phi_shuffle(ASMState *as) +{ + RegSet work; + + /* Find and resolve PHI register mismatches. */ + for (;;) { + RegSet blocked = RSET_EMPTY; + RegSet blockedby = RSET_EMPTY; + RegSet phiset = as->phiset; + while (phiset) { /* Check all left PHI operand registers. */ + Reg r = rset_pickbot(phiset); + IRIns *irl = IR(as->phireg[r]); + Reg left = irl->r; + if (r != left) { /* Mismatch? */ + if (!rset_test(as->freeset, r)) { /* PHI register blocked? */ + IRRef ref = regcost_ref(as->cost[r]); + /* Blocked by other PHI (w/reg)? */ + if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) { + rset_set(blocked, r); + if (ra_hasreg(left)) + rset_set(blockedby, left); + left = RID_NONE; + } else { /* Otherwise grab register from invariant. */ + ra_restore(as, ref); + checkmclim(as); + } + } + if (ra_hasreg(left)) { + ra_rename(as, left, r); + checkmclim(as); + } + } + rset_clear(phiset, r); + } + if (!blocked) break; /* Finished. */ + if (!(as->freeset & blocked)) { /* Break cycles if none are free. */ + asm_phi_break(as, blocked, blockedby, RSET_GPR); + if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR); + checkmclim(as); + } /* Else retry some more renames. */ + } + + /* Restore/remat invariants whose registers are modified inside the loop. */ + work = as->modset & ~(as->freeset | as->phiset); + while (work) { + Reg r = rset_pickbot(work); + ra_restore(as, regcost_ref(as->cost[r])); + rset_clear(work, r); + checkmclim(as); + } + + /* Allocate and save all unsaved PHI regs and clear marks. */ + work = as->phiset; + while (work) { + Reg r = rset_picktop(work); + IRRef lref = as->phireg[r]; + IRIns *ir = IR(lref); + if (ra_hasspill(ir->s)) { /* Left PHI gained a spill slot? */ + irt_clearmark(ir->t); /* Handled here, so clear marker now. */ + ra_alloc1(as, lref, RID2RSET(r)); + ra_save(as, ir, r); /* Save to spill slot inside the loop. */ + checkmclim(as); + } + rset_clear(work, r); + } +} + +/* Emit renames for left PHIs which are only spilled outside the loop. 
*/ +static void asm_phi_fixup(ASMState *as) +{ + RegSet work = as->phiset; + while (work) { + Reg r = rset_picktop(work); + IRRef lref = as->phireg[r]; + IRIns *ir = IR(lref); + /* Left PHI gained a spill slot before the loop? */ + if (irt_ismarked(ir->t) && ra_hasspill(ir->s)) { + IRRef ren; + lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno); + ren = tref_ref(lj_ir_emit(as->J)); + as->ir = as->T->ir; /* The IR may have been reallocated. */ + IR(ren)->r = (uint8_t)r; + IR(ren)->s = SPS_NONE; + } + irt_clearmark(ir->t); /* Always clear marker. */ + rset_clear(work, r); + } +} + +/* Setup right PHI reference. */ +static void asm_phi(ASMState *as, IRIns *ir) +{ + RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & + ~as->phiset; + RegSet afree = (as->freeset & allow); + IRIns *irl = IR(ir->op1); + IRIns *irr = IR(ir->op2); + /* Spill slot shuffling is not implemented yet (but rarely needed). */ + if (ra_hasspill(irl->s) || ra_hasspill(irr->s)) + lj_trace_err(as->J, LJ_TRERR_NYIPHI); + /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */ + if ((afree & (afree-1))) { /* Two or more free registers? */ + Reg r; + if (ra_noreg(irr->r)) { /* Get a register for the right PHI. */ + r = ra_allocref(as, ir->op2, allow); + } else { /* Duplicate right PHI, need a copy (rare). */ + r = ra_scratch(as, allow); + emit_movrr(as, irr, r, irr->r); + } + ir->r = (uint8_t)r; + rset_set(as->phiset, r); + as->phireg[r] = (IRRef1)ir->op1; + irt_setmark(irl->t); /* Marks left PHIs _with_ register. */ + if (ra_noreg(irl->r)) + ra_sethint(irl->r, r); /* Set register hint for left PHI. */ + } else { /* Otherwise allocate a spill slot. */ + /* This is overly restrictive, but it triggers only on synthetic code. */ + if (ra_hasreg(irl->r) || ra_hasreg(irr->r)) + lj_trace_err(as->J, LJ_TRERR_NYIPHI); + ra_spill(as, ir); + irl->s = irr->s = ir->s; /* Sync left/right PHI spill slots. */ + } +} + +static void asm_gc_check(ASMState *as); +static void asm_loop_fixup(ASMState *as); + +/* Middle part of a loop. */ +static void asm_loop(ASMState *as) +{ + /* LOOP is a guard, so the snapno is up to date. */ + as->loopsnapno = as->snapno; + if (as->gcsteps) + asm_gc_check(as); + /* LOOP marks the transition from the variant to the invariant part. */ + as->flagmcp = as->invmcp = NULL; + as->sectref = 0; + if (!neverfuse(as)) as->fuseref = 0; + asm_phi_shuffle(as); + asm_loop_fixup(as); + as->mcloop = as->mcp; + RA_DBGX((as, "===== LOOP =====")); + if (!as->realign) RA_DBG_FLUSH(); +} + +/* -- Target-specific assembler ------------------------------------------- */ + +#if LJ_TARGET_X86ORX64 +#include "lj_asm_x86.h" +#elif LJ_TARGET_ARM +#include "lj_asm_arm.h" +#else +#error "Missing instruction emitter for target CPU" +#endif + +/* -- Head of trace ------------------------------------------------------- */ + +/* Head of a root trace. */ +static void asm_head_root(ASMState *as) +{ + int32_t spadj; + asm_head_root_base(as); + emit_setvmstate(as, (int32_t)as->T->traceno); + spadj = asm_stack_adjust(as); + as->T->spadjust = (uint16_t)spadj; + emit_spsub(as, spadj); + /* Root traces assume a checked stack for the starting proto. */ + as->T->topslot = gcref(as->T->startpt)->pt.framesize; +} + +/* Get RegSP for parent slot. */ +static LJ_AINLINE RegSP asm_head_parentrs(ASMState *as, IRIns *ir) +{ +#if LJ_SOFTFP + if (ir->o == IR_HIOP) return as->parentmaphi[(ir-1)->op1]; +#endif + return as->parentmap[ir->op1]; +} + +/* Head of a side trace. 
+** +** The current simplistic algorithm requires that all slots inherited +** from the parent are live in a register between pass 2 and pass 3. This +** avoids the complexity of stack slot shuffling. But of course this may +** overflow the register set in some cases and cause the dreaded error: +** "NYI: register coalescing too complex". A refined algorithm is needed. +*/ +static void asm_head_side(ASMState *as) +{ + IRRef1 sloadins[RID_MAX]; + RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */ + RegSet live = RSET_EMPTY; /* Live parent registers. */ + IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */ + int32_t spadj, spdelta; + int pass2 = 0; + int pass3 = 0; + IRRef i; + + allow = asm_head_side_base(as, irp, allow); + + /* Scan all parent SLOADs and collect register dependencies. */ + for (i = as->stopins; i > REF_BASE; i--) { + IRIns *ir = IR(i); + RegSP rs; + lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) || + (LJ_SOFTFP && ir->o == IR_HIOP)); + rs = asm_head_parentrs(as, ir); + if (ra_hasreg(ir->r)) { + rset_clear(allow, ir->r); + if (ra_hasspill(ir->s)) + ra_save(as, ir, ir->r); + } else if (ra_hasspill(ir->s)) { + irt_setmark(ir->t); + pass2 = 1; + } + if (ir->r == rs) { /* Coalesce matching registers right now. */ + ra_free(as, ir->r); + } else if (ra_hasspill(regsp_spill(rs))) { + if (ra_hasreg(ir->r)) + pass3 = 1; + } else if (ra_used(ir)) { + sloadins[rs] = (IRRef1)i; + rset_set(live, rs); /* Block live parent register. */ + } + } + + /* Calculate stack frame adjustment. */ + spadj = asm_stack_adjust(as); + spdelta = spadj - (int32_t)as->parent->spadjust; + if (spdelta < 0) { /* Don't shrink the stack frame. */ + spadj = (int32_t)as->parent->spadjust; + spdelta = 0; + } + as->T->spadjust = (uint16_t)spadj; + + /* Reload spilled target registers. */ + if (pass2) { + for (i = as->stopins; i > REF_BASE; i--) { + IRIns *ir = IR(i); + if (irt_ismarked(ir->t)) { + RegSet mask; + Reg r; + RegSP rs; + irt_clearmark(ir->t); + rs = asm_head_parentrs(as, ir); + if (!ra_hasspill(regsp_spill(rs))) + ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */ + else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s)) + continue; /* Same spill slot, do nothing. */ + mask = ((!LJ_SOFTFP && irt_isnum(ir->t)) ? RSET_FPR : RSET_GPR) & allow; + if (mask == RSET_EMPTY) + lj_trace_err(as->J, LJ_TRERR_NYICOAL); + r = ra_allocref(as, i, mask); + ra_save(as, ir, r); + rset_clear(allow, r); + if (r == rs) { /* Coalesce matching registers right now. */ + ra_free(as, r); + rset_clear(live, r); + } else if (ra_hasspill(regsp_spill(rs))) { + pass3 = 1; + } + checkmclim(as); + } + } + } + + /* Store trace number and adjust stack frame relative to the parent. */ + emit_setvmstate(as, (int32_t)as->T->traceno); + emit_spsub(as, spdelta); + +#if !LJ_TARGET_X86ORX64 + /* Restore BASE register from parent spill slot. */ + if (ra_hasspill(irp->s)) + emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s)); +#endif + + /* Restore target registers from parent spill slots. */ + if (pass3) { + RegSet work = ~as->freeset & RSET_ALL; + while (work) { + Reg r = rset_pickbot(work); + IRIns *ir = IR(regcost_ref(as->cost[r])); + RegSP rs = asm_head_parentrs(as, ir); + rset_clear(work, r); + if (ra_hasspill(regsp_spill(rs))) { + int32_t ofs = sps_scale(regsp_spill(rs)); + ra_free(as, r); + emit_spload(as, ir, r, ofs); + checkmclim(as); + } + } + } + + /* Shuffle registers to match up target regs with parent regs. 
*/ + for (;;) { + RegSet work; + + /* Repeatedly coalesce free live registers by moving to their target. */ + while ((work = as->freeset & live) != RSET_EMPTY) { + Reg rp = rset_pickbot(work); + IRIns *ir = IR(sloadins[rp]); + rset_clear(live, rp); + rset_clear(allow, rp); + ra_free(as, ir->r); + emit_movrr(as, ir, ir->r, rp); + checkmclim(as); + } + + /* We're done if no live registers remain. */ + if (live == RSET_EMPTY) + break; + + /* Break cycles by renaming one target to a temp. register. */ + if (live & RSET_GPR) { + RegSet tmpset = as->freeset & ~live & allow & RSET_GPR; + if (tmpset == RSET_EMPTY) + lj_trace_err(as->J, LJ_TRERR_NYICOAL); + ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset)); + } + if (!LJ_SOFTFP && (live & RSET_FPR)) { + RegSet tmpset = as->freeset & ~live & allow & RSET_FPR; + if (tmpset == RSET_EMPTY) + lj_trace_err(as->J, LJ_TRERR_NYICOAL); + ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset)); + } + checkmclim(as); + /* Continue with coalescing to fix up the broken cycle(s). */ + } + + /* Inherit top stack slot already checked by parent trace. */ + as->T->topslot = as->parent->topslot; + if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */ + as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */ + /* Reuse the parent exit in the context of the parent trace. */ + asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, as->J->exitno); + } +} + +/* -- Tail of trace ------------------------------------------------------- */ + +/* Link to another trace. */ +static void asm_tail_link(ASMState *as) +{ + SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */ + SnapShot *snap = &as->T->snap[snapno]; + BCReg baseslot = asm_stack_extent(as, snap, &as->topslot); + + checkmclim(as); + ra_allocref(as, REF_BASE, RID2RSET(RID_BASE)); + + if (as->T->link == TRACE_INTERP) { + /* Setup fixed registers for exit to interpreter. */ + const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]); + int32_t mres; + if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */ + BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins; + if (bc_isret(bc_op(*retpc))) + pc = retpc; + } + ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH); + ra_allockreg(as, i32ptr(pc), RID_LPC); + mres = (int32_t)(snap->nslots - baseslot); + switch (bc_op(*pc)) { + case BC_CALLM: case BC_CALLMT: + mres -= (int32_t)(1 + bc_a(*pc) + bc_c(*pc)); break; + case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break; + case BC_TSETM: mres -= (int32_t)bc_a(*pc); break; + default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break; + } + ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */ + } else if (baseslot) { + /* Save modified BASE for linking to trace with higher start frame. */ + emit_setgl(as, RID_BASE, jit_base); + } + emit_addptr(as, RID_BASE, 8*(int32_t)baseslot); + + /* Sync the interpreter state with the on-trace state. */ + asm_stack_restore(as, snap); + + /* Root traces that grow the stack need to check the stack at the end. */ + if (!as->parent && as->topslot) + asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno); +} + +/* -- Trace setup --------------------------------------------------------- */ + +/* Clear reg/sp for all instructions and add register hints. */ +static void asm_setup_regsp(ASMState *as) +{ + GCtrace *T = as->T; + IRRef i, nins; + int inloop; +#if LJ_TARGET_ARM + uint32_t rload = 0xa6402a64; +#endif + + ra_setup(as); + + /* Clear reg/sp for constants. 
*/ + for (i = T->nk; i < REF_BIAS; i++) + IR(i)->prev = REGSP_INIT; + + /* REF_BASE is used for implicit references to the BASE register. */ + IR(REF_BASE)->prev = REGSP_HINT(RID_BASE); + + nins = T->nins; + if (IR(nins-1)->o == IR_RENAME) { + do { nins--; } while (IR(nins-1)->o == IR_RENAME); + T->nins = nins; /* Remove any renames left over from ASM restart. */ + } + as->snaprename = nins; + as->snapref = nins; + as->snapno = T->nsnap; + + as->stopins = REF_BASE; + as->orignins = nins; + as->curins = nins; + + inloop = 0; + as->evenspill = SPS_FIRST; + for (i = REF_FIRST; i < nins; i++) { + IRIns *ir = IR(i); + switch (ir->o) { + case IR_LOOP: + inloop = 1; + break; + /* Set hints for slot loads from a parent trace. */ + case IR_SLOAD: + if ((ir->op2 & IRSLOAD_PARENT)) { + RegSP rs = as->parentmap[ir->op1]; + lua_assert(regsp_used(rs)); + as->stopins = i; + if (!ra_hasspill(regsp_spill(rs)) && ra_hasreg(regsp_reg(rs))) { + ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs)); + continue; + } + } +#if LJ_TARGET_ARM + if ((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP) { + ir->prev = (uint16_t)REGSP_HINT((rload & 15)); + rload = lj_ror(rload, 4); + continue; + } +#endif + break; +#if LJ_TARGET_ARM + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + ir->prev = (uint16_t)REGSP_HINT((rload & 15)); + rload = lj_ror(rload, 4); + continue; +#endif + case IR_CALLXS: { + CCallInfo ci; + ci.flags = asm_callx_flags(as, ir); + ir->prev = asm_setup_call_slots(as, ir, &ci); + if (inloop) + as->modset |= RSET_SCRATCH; + continue; + } + case IR_CALLN: case IR_CALLL: case IR_CALLS: { + const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; + ir->prev = asm_setup_call_slots(as, ir, ci); + if (inloop) + as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ? + (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH; + continue; + } +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) + case IR_HIOP: + switch ((ir-1)->o) { +#if LJ_SOFTFP + case IR_SLOAD: + if (((ir-1)->op2 & IRSLOAD_PARENT)) { + RegSP rs = as->parentmaphi[(ir-1)->op1]; + lua_assert(regsp_used(rs)); + as->stopins = i; + if (!ra_hasspill(regsp_spill(rs)) && ra_hasreg(regsp_reg(rs))) { + ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs)); + continue; + } + } +#if LJ_TARGET_ARM + /* fallthrough */ + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + if (ra_hashint((ir-1)->r)) { + ir->prev = (ir-1)->prev + 1; + continue; + } +#endif + break; +#endif + case IR_CALLN: case IR_CALLXS: +#if LJ_SOFTFP + case IR_MIN: case IR_MAX: +#endif + ir->prev = REGSP_HINT(RID_RETHI); + continue; + default: + break; + } + break; +#endif +#if LJ_SOFTFP + case IR_MIN: case IR_MAX: + if ((ir+1)->o != IR_HIOP) break; + /* fallthrough */ +#endif + /* C calls evict all scratch regs and return results in RID_RET. */ + case IR_SNEW: case IR_XSNEW: case IR_NEWREF: + if (REGARG_NUMGPR < 3 && as->evenspill < 3) + as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. 
*/ + case IR_TNEW: case IR_TDUP: case IR_CNEW: case IR_CNEWI: case IR_TOSTR: + ir->prev = REGSP_HINT(RID_RET); + if (inloop) + as->modset = RSET_SCRATCH; + continue; + case IR_STRTO: case IR_OBAR: + if (inloop) + as->modset = RSET_SCRATCH; + break; + case IR_POW: + if (!LJ_SOFTFP && irt_isnum(ir->t)) { +#if LJ_TARGET_X86ORX64 + ir->prev = REGSP_HINT(RID_XMM0); + if (inloop) + as->modset |= RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX); +#else + ir->prev = REGSP_HINT(RID_FPRET); + if (inloop) + as->modset |= RSET_SCRATCH; +#endif + continue; + } + /* fallthrough for integer POW */ + case IR_DIV: case IR_MOD: +#if LJ_64 && LJ_HASFFI + if (!irt_isnum(ir->t)) { + ir->prev = REGSP_HINT(RID_RET); + if (inloop) + as->modset |= (RSET_SCRATCH & RSET_GPR); + continue; + } +#endif + break; + case IR_FPMATH: +#if LJ_TARGET_X86ORX64 + if (ir->op2 == IRFPM_EXP2) { /* May be joined to lj_vm_pow_sse. */ + ir->prev = REGSP_HINT(RID_XMM0); +#if !LJ_64 + if (as->evenspill < 4) /* Leave room for 16 byte scratch area. */ + as->evenspill = 4; +#endif + if (inloop) + as->modset |= RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX); + continue; + } else if (ir->op2 <= IRFPM_TRUNC && !(as->flags & JIT_F_SSE4_1)) { + ir->prev = REGSP_HINT(RID_XMM0); + if (inloop) + as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); + continue; + } + break; +#else + ir->prev = REGSP_HINT(RID_FPRET); + if (inloop) + as->modset |= RSET_SCRATCH; + continue; +#endif +#if LJ_TARGET_X86ORX64 + /* Non-constant shift counts need to be in RID_ECX on x86/x64. */ + case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR: + if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) { + IR(ir->op2)->r = REGSP_HINT(RID_ECX); + if (inloop) + rset_set(as->modset, RID_ECX); + } + break; +#endif + /* Do not propagate hints across type conversions. */ + case IR_TOBIT: + break; + case IR_CONV: + if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM || + (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT) + break; + /* fallthrough */ + default: + /* Propagate hints across likely 'op reg, imm' or 'op reg'. */ + if (irref_isk(ir->op2) && !irref_isk(ir->op1)) { + ir->prev = IR(ir->op1)->prev; + continue; + } + break; + } + ir->prev = REGSP_INIT; + } + if ((as->evenspill & 1)) + as->oddspill = as->evenspill++; + else + as->oddspill = 0; +} + +/* -- Assembler core ------------------------------------------------------ */ + +/* Assemble a trace. */ +void lj_asm_trace(jit_State *J, GCtrace *T) +{ + ASMState as_; + ASMState *as = &as_; + + /* Ensure an initialized instruction beyond the last one for HIOP checks. */ + J->cur.nins = lj_ir_nextins(J); + J->cur.ir[J->cur.nins].o = IR_NOP; + + /* Setup initial state. Copy some fields to reduce indirections. */ + as->J = J; + as->T = T; + as->ir = T->ir; + as->flags = J->flags; + as->loopref = J->loopref; + as->realign = NULL; + as->loopinv = 0; + if (J->parent) { + as->parent = traceref(J, J->parent); + lj_snap_regspmap(as->parentmap, as->parent, J->exitno, 0); +#if LJ_SOFTFP + lj_snap_regspmap(as->parentmaphi, as->parent, J->exitno, 1); +#endif + } else { + as->parent = NULL; + } + as->mctop = lj_mcode_reserve(J, &as->mcbot); /* Reserve MCode memory. */ + as->mcp = as->mctop; + as->mclim = as->mcbot + MCLIM_REDZONE; + asm_setup_target(as); + + do { + as->mcp = as->mctop; + as->curins = T->nins; + RA_DBG_START(); + RA_DBGX((as, "===== STOP =====")); + + /* General trace setup. Emit tail of trace. 
*/ + asm_tail_prep(as); + as->mcloop = NULL; + as->flagmcp = NULL; + as->topslot = 0; + as->gcsteps = 0; + as->sectref = as->loopref; + as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED; + asm_setup_regsp(as); + if (!as->loopref) + asm_tail_link(as); + + /* Assemble a trace in linear backwards order. */ + for (as->curins--; as->curins > as->stopins; as->curins--) { + IRIns *ir = IR(as->curins); + lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */ + if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE)) + continue; /* Dead-code elimination can be soooo easy. */ + if (irt_isguard(ir->t)) + asm_snap_prep(as); + RA_DBG_REF(); + checkmclim(as); + asm_ir(as, ir); + } + } while (as->realign); /* Retry in case the MCode needs to be realigned. */ + + /* Emit head of trace. */ + RA_DBG_REF(); + checkmclim(as); + if (as->gcsteps) { + as->curins = as->T->snap[0].ref; + asm_snap_prep(as); /* The GC check is a guard. */ + asm_gc_check(as); + } + ra_evictk(as); + if (as->parent) + asm_head_side(as); + else + asm_head_root(as); + asm_phi_fixup(as); + + RA_DBGX((as, "===== START ====")); + RA_DBG_FLUSH(); + if (as->freeset != RSET_ALL) + lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */ + + /* Set trace entry point before fixing up tail to allow link to self. */ + T->mcode = as->mcp; + T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0; + if (!as->loopref) + asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */ + T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp); + asm_cache_flush(T->mcode, as->mctop); +} + +#undef IR + +#endif diff --git a/src/luajit2/src/lj_asm.h b/src/luajit2/src/lj_asm.h new file mode 100644 index 0000000000..64240efa63 --- /dev/null +++ b/src/luajit2/src/lj_asm.h @@ -0,0 +1,17 @@ +/* +** IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_ASM_H +#define _LJ_ASM_H + +#include "lj_jit.h" + +#if LJ_HASJIT +LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T); +LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, + MCode *target); +#endif + +#endif diff --git a/src/luajit2/src/lj_asm_arm.h b/src/luajit2/src/lj_asm_arm.h new file mode 100644 index 0000000000..31b300bf72 --- /dev/null +++ b/src/luajit2/src/lj_asm_arm.h @@ -0,0 +1,1860 @@ +/* +** ARM IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Register allocator extensions --------------------------------------- */ + +/* Allocate a register with a hint. */ +static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow) +{ + Reg r = IR(ref)->r; + if (ra_noreg(r)) { + if (!ra_hashint(r) && !iscrossref(as, ref)) + ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */ + r = ra_allocref(as, ref, allow); + } + ra_noweak(as, r); + return r; +} + +/* Similar to ra_left, except we override any hints. */ +static void ra_leftov(ASMState *as, Reg dest, IRRef lref) +{ + IRIns *ir = IR(lref); + Reg left = ir->r; + if (ra_noreg(left)) { + ra_sethint(ir->r, dest); /* Propagate register hint. */ + left = ra_allocref(as, lref, RSET_GPR); + } + ra_noweak(as, left); + if (dest != left) { + /* Use register renaming if dest is the PHI reg. */ + if (irt_isphi(ir->t) && as->phireg[dest] == lref) { + ra_modified(as, left); + ra_rename(as, left, dest); + } else { + emit_movrr(as, ir, dest, left); + } + } +} + +/* Allocate a scratch register pair. 
*/ +static Reg ra_scratchpair(ASMState *as, RegSet allow) +{ + RegSet pick1 = as->freeset & allow; + RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN; + Reg r; + if (pick2) { + r = rset_picktop(pick2); + } else { + RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN; + if (pick) { + r = rset_picktop(pick); + ra_restore(as, regcost_ref(as->cost[r+1])); + } else { + pick = pick1 & (allow << 1) & RSET_GPRODD; + if (pick) { + r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1])); + } else { + r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN); + ra_restore(as, regcost_ref(as->cost[r+1])); + } + } + } + lua_assert(rset_test(RSET_GPREVEN, r)); + ra_modified(as, r); + ra_modified(as, r+1); + RA_DBGX((as, "scratchpair $r $r", r, r+1)); + return r; +} + +/* Force a RID_RET/RID_RETHI destination register pair (marked as free). */ +static void ra_destpair(ASMState *as, IRIns *ir) +{ + Reg destlo = ir->r, desthi = (ir+1)->r; + /* First spill unrelated refs blocking the destination registers. */ + if (!rset_test(as->freeset, RID_RET) && + destlo != RID_RET && desthi != RID_RET) + ra_restore(as, regcost_ref(as->cost[RID_RET])); + if (!rset_test(as->freeset, RID_RETHI) && + destlo != RID_RETHI && desthi != RID_RETHI) + ra_restore(as, regcost_ref(as->cost[RID_RETHI])); + /* Next free the destination registers (if any). */ + if (ra_hasreg(destlo)) { + ra_free(as, destlo); + ra_modified(as, destlo); + } else { + destlo = RID_RET; + } + if (ra_hasreg(desthi)) { + ra_free(as, desthi); + ra_modified(as, desthi); + } else { + desthi = RID_RETHI; + } + /* Check for conflicts and shuffle the registers as needed. */ + if (destlo == RID_RETHI) { + if (desthi == RID_RET) { + emit_movrr(as, ir, RID_RETHI, RID_TMP); + emit_movrr(as, ir, RID_RET, RID_RETHI); + emit_movrr(as, ir, RID_TMP, RID_RET); + } else { + emit_movrr(as, ir, RID_RETHI, RID_RET); + if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI); + } + } else if (desthi == RID_RET) { + emit_movrr(as, ir, RID_RET, RID_RETHI); + if (destlo != RID_RET) emit_movrr(as, ir, destlo, RID_RET); + } else { + if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI); + if (destlo != RID_RET) emit_movrr(as, ir, destlo, RID_RET); + } + /* Restore spill slots (if any). */ + if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI); + if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RET); +} + +/* -- Guard handling ------------------------------------------------------ */ + +/* Generate an exit stub group at the bottom of the reserved MCode memory. */ +static MCode *asm_exitstub_gen(ASMState *as, ExitNo group) +{ + MCode *mxp = as->mcbot; + int i; + if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop) + asm_mclimit(as); + /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */ + *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP); + *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu); + mxp++; + *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch); /* DISPATCH address */ + *mxp++ = group*EXITSTUBS_PER_GROUP; + for (i = 0; i < EXITSTUBS_PER_GROUP; i++) + *mxp++ = ARMI_B|((-6-i)&0x00ffffffu); + lj_mcode_commitbot(as->J, mxp); + as->mcbot = mxp; + as->mclim = as->mcbot + MCLIM_REDZONE; + return mxp - EXITSTUBS_PER_GROUP; +} + +/* Setup all needed exit stubs. 
*/ +static void asm_exitstub_setup(ASMState *as, ExitNo nexits) +{ + ExitNo i; + if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) + lj_trace_err(as->J, LJ_TRERR_SNAPOV); + for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++) + if (as->J->exitstubgroup[i] == NULL) + as->J->exitstubgroup[i] = asm_exitstub_gen(as, i); +} + +/* Emit conditional branch to exit for guard. */ +static void asm_guardcc(ASMState *as, ARMCC cc) +{ + MCode *target = exitstub_addr(as->J, as->snapno); + MCode *p = as->mcp; + if (LJ_UNLIKELY(p == as->invmcp)) { + as->loopinv = 1; + *p = ARMI_BL | ((target-p-2) & 0x00ffffffu); + emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1); + return; + } + emit_branch(as, ARMF_CC(ARMI_BL, cc), target); +} + +/* -- Operand fusion ------------------------------------------------------ */ + +/* Limit linear search to this distance. Avoids O(n^2) behavior. */ +#define CONFLICT_SEARCH_LIM 31 + +/* Check if there's no conflicting instruction between curins and ref. */ +static int noconflict(ASMState *as, IRRef ref, IROp conflict) +{ + IRIns *ir = as->ir; + IRRef i = as->curins; + if (i > ref + CONFLICT_SEARCH_LIM) + return 0; /* Give up, ref is too far away. */ + while (--i > ref) + if (ir[i].o == conflict) + return 0; /* Conflict found. */ + return 1; /* Ok, no conflict. */ +} + +/* Fuse the array base of colocated arrays. */ +static int32_t asm_fuseabase(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE && + noconflict(as, ref, IR_NEWREF)) + return (int32_t)sizeof(GCtab); + return 0; +} + +/* Fuse array/hash/upvalue reference into register+offset operand. */ +static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow) +{ + IRIns *ir = IR(ref); + if (ra_noreg(ir->r)) { + if (ir->o == IR_AREF) { + if (mayfuse(as, ref)) { + if (irref_isk(ir->op2)) { + IRRef tab = IR(ir->op1)->op1; + int32_t ofs = asm_fuseabase(as, tab); + IRRef refa = ofs ? tab : ir->op1; + ofs += 8*IR(ir->op2)->i; + if (ofs > -4096 && ofs < 4096) { + *ofsp = ofs; + return ra_alloc1(as, refa, allow); + } + } + } + } else if (ir->o == IR_HREFK) { + if (mayfuse(as, ref)) { + int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); + if (ofs < 4096) { + *ofsp = ofs; + return ra_alloc1(as, ir->op1, allow); + } + } + } else if (ir->o == IR_UREFC) { + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv); + *ofsp = (ofs & 255); /* Mask out less bits to allow LDRD. */ + return ra_allock(as, (ofs & ~255), allow); + } + } + } + *ofsp = 0; + return ra_alloc1(as, ref, allow); +} + +/* Fuse m operand into arithmetic/logic instructions. */ +static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + if (ra_hasreg(ir->r)) { + ra_noweak(as, ir->r); + return ARMF_M(ir->r); + } else if (irref_isk(ref)) { + uint32_t k = emit_isk12(ai, ir->i); + if (k) + return k; + } else if (mayfuse(as, ref)) { + if (ir->o >= IR_BSHL && ir->o <= IR_BROR) { + Reg m = ra_alloc1(as, ir->op1, allow); + ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL : + ir->o == IR_BSHR ? ARMSH_LSR : + ir->o == IR_BSAR ? 
ARMSH_ASR : ARMSH_ROR; + if (irref_isk(ir->op2)) { + return m | ARMF_SH(sh, (IR(ir->op2)->i & 31)); + } else { + Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m)); + return m | ARMF_RSH(sh, s); + } + } else if (ir->o == IR_ADD && ir->op1 == ir->op2) { + Reg m = ra_alloc1(as, ir->op1, allow); + return m | ARMF_SH(ARMSH_LSL, 1); + } + } + return ra_allocref(as, ref, allow); +} + +/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */ +static IRRef asm_fuselsl2(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL && + irref_isk(ir->op2) && IR(ir->op2)->i == 2) + return ir->op1; + return 0; /* No fusion. */ +} + +/* Fuse XLOAD/XSTORE reference into load/store operand. */ +static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref, + RegSet allow) +{ + IRIns *ir = IR(ref); + int32_t ofs = 0; + Reg base; + if (ra_noreg(ir->r) && mayfuse(as, ref)) { + int32_t lim = (ai & 0x04000000) ? 4096 : 256; + if (ir->o == IR_ADD) { + if (irref_isk(ir->op2) && (ofs = IR(ir->op2)->i) > -lim && ofs < lim) { + ref = ir->op1; + } else { + IRRef lref = ir->op1, rref = ir->op2; + Reg rn, rm; + if ((ai & 0x04000000)) { + IRRef sref = asm_fuselsl2(as, rref); + if (sref) { + rref = sref; + ai |= ARMF_SH(ARMSH_LSL, 2); + } else if ((sref = asm_fuselsl2(as, lref)) != 0) { + lref = rref; + rref = sref; + ai |= ARMF_SH(ARMSH_LSL, 2); + } + } + rn = ra_alloc1(as, lref, allow); + rm = ra_alloc1(as, rref, rset_exclude(allow, rn)); + if ((ai & 0x04000000)) ai |= ARMI_LS_R; + emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm); + return; + } + } else if (ir->o == IR_STRREF) { + ofs = (int32_t)sizeof(GCstr); + if (irref_isk(ir->op2)) { + ofs += IR(ir->op2)->i; + ref = ir->op1; + } else if (irref_isk(ir->op1)) { + ofs += IR(ir->op1)->i; + ref = ir->op2; + } else { + /* NYI: Fuse ADD with constant. */ + Reg rn = ra_alloc1(as, ir->op1, allow); + uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn)); + if ((ai & 0x04000000)) + emit_lso(as, ai, rd, rd, ofs); + else + emit_lsox(as, ai, rd, rd, ofs); + emit_dn(as, ARMI_ADD^m, rd, rn); + return; + } + if (ofs <= -lim || ofs >= lim) { + Reg rn = ra_alloc1(as, ref, allow); + Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn)); + if ((ai & 0x04000000)) ai |= ARMI_LS_R; + emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm); + return; + } + } + } + base = ra_alloc1(as, ref, allow); + if ((ai & 0x04000000)) + emit_lso(as, ai, rd, base, ofs); + else + emit_lsox(as, ai, rd, base, ofs); +} + +/* -- Calls --------------------------------------------------------------- */ + +/* Generate a call to a C function. */ +static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) +{ + uint32_t n, nargs = CCI_NARGS(ci); + int32_t ofs = 0; + Reg gpr = REGARG_FIRSTGPR; + if ((void *)ci->func) + emit_call(as, (void *)ci->func); + for (n = 0; n < nargs; n++) { /* Setup args. */ + IRRef ref = args[n]; + IRIns *ir = IR(ref); + if (gpr <= REGARG_LASTGPR) { + lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ + if (ref) ra_leftov(as, gpr, ref); + gpr++; + } else { + if (ref) { + Reg r = ra_alloc1(as, ref, RSET_GPR); + emit_spstore(as, ir, r, ofs); + } + ofs += 4; + } + } +} + +/* Setup result reg/sp for call. Evict scratch regs. */ +static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + RegSet drop = RSET_SCRATCH; + int hiop = ((ir+1)->o == IR_HIOP); + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. 
*/ + if (hiop && ra_hasreg((ir+1)->r)) + rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ + ra_evictset(as, drop); /* Evictions must be performed first. */ + if (ra_used(ir)) { + lua_assert(!irt_ispri(ir->t)); + if (hiop) + ra_destpair(as, ir); + else + ra_destreg(as, ir, RID_RET); + } + UNUSED(ci); +} + +static void asm_call(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX]; + const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; + asm_collectargs(as, ir, ci, args); + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); +} + +static void asm_callx(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX]; + CCallInfo ci; + ci.flags = asm_callx_flags(as, ir); + asm_collectargs(as, ir, &ci, args); + asm_setupresult(as, ir, &ci); + if (irref_isk(ir->op2)) { /* Call to constant address. */ + ci.func = (ASMFunction)(void *)(IR(ir->op2)->i); + } else { /* Need a non-argument register for indirect calls. */ + Reg freg = ra_alloc1(as, ir->op2, RSET_RANGE(RID_R4, RID_R12+1)); + emit_m(as, ARMI_BLXr, freg); + ci.func = (ASMFunction)(void *)0; + } + asm_gencall(as, &ci, args); +} + +/* -- Returns ------------------------------------------------------------- */ + +/* Return to lower frame. Guard that it goes to the right spot. */ +static void asm_retf(ASMState *as, IRIns *ir) +{ + Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); + void *pc = ir_kptr(IR(ir->op2)); + int32_t delta = 1+bc_a(*((const BCIns *)pc - 1)); + as->topslot -= (BCReg)delta; + if ((int32_t)as->topslot < 0) as->topslot = 0; + /* Need to force a spill on REF_BASE now to update the stack slot. */ + emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE))); + emit_setgl(as, base, jit_base); + emit_addptr(as, base, -8*delta); + asm_guardcc(as, CC_NE); + emit_nm(as, ARMI_CMP, RID_TMP, + ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base))); + emit_lso(as, ARMI_LDR, RID_TMP, base, -4); +} + +/* -- Type conversions ---------------------------------------------------- */ + +static void asm_conv(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); + /* FP conversions and 64 bit integer conversions are handled by SPLIT. */ + lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT)); + lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64)); + if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); + if ((as->flags & JIT_F_ARMV6)) { + ARMIns ai = st == IRT_I8 ? ARMI_SXTB : + st == IRT_U8 ? ARMI_UXTB : + st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH; + emit_dm(as, ai, dest, left); + } else if (st == IRT_U8) { + emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left); + } else { + uint32_t shift = st == IRT_I8 ? 24 : 16; + ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR; + emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP); + emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left); + } + } else { /* Handle 32/32 bit no-op (cast). */ + ra_leftov(as, dest, ir->op1); /* Do nothing, but may need to move regs. 
*/ + } +} + +static void asm_strto(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum]; + IRRef args[2]; + Reg rlo = 0, rhi = 0, tmp; + int destused = ra_used(ir); + int32_t ofs = 0; + ra_evictset(as, RSET_SCRATCH); + if (destused) { + if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) && + (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) { + int i; + for (i = 0; i < 2; i++) { + Reg r = (ir+i)->r; + if (ra_hasreg(r)) { + ra_free(as, r); + ra_modified(as, r); + emit_spload(as, ir+i, r, sps_scale((ir+i)->s)); + } + } + ofs = sps_scale(ir->s); + destused = 0; + } else { + rhi = ra_dest(as, ir+1, RSET_GPR); + rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi)); + } + } + asm_guardcc(as, CC_EQ); + if (destused) { + emit_lso(as, ARMI_LDR, rhi, RID_SP, 4); + emit_lso(as, ARMI_LDR, rlo, RID_SP, 0); + } + emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); /* Test return status. */ + args[0] = ir->op1; /* GCstr *str */ + args[1] = ASMREF_TMP1; /* TValue *n */ + asm_gencall(as, ci, args); + tmp = ra_releasetmp(as, ASMREF_TMP1); + if (ofs == 0) + emit_dm(as, ARMI_MOV, tmp, RID_SP); + else + emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR); +} + +/* Get pointer to TValue. */ +static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) +{ + IRIns *ir = IR(ref); + if (irt_isnum(ir->t)) { /* Use the number constant itself as a TValue. */ + lua_assert(irref_isk(ref)); + ra_allockreg(as, i32ptr(ir_knum(ir)), dest); + } else { + /* Otherwise use [sp] and [sp+4] to hold the TValue. */ + RegSet allow = rset_exclude(RSET_GPR, dest); + Reg type; + emit_dm(as, ARMI_MOV, dest, RID_SP); + if (!irt_ispri(ir->t)) { + Reg src = ra_alloc1(as, ref, allow); + emit_lso(as, ARMI_STR, src, RID_SP, 0); + } + if ((ir+1)->o == IR_HIOP) + type = ra_alloc1(as, ref+1, allow); + else + type = ra_allock(as, irt_toitype(ir->t), allow); + emit_lso(as, ARMI_STR, type, RID_SP, 4); + } +} + +static void asm_tostr(ASMState *as, IRIns *ir) +{ + IRRef args[2]; + args[0] = ASMREF_L; + as->gcsteps++; + if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) { + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum]; + args[1] = ASMREF_TMP1; /* const lua_Number * */ + asm_setupresult(as, ir, ci); /* GCstr * */ + asm_gencall(as, ci, args); + asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1); + } else { + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint]; + args[1] = ir->op1; /* int32_t k */ + asm_setupresult(as, ir, ci); /* GCstr * */ + asm_gencall(as, ci, args); + } +} + +/* -- Memory references --------------------------------------------------- */ + +static void asm_aref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg idx, base; + if (irref_isk(ir->op2)) { + IRRef tab = IR(ir->op1)->op1; + int32_t ofs = asm_fuseabase(as, tab); + IRRef refa = ofs ? tab : ir->op1; + uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i); + if (k) { + base = ra_alloc1(as, refa, RSET_GPR); + emit_dn(as, ARMI_ADD^k, dest, base); + return; + } + } + base = ra_alloc1(as, ir->op1, RSET_GPR); + idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); + emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx); +} + +/* Inlined hash lookup. Specialized for key type and for const keys. 
+** The equivalent C code is: +** Node *n = hashkey(t, key); +** do { +** if (lj_obj_equal(&n->key, key)) return &n->val; +** } while ((n = nextnode(n))); +** return niltv(L); +*/ +static void asm_href(ASMState *as, IRIns *ir, IROp merge) +{ + RegSet allow = RSET_GPR; + int destused = ra_used(ir); + Reg dest = ra_dest(as, ir, allow); + Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); + Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_LR; + IRRef refkey = ir->op2; + IRIns *irkey = IR(refkey); + IRType1 kt = irkey->t; + int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt)); + uint32_t khash; + MCLabel l_end, l_loop; + rset_clear(allow, tab); + if (!irref_isk(refkey) || irt_isstr(kt)) { + key = ra_alloc1(as, refkey, allow); + rset_clear(allow, key); + if (irkey[1].o == IR_HIOP) { + if (ra_hasreg((irkey+1)->r)) { + keynumhi = (irkey+1)->r; + keyhi = RID_TMP; + ra_noweak(as, keynumhi); + } else { + keyhi = keynumhi = ra_allocref(as, refkey+1, allow); + } + rset_clear(allow, keynumhi); + khi = 0; + } + } else if (irt_isnum(kt)) { + int32_t val = (int32_t)ir_knum(irkey)->u32.lo; + k = emit_isk12(ARMI_CMP, val); + if (!k) { + key = ra_allock(as, val, allow); + rset_clear(allow, key); + } + val = (int32_t)ir_knum(irkey)->u32.hi; + khi = emit_isk12(ARMI_CMP, val); + if (!khi) { + keyhi = ra_allock(as, val, allow); + rset_clear(allow, keyhi); + } + } else if (!irt_ispri(kt)) { + k = emit_isk12(ARMI_CMP, irkey->i); + if (!k) { + key = ra_alloc1(as, refkey, allow); + rset_clear(allow, key); + } + } + if (!irt_ispri(kt)) + tmp = ra_scratchpair(as, allow); + + /* Key not found in chain: jump to exit (if merged) or load niltv. */ + l_end = emit_label(as); + as->invmcp = NULL; + if (merge == IR_NE) + asm_guardcc(as, CC_AL); + else if (destused) + emit_loada(as, dest, niltvg(J2G(as->J))); + + /* Follow hash chain until the end. */ + l_loop = --as->mcp; + emit_n(as, ARMI_CMP|ARMI_K12|0, dest); + emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next)); + + /* Type and value comparison. */ + if (merge == IR_EQ) + asm_guardcc(as, CC_EQ); + else + emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); + if (!irt_ispri(kt)) { + emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^khi, tmp+1, keyhi); + emit_nm(as, ARMI_CMP^k, tmp, key); + emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key)); + } else { + emit_n(as, ARMI_CMP^khi, tmp); + emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it)); + } + *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu); + + /* Load main position relative to tab->node into dest. */ + khash = irref_isk(refkey) ? ir_khash(irkey) : 1; + if (khash == 0) { + emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); + } else { + emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp); + emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp); + if (irt_isstr(kt)) { /* Fetch of str->hash is cheaper than ra_allock. */ + emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP); + emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); + emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash)); + emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); + } else if (irref_isk(refkey)) { + emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash, + rset_exclude(rset_exclude(RSET_GPR, tab), dest)); + emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); + emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); + } else { /* Must match with hash*() in lj_tab.c. 
*/ + if (ra_hasreg(keynumhi)) { /* Canonicalize +-0.0 to 0.0. */ + if (keyhi == RID_TMP) + emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi); + emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi); + } + emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP); + emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1); + emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); + emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)), + tmp, tmp+1, tmp); + emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); + emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp); + if (ra_hasreg(keynumhi)) { + emit_dnm(as, ARMI_EOR, tmp+1, tmp, key); + emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key); /* Test for +-0.0. */ + emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi); + } else { + emit_dnm(as, ARMI_EOR, tmp+1, tmp, key); + emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS, + rset_exclude(rset_exclude(RSET_GPR, tab), key)); + } + } + } +} + +static void asm_hrefk(ASMState *as, IRIns *ir) +{ + IRIns *kslot = IR(ir->op2); + IRIns *irkey = IR(kslot->op1); + int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); + int32_t kofs = ofs + (int32_t)offsetof(Node, key); + Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; + Reg node = ra_alloc1(as, ir->op1, RSET_GPR); + Reg key = RID_NONE, type = RID_TMP, idx = node; + RegSet allow = rset_exclude(RSET_GPR, node); + lua_assert(ofs % sizeof(Node) == 0); + if (ofs > 4095) { + idx = dest; + rset_clear(allow, dest); + kofs = (int32_t)offsetof(Node, key); + } else if (ra_hasreg(dest)) { + emit_opk(as, ARMI_ADD, dest, node, ofs, allow); + } + asm_guardcc(as, CC_NE); + if (!irt_ispri(irkey->t)) { + RegSet even = (as->freeset & (as->freeset >> 1) & allow & RSET_GPREVEN); + if (even) { + key = ra_scratch(as, even); + if (rset_test(as->freeset, key+1)) { + type = key+1; + ra_modified(as, type); + } + } else { + key = ra_scratch(as, allow); + } + rset_clear(allow, key); + } + rset_clear(allow, type); + if (irt_isnum(irkey->t)) { + emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type, + (int32_t)ir_knum(irkey)->u32.hi, allow); + emit_opk(as, ARMI_CMP, 0, key, + (int32_t)ir_knum(irkey)->u32.lo, allow); + } else if (ra_hasreg(key)) { + emit_n(as, ARMF_CC(ARMI_CMN, CC_EQ)|ARMI_K12|-irt_toitype(irkey->t), type); + emit_opk(as, ARMI_CMP, 0, key, irkey->i, allow); + } else { + emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type); + } + emit_lso(as, ARMI_LDR, type, idx, kofs+4); + if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs); + if (ofs > 4095) + emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR); +} + +static void asm_newref(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey]; + IRRef args[3]; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* GCtab *t */ + args[2] = ASMREF_TMP1; /* cTValue *key */ + asm_setupresult(as, ir, ci); /* TValue * */ + asm_gencall(as, ci, args); + asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2); +} + +static void asm_uref(ASMState *as, IRIns *ir) +{ + /* NYI: Check that UREFO is still open and not aliasing a slot.
*/ + Reg dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; + emit_lsptr(as, ARMI_LDR, dest, v); + } else { + Reg uv = ra_scratch(as, RSET_GPR); + Reg func = ra_alloc1(as, ir->op1, RSET_GPR); + if (ir->o == IR_UREFC) { + asm_guardcc(as, CC_NE); + emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP); + emit_opk(as, ARMI_ADD, dest, uv, + (int32_t)offsetof(GCupval, tv), RSET_GPR); + emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed)); + } else { + emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v)); + } + emit_lso(as, ARMI_LDR, uv, func, + (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); + } +} + +static void asm_fref(ASMState *as, IRIns *ir) +{ + UNUSED(as); UNUSED(ir); + lua_assert(!ra_used(ir)); +} + +static void asm_strref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + IRRef ref = ir->op2, refk = ir->op1; + Reg r; + if (irref_isk(ref)) { + IRRef tmp = refk; refk = ref; ref = tmp; + } else if (!irref_isk(refk)) { + uint32_t k, m = ARMI_K12|sizeof(GCstr); + Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); + IRIns *irr = IR(ir->op2); + if (ra_hasreg(irr->r)) { + ra_noweak(as, irr->r); + right = irr->r; + } else if (mayfuse(as, irr->op2) && + irr->o == IR_ADD && irref_isk(irr->op2) && + (k = emit_isk12(ARMI_ADD, + (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) { + m = k; + right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left)); + } else { + right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left)); + } + emit_dn(as, ARMI_ADD^m, dest, dest); + emit_dnm(as, ARMI_ADD, dest, left, right); + return; + } + r = ra_alloc1(as, ref, RSET_GPR); + emit_opk(as, ARMI_ADD, dest, r, + sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r)); +} + +/* -- Loads and stores ---------------------------------------------------- */ + +static ARMIns asm_fxloadins(IRIns *ir) +{ + switch (irt_type(ir->t)) { + case IRT_I8: return ARMI_LDRSB; + case IRT_U8: return ARMI_LDRB; + case IRT_I16: return ARMI_LDRSH; + case IRT_U16: return ARMI_LDRH; + case IRT_NUM: lua_assert(0); + case IRT_FLOAT: + default: return ARMI_LDR; + } +} + +static ARMIns asm_fxstoreins(IRIns *ir) +{ + switch (irt_type(ir->t)) { + case IRT_I8: case IRT_U8: return ARMI_STRB; + case IRT_I16: case IRT_U16: return ARMI_STRH; + case IRT_NUM: lua_assert(0); + case IRT_FLOAT: + default: return ARMI_STR; + } +} + +static void asm_fload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); + ARMIns ai = asm_fxloadins(ir); + int32_t ofs; + if (ir->op2 == IRFL_TAB_ARRAY) { + ofs = asm_fuseabase(as, ir->op1); + if (ofs) { /* Turn the t->array load into an add for colocated arrays. 
*/ + emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx); + return; + } + } + ofs = field_ofs[ir->op2]; + if ((ai & 0x04000000)) + emit_lso(as, ai, dest, idx, ofs); + else + emit_lsox(as, ai, dest, idx, ofs); +} + +static void asm_fstore(ASMState *as, IRIns *ir) +{ + Reg src = ra_alloc1(as, ir->op2, RSET_GPR); + IRIns *irf = IR(ir->op1); + Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); + int32_t ofs = field_ofs[irf->op2]; + ARMIns ai = asm_fxstoreins(ir); + if ((ai & 0x04000000)) + emit_lso(as, ai, src, idx, ofs); + else + emit_lsox(as, ai, src, idx, ofs); +} + +static void asm_xload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); + asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR); +} + +static void asm_xstore(ASMState *as, IRIns *ir) +{ + Reg src = ra_alloc1(as, ir->op2, RSET_GPR); + asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, + rset_exclude(RSET_GPR, src)); +} + +static void asm_ahuvload(ASMState *as, IRIns *ir) +{ + int hiop = ((ir+1)->o == IR_HIOP); + IRType t = hiop ? IRT_NUM : irt_type(ir->t); + Reg dest = RID_NONE, type = RID_NONE, idx; + RegSet allow = RSET_GPR; + int32_t ofs = 0; + if (hiop && ra_used(ir+1)) { + type = ra_dest(as, ir+1, allow); + rset_clear(allow, type); + } + if (ra_used(ir)) { + lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t)); + dest = ra_dest(as, ir, allow); + rset_clear(allow, dest); + } + idx = asm_fuseahuref(as, ir->op1, &ofs, allow); + if (!hiop) { + rset_clear(allow, idx); + if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 && + rset_test((as->freeset & allow), dest+1)) { + type = dest+1; + ra_modified(as, type); + } else { + type = RID_TMP; + } + } + asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE); + emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type); + if (ra_hasreg(dest)) emit_lso(as, ARMI_LDR, dest, idx, ofs); + emit_lso(as, ARMI_LDR, type, idx, ofs+4); +} + +static void asm_ahustore(ASMState *as, IRIns *ir) +{ + RegSet allow = RSET_GPR; + Reg idx, src = RID_NONE, type = RID_NONE; + int32_t ofs = 0; + int hiop = ((ir+1)->o == IR_HIOP); + if (!irt_ispri(ir->t)) { + src = ra_alloc1(as, ir->op2, allow); + rset_clear(allow, src); + } + if (hiop) + type = ra_alloc1(as, (ir+1)->op2, allow); + else + type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); + idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type)); + if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs); + emit_lso(as, ARMI_STR, type, idx, ofs+4); +} + +static void asm_sload(ASMState *as, IRIns *ir) +{ + int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); + int hiop = ((ir+1)->o == IR_HIOP); + IRType t = hiop ? IRT_NUM : irt_type(ir->t); + Reg dest = RID_NONE, type = RID_NONE, base; + RegSet allow = RSET_GPR; + lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ + lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK)); + lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. 
*/ + if (hiop && ra_used(ir+1)) { + type = ra_dest(as, ir+1, allow); + rset_clear(allow, type); + } + if (ra_used(ir)) { + lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t)); + dest = ra_dest(as, ir, allow); + rset_clear(allow, dest); + } + base = ra_alloc1(as, REF_BASE, allow); + if ((ir->op2 & IRSLOAD_TYPECHECK)) { + if (ra_noreg(type)) { + rset_clear(allow, base); + if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 && + rset_test((as->freeset & allow), dest+1)) { + type = dest+1; + ra_modified(as, type); + } else { + type = RID_TMP; + } + } + asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE); + emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type); + } + if (ra_hasreg(dest)) emit_lso(as, ARMI_LDR, dest, base, ofs); + if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4); +} + +/* -- Allocations --------------------------------------------------------- */ + +#if LJ_HASFFI +static void asm_cnew(ASMState *as, IRIns *ir) +{ + CTState *cts = ctype_ctsG(J2G(as->J)); + CTypeID typeid = (CTypeID)IR(ir->op1)->i; + CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? + lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i; + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; + IRRef args[2]; + RegSet allow = (RSET_GPR & ~RSET_SCRATCH); + lua_assert(sz != CTSIZE_INVALID); + + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ASMREF_TMP1; /* MSize size */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCcdata * */ + + /* Initialize immutable cdata object. */ + if (ir->o == IR_CNEWI) { + int32_t ofs = sizeof(GCcdata); + lua_assert(sz == 4 || sz == 8); + if (sz == 8) { + ofs += 4; ir++; + lua_assert(ir->o == IR_HIOP); + } + for (;;) { + Reg r = ra_alloc1(as, ir->op2, allow); + emit_lso(as, ARMI_STR, r, RID_RET, ofs); + rset_clear(allow, r); + if (ofs == sizeof(GCcdata)) break; + ofs -= 4; ir--; + } + } + /* Initialize gct and typeid. lj_mem_newgco() already sets marked. */ + { + uint32_t k = emit_isk12(ARMI_MOV, typeid); + Reg r = k ? 
RID_R1 : ra_allock(as, typeid, allow); + emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct)); + emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, typeid)); + emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP); + if (k) emit_d(as, ARMI_MOV^k, RID_R1); + } + asm_gencall(as, ci, args); + ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), + ra_releasetmp(as, ASMREF_TMP1)); +} +#else +#define asm_cnew(as, ir) ((void)0) +#endif + +/* -- Write barriers ------------------------------------------------------ */ + +static void asm_tbar(ASMState *as, IRIns *ir) +{ + Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); + Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab)); + Reg gr = ra_allock(as, i32ptr(J2G(as->J)), + rset_exclude(rset_exclude(RSET_GPR, tab), link)); + Reg mark = RID_TMP; + MCLabel l_end = emit_label(as); + emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist)); + emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked)); + emit_lso(as, ARMI_STR, tab, gr, + (int32_t)offsetof(global_State, gc.grayagain)); + emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark); + emit_lso(as, ARMI_LDR, link, gr, + (int32_t)offsetof(global_State, gc.grayagain)); + emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); + emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark); + emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked)); +} + +static void asm_obar(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; + IRRef args[2]; + MCLabel l_end; + Reg obj, val, tmp; + /* No need for other object barriers (yet). */ + lua_assert(IR(ir->op1)->o == IR_UREFC); + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ir->op1; /* TValue *tv */ + asm_gencall(as, ci, args); + if ((*as->mcp >> 28) == CC_AL) + *as->mcp = ARMF_CC(*as->mcp, CC_NE); + else + emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end); + ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1)); + obj = IR(ir->op1)->r; + tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); + emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp); + emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP); + val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); + emit_lso(as, ARMI_LDRB, tmp, obj, + (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); + emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked)); +} + +/* -- Arithmetic and logic operations ------------------------------------- */ + +static int asm_swapops(ASMState *as, IRRef lref, IRRef rref) +{ + IRIns *ir; + if (irref_isk(rref)) + return 0; /* Don't swap constants to the left. */ + if (irref_isk(lref)) + return 1; /* But swap constants to the right. */ + ir = IR(rref); + if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) || + (ir->o == IR_ADD && ir->op1 == ir->op2)) + return 0; /* Don't swap fusable operands to the left. */ + ir = IR(lref); + if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) || + (ir->o == IR_ADD && ir->op1 == ir->op2)) + return 1; /* But swap fusable operands to the right. */ + return 0; /* Otherwise don't swap. 
*/ +} + +static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai) +{ + IRRef lref = ir->op1, rref = ir->op2; + Reg left, dest = ra_dest(as, ir, RSET_GPR); + uint32_t m; + if (asm_swapops(as, lref, rref)) { + IRRef tmp = lref; lref = rref; rref = tmp; + if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC) + ai ^= (ARMI_SUB^ARMI_RSB); + } + left = ra_hintalloc(as, lref, dest, RSET_GPR); + m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left)); + if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */ + asm_guardcc(as, CC_VS); + ai |= ARMI_S; + } + emit_dn(as, ai^m, dest, left); +} + +static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai) +{ + if (as->flagmcp == as->mcp) { /* Try to drop cmp r, #0. */ + uint32_t cc = (as->mcp[1] >> 28); + as->flagmcp = NULL; + if (cc <= CC_NE) { + as->mcp++; + ai |= ARMI_S; + } else if (cc == CC_GE) { + *++as->mcp ^= ((CC_GE^CC_PL) << 28); + ai |= ARMI_S; + } else if (cc == CC_LT) { + *++as->mcp ^= ((CC_LT^CC_MI) << 28); + ai |= ARMI_S; + } /* else: other conds don't work with bit ops. */ + } + if (ir->op2 == 0) { + Reg dest = ra_dest(as, ir, RSET_GPR); + uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR); + emit_d(as, ai^m, dest); + } else { + /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */ + asm_intop(as, ir, ai); + } +} + +static void asm_arithop(ASMState *as, IRIns *ir, ARMIns ai) +{ + if (as->flagmcp == as->mcp) { /* Drop cmp r, #0. */ + as->flagmcp = NULL; + as->mcp++; + ai |= ARMI_S; + } + asm_intop(as, ir, ai); +} + +static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + emit_dn(as, ai|ARMI_K12|0, dest, left); +} + +/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */ +static void asm_intmul(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest)); + Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + Reg tmp = RID_NONE; + /* ARMv5 restriction: dest != left and dest_hi != left. */ + if (dest == left && left != right) { left = right; right = dest; } + if (irt_isguard(ir->t)) { /* IR_MULOV */ + if (!(as->flags & JIT_F_ARMV6) && dest == left) + tmp = left = ra_scratch(as, rset_exclude(RSET_FPR, left)); + asm_guardcc(as, CC_NE); + emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest); + emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left); + } else { + if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP; + emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left); + } + /* Only need this for the dest == left == right case. 
*/ + if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right); +} + +static void asm_intmod(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi]; + IRRef args[2]; + args[0] = ir->op1; + args[1] = ir->op2; + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); +} + +static void asm_bitswap(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + if ((as->flags & JIT_F_ARMV6)) { + emit_dm(as, ARMI_REV, dest, left); + } else { + Reg tmp2 = dest; + if (tmp2 == left) + tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left)); + emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP); + emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left); + emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP); + emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left); + } +} + +static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh) +{ + if (irref_isk(ir->op2)) { /* Constant shifts. */ + /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */ + /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + int32_t shift = (IR(ir->op2)->i & 31); + emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_alloc1(as, ir->op1, RSET_GPR); + Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left); + } +} + +static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) +{ + uint32_t kcmp = 0, kmov = 0; + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); + Reg right = 0; + if (irref_isk(ir->op2)) { + kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i); + if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i); + } + if (!kmov) { + kcmp = 0; + right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); + } + if (dest != right) { + emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right); + cc ^= 1; /* Must use opposite conditions for paired moves. */ + } else { + cc ^= (CC_LT^CC_GT); /* Otherwise may swap CC_LT <-> CC_GT. */ + } + if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, left); + emit_nm(as, ARMI_CMP^kcmp, left, right); +} + +static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp]; + RegSet drop = RSET_SCRATCH; + Reg r; + IRRef args[4]; + args[0] = ir->op1; args[1] = (ir+1)->op1; + args[2] = ir->op2; args[3] = (ir+1)->op2; + /* __aeabi_cdcmple preserves r0-r3. */ + if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); + if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r); + if (!rset_test(as->freeset, RID_R2) && + regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2); + if (!rset_test(as->freeset, RID_R3) && + regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3); + ra_evictset(as, drop); + ra_destpair(as, ir); + emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3); + emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RET, RID_R2); + emit_call(as, (void *)ci->func); + for (r = RID_R0; r <= RID_R3; r++) + ra_leftov(as, r, args[r-RID_R0]); +} + +/* -- Comparisons --------------------------------------------------------- */ + +/* Map of comparisons to flags. ORDER IR. 
*/ +static const uint8_t asm_compmap[IR_ABC+1] = { + /* op FP swp int cc FP cc */ + /* LT */ CC_GE + (CC_HS << 4), + /* GE x */ CC_LT + (CC_HI << 4), + /* LE */ CC_GT + (CC_HI << 4), + /* GT x */ CC_LE + (CC_HS << 4), + /* ULT x */ CC_HS + (CC_LS << 4), + /* UGE */ CC_LO + (CC_LO << 4), + /* ULE x */ CC_HI + (CC_LO << 4), + /* UGT */ CC_LS + (CC_LS << 4), + /* EQ */ CC_NE + (CC_NE << 4), + /* NE */ CC_EQ + (CC_EQ << 4), + /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */ +}; + +/* FP comparisons. */ +static void asm_fpcomp(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp]; + RegSet drop = RSET_SCRATCH; + Reg r; + IRRef args[4]; + int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1); + args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1; + args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2; + /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */ + for (r = RID_R0; r <= RID_R3; r++) + if (!rset_test(as->freeset, r) && + regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r); + ra_evictset(as, drop); + asm_guardcc(as, (asm_compmap[ir->o] >> 4)); + emit_call(as, (void *)ci->func); + for (r = RID_R0; r <= RID_R3; r++) + ra_leftov(as, r, args[r-RID_R0]); +} + +/* Integer comparisons. */ +static void asm_intcomp(ASMState *as, IRIns *ir) +{ + ARMCC cc = (asm_compmap[ir->o] & 15); + IRRef lref = ir->op1, rref = ir->op2; + Reg left; + uint32_t m; + int cmpprev0 = 0; + lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t)); + if (asm_swapops(as, lref, rref)) { + Reg tmp = lref; lref = rref; rref = tmp; + if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */ + else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */ + } + if (irref_isk(rref) && IR(rref)->i == 0) { + IRIns *irl = IR(lref); + cmpprev0 = (irl+1 == ir); + /* Combine comp(BAND(left, right), 0) into tst left, right. */ + if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) { + IRRef blref = irl->op1, brref = irl->op2; + uint32_t m2 = 0; + Reg bleft; + if (asm_swapops(as, blref, brref)) { + Reg tmp = blref; blref = brref; brref = tmp; + } + if (irref_isk(brref)) { + m2 = emit_isk12(ARMI_AND, IR(brref)->i); + if ((m2 & (ARMI_AND^ARMI_BIC))) + goto notst; /* Not beneficial if we miss a constant operand. */ + } + if (cc == CC_GE) cc = CC_PL; + else if (cc == CC_LT) cc = CC_MI; + else if (cc > CC_NE) goto notst; /* Other conds don't work with tst. */ + bleft = ra_alloc1(as, blref, RSET_GPR); + if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft)); + asm_guardcc(as, cc); + emit_n(as, ARMI_TST^m2, bleft); + return; + } + } +notst: + left = ra_alloc1(as, lref, RSET_GPR); + m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left)); + asm_guardcc(as, cc); + emit_n(as, ARMI_CMP^m, left); + /* Signed comparison with zero and referencing previous ins? */ + if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE)) + as->flagmcp = as->mcp; /* Allow elimination of the compare. */ +} + +/* 64 bit integer comparisons. */ +static void asm_int64comp(ASMState *as, IRIns *ir) +{ + int signedcomp = (ir->o <= IR_GT); + ARMCC cclo, cchi; + Reg leftlo, lefthi; + uint32_t mlo, mhi; + RegSet allow = RSET_GPR, oldfree; + + /* Always use unsigned comparison for loword. */ + cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15; + leftlo = ra_alloc1(as, ir->op1, allow); + oldfree = as->freeset; + mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo)); + allow &= ~(oldfree & ~as->freeset); /* Update for allocs of asm_fuseopm. */ + + /* Use signed or unsigned comparison for hiword. 
*/ + cchi = asm_compmap[ir->o] & 15; + lefthi = ra_alloc1(as, (ir+1)->op1, allow); + mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi)); + + /* All register allocations must be performed _before_ this point. */ + if (signedcomp) { + MCLabel l_around = emit_label(as); + asm_guardcc(as, cclo); + emit_n(as, ARMI_CMP^mlo, leftlo); + emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around); + if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6; /* GE -> GT, LE -> LT */ + asm_guardcc(as, cchi); + } else { + asm_guardcc(as, cclo); + emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo); + } + emit_n(as, ARMI_CMP^mhi, lefthi); +} + +/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ + +/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ +static void asm_hiop(ASMState *as, IRIns *ir) +{ + /* HIOP is marked as a store because it needs its own DCE logic. */ + int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ + if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; + if ((ir-1)->o <= IR_NE) { /* 64 bit integer or FP comparisons. ORDER IR. */ + as->curins--; /* Always skip the loword comparison. */ + if (irt_isint(ir->t)) + asm_int64comp(as, ir-1); + else + asm_fpcomp(as, ir-1); + return; + } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) { + if (uselo || usehi || !(as->flags & JIT_F_OPT_DCE)) { + as->curins--; /* Always skip the loword min/max. */ + asm_fpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO); + } + return; + } + if (!usehi && (as->flags & JIT_F_OPT_DCE)) + return; /* Skip unused hiword op for all remaining ops. */ + switch ((ir-1)->o) { +#if LJ_HASFFI + case IR_ADD: + if (uselo) { + as->curins--; + asm_intop(as, ir, ARMI_ADC); + asm_intop(as, ir-1, ARMI_ADD|ARMI_S); + } else { + asm_intop(as, ir, ARMI_ADD); + } + break; + case IR_SUB: + if (uselo) { + as->curins--; + asm_intop(as, ir, ARMI_SBC); + asm_intop(as, ir-1, ARMI_SUB|ARMI_S); + } else { + asm_intop(as, ir, ARMI_SUB); + } + break; + case IR_NEG: + if (uselo) { + as->curins--; + asm_intneg(as, ir, ARMI_RSC); + asm_intneg(as, ir-1, ARMI_RSB|ARMI_S); + } else { + asm_intneg(as, ir, ARMI_RSB); + } + break; +#endif + case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + case IR_STRTO: + if (!uselo) + ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */ + break; + case IR_CALLN: + case IR_CALLS: + case IR_CALLXS: + if (!uselo) + ra_allocref(as, ir->op1, RID2RSET(RID_RET)); /* Mark lo op as used. */ + break; + case IR_ASTORE: case IR_HSTORE: case IR_USTORE: + case IR_TOSTR: case IR_CNEWI: + /* Nothing to do here. Handled by lo op itself. */ + break; + default: lua_assert(0); break; + } +} + +/* -- Stack handling ------------------------------------------------------ */ + +/* Check Lua stack size for overflow. Use exit handler as fallback. */ +static void asm_stack_check(ASMState *as, BCReg topslot, + IRIns *irp, RegSet allow, ExitNo exitno) +{ + Reg pbase; + uint32_t k; + if (irp) { + exitno = as->T->nsnap; /* Highest exit + 1 indicates stack check. */ + if (ra_hasreg(irp->r)) { + pbase = irp->r; + } else if (allow) { + pbase = rset_pickbot(allow); + } else { + pbase = RID_RET; + emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0); /* Restore temp. register. 
*/ + } + } else { + pbase = RID_BASE; + } + emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno)); + k = emit_isk12(0, (int32_t)(8*topslot)); + lua_assert(k); + emit_n(as, ARMI_CMP^k, RID_TMP); + emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase); + emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, + (int32_t)offsetof(lua_State, maxstack)); + if (irp) { /* Must not spill arbitrary registers in head of side trace. */ + int32_t i = i32ptr(&J2G(as->J)->jit_L); + if (ra_noreg(irp->r)) { + lua_assert(ra_hasspill(irp->s)); + emit_lso(as, ARMI_LDR, RID_RET, RID_SP, sps_scale(irp->s)); + } + emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095)); + if (ra_noreg(irp->r)) { + emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */ + } + emit_loadi(as, RID_TMP, (i & ~4095)); + } else { + emit_getgl(as, RID_TMP, jit_L); + } +} + +/* Restore Lua stack from on-trace state. */ +static void asm_stack_restore(ASMState *as, SnapShot *snap) +{ + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + SnapEntry *flinks = map + nent + snap->depth; + /* Store the value of all modified slots to the Lua stack. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + BCReg s = snap_slot(sn); + int32_t ofs = 8*((int32_t)s-1); + IRRef ref = snap_ref(sn); + IRIns *ir = IR(ref); + if ((sn & SNAP_NORESTORE)) + continue; + if (irt_isnum(ir->t)) { + RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); + Reg tmp; + lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */ + tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, + rset_exclude(RSET_GPREVEN, RID_BASE)); + emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs); + if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1); + tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd); + emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4); + } else { + RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); + Reg type; + lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); + if (!irt_ispri(ir->t)) { + Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE)); + emit_lso(as, ARMI_STR, src, RID_BASE, ofs); + if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1); + } + if ((sn & (SNAP_CONT|SNAP_FRAME))) { + if (s == 0) continue; /* Do not overwrite link to previous frame. */ + type = ra_allock(as, (int32_t)(*flinks--), odd); + } else if ((sn & SNAP_SOFTFPNUM)) { + type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE)); + } else { + type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd); + } + emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4); + } + checkmclim(as); + } + lua_assert(map + nent == flinks); +} + +/* -- GC handling --------------------------------------------------------- */ + +/* Check GC threshold and do one or more GC steps. */ +static void asm_gc_check(ASMState *as) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; + IRRef args[2]; + MCLabel l_end; + Reg tmp1, tmp2; + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ + asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ + emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ASMREF_TMP2; /* MSize steps */ + asm_gencall(as, ci, args); + tmp1 = ra_releasetmp(as, ASMREF_TMP1); + tmp2 = ra_releasetmp(as, ASMREF_TMP2); + emit_loadi(as, tmp2, (int32_t)as->gcsteps); + /* Jump around GC step if GC total < GC threshold. 
*/ + emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end); + emit_nm(as, ARMI_CMP, RID_TMP, tmp2); + emit_lso(as, ARMI_LDR, tmp2, tmp1, + (int32_t)offsetof(global_State, gc.threshold)); + emit_lso(as, ARMI_LDR, RID_TMP, tmp1, + (int32_t)offsetof(global_State, gc.total)); + ra_allockreg(as, i32ptr(J2G(as->J)), tmp1); + as->gcsteps = 0; + checkmclim(as); +} + +/* -- Loop handling ------------------------------------------------------- */ + +/* Fixup the loop branch. */ +static void asm_loop_fixup(ASMState *as) +{ + MCode *p = as->mctop; + MCode *target = as->mcp; + if (as->loopinv) { /* Inverted loop branch? */ + /* asm_guardcc already inverted the bcc and patched the final bl. */ + p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu); + } else { + p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu); + } +} + +/* -- Head of trace ------------------------------------------------------- */ + +/* Reload L register from g->jit_L. */ +static void asm_head_lreg(ASMState *as) +{ + IRIns *ir = IR(ASMREF_L); + if (ra_used(ir)) { + Reg r = ra_dest(as, ir, RSET_GPR); + emit_getgl(as, r, jit_L); + ra_evictk(as); + } +} + +/* Coalesce BASE register for a root trace. */ +static void asm_head_root_base(ASMState *as) +{ + IRIns *ir; + asm_head_lreg(as); + ir = IR(REF_BASE); + if (ra_hasreg(ir->r) && rset_test(as->modset, ir->r)) ra_spill(as, ir); + ra_destreg(as, ir, RID_BASE); +} + +/* Coalesce BASE register for a side trace. */ +static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) +{ + IRIns *ir; + asm_head_lreg(as); + ir = IR(REF_BASE); + if (ra_hasreg(ir->r) && rset_test(as->modset, ir->r)) ra_spill(as, ir); + if (ra_hasspill(irp->s)) { + rset_clear(allow, ra_dest(as, ir, allow)); + } else { + lua_assert(ra_hasreg(irp->r)); + rset_clear(allow, irp->r); + ra_destreg(as, ir, irp->r); + } + return allow; +} + +/* -- Tail of trace ------------------------------------------------------- */ + +/* Fixup the tail code. */ +static void asm_tail_fixup(ASMState *as, TraceNo lnk) +{ + MCode *p = as->mctop; + MCode *target; + int32_t spadj = as->T->spadjust; + if (spadj == 0) { + as->mctop = --p; + } else { + /* Patch stack adjustment. */ + uint32_t k = emit_isk12(ARMI_ADD, spadj); + lua_assert(k); + p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP); + } + /* Patch exit branch. */ + target = lnk == TRACE_INTERP ? (MCode *)lj_vm_exit_interp : + traceref(as->J, lnk)->mcode; + p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu); +} + +/* Prepare tail of code. */ +static void asm_tail_prep(ASMState *as) +{ + MCode *p = as->mctop - 1; /* Leave room for exit branch. */ + if (as->loopref) { + as->invmcp = as->mcp = p; + } else { + as->mcp = p-1; /* Leave room for stack pointer adjustment. */ + as->invmcp = NULL; + } + *p = 0; /* Prevent load/store merging. */ +} + +/* -- Instruction dispatch ------------------------------------------------ */ + +/* Assemble a single instruction. */ +static void asm_ir(ASMState *as, IRIns *ir) +{ + switch ((IROp)ir->o) { + /* Miscellaneous ops. */ + case IR_LOOP: asm_loop(as); break; + case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break; + case IR_USE: ra_alloc1(as, ir->op1, RSET_GPR); break; + case IR_PHI: asm_phi(as, ir); break; + case IR_HIOP: asm_hiop(as, ir); break; + + /* Guarded assertions. 
*/ + case IR_EQ: case IR_NE: + if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) { + as->curins--; + asm_href(as, ir-1, (IROp)ir->o); + break; + } + /* fallthrough */ + case IR_LT: case IR_GE: case IR_LE: case IR_GT: + case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT: + case IR_ABC: + asm_intcomp(as, ir); + break; + + case IR_RETF: asm_retf(as, ir); break; + + /* Bit ops. */ + case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break; + case IR_BSWAP: asm_bitswap(as, ir); break; + + case IR_BAND: asm_bitop(as, ir, ARMI_AND); break; + case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break; + case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break; + + case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break; + case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break; + case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break; + case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break; + case IR_BROL: lua_assert(0); break; + + /* Arithmetic ops. */ + case IR_ADD: case IR_ADDOV: asm_arithop(as, ir, ARMI_ADD); break; + case IR_SUB: case IR_SUBOV: asm_arithop(as, ir, ARMI_SUB); break; + case IR_MUL: case IR_MULOV: asm_intmul(as, ir); break; + case IR_MOD: asm_intmod(as, ir); break; + + case IR_NEG: asm_intneg(as, ir, ARMI_RSB); break; + + case IR_MIN: asm_intmin_max(as, ir, CC_GT); break; + case IR_MAX: asm_intmin_max(as, ir, CC_LT); break; + + case IR_FPMATH: case IR_ATAN2: case IR_LDEXP: + case IR_DIV: case IR_POW: case IR_ABS: case IR_TOBIT: + lua_assert(0); /* Unused for LJ_SOFTFP. */ + break; + + /* Memory references. */ + case IR_AREF: asm_aref(as, ir); break; + case IR_HREF: asm_href(as, ir, 0); break; + case IR_HREFK: asm_hrefk(as, ir); break; + case IR_NEWREF: asm_newref(as, ir); break; + case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break; + case IR_FREF: asm_fref(as, ir); break; + case IR_STRREF: asm_strref(as, ir); break; + + /* Loads and stores. */ + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + asm_ahuvload(as, ir); + break; + case IR_FLOAD: asm_fload(as, ir); break; + case IR_XLOAD: asm_xload(as, ir); break; + case IR_SLOAD: asm_sload(as, ir); break; + + case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break; + case IR_FSTORE: asm_fstore(as, ir); break; + case IR_XSTORE: asm_xstore(as, ir); break; + + /* Allocations. */ + case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break; + case IR_TNEW: asm_tnew(as, ir); break; + case IR_TDUP: asm_tdup(as, ir); break; + case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break; + + /* Write barriers. */ + case IR_TBAR: asm_tbar(as, ir); break; + case IR_OBAR: asm_obar(as, ir); break; + + /* Type conversions. */ + case IR_CONV: asm_conv(as, ir); break; + case IR_TOSTR: asm_tostr(as, ir); break; + case IR_STRTO: asm_strto(as, ir); break; + + /* Calls. */ + case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break; + case IR_CALLXS: asm_callx(as, ir); break; + case IR_CARG: break; + + default: + setintV(&as->J->errinfo, ir->o); + lj_trace_err_info(as->J, LJ_TRERR_NYIIR); + break; + } +} + +/* -- Trace setup --------------------------------------------------------- */ + +/* Ensure there are enough stack slots for call arguments. 
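** [Editorial note, not part of the original patch] With the soft-float ARM
** ABI used here an FP (number) argument consumes an aligned pair of
** argument GPRs or two stack slots, anything else a single GPR or slot.
** A rough sketch of the counting done in the loop below:
**
**   int ngpr = REGARG_NUMGPR, nslots = 0;
**   for (each argument)
**     if (the argument is an FP number) {
**       ngpr &= ~1;
**       if (ngpr > 0) ngpr -= 2; else nslots += 2;
**     } else {
**       if (ngpr > 0) ngpr--; else nslots++;
**     }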
*/ +static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + IRRef args[CCI_NARGS_MAX]; + uint32_t i, nargs = (int)CCI_NARGS(ci); + int nslots = 0, ngpr = REGARG_NUMGPR; + asm_collectargs(as, ir, ci, args); + for (i = 0; i < nargs; i++) + if (!LJ_SOFTFP && irt_isfp(IR(args[i])->t)) { + ngpr &= ~1; + if (ngpr > 0) ngpr -= 2; else nslots += 2; + } else { + if (ngpr > 0) ngpr--; else nslots++; + } + if (nslots > as->evenspill) /* Leave room for args in stack slots. */ + as->evenspill = nslots; + return REGSP_HINT(RID_RET); +} + +static void asm_setup_target(ASMState *as) +{ + /* May need extra exit for asm_stack_check on side traces. */ + asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0)); +} + +/* -- Trace patching ------------------------------------------------------ */ + +/* Patch exit jumps of existing machine code to a new target. */ +void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) +{ + MCode *p = T->mcode; + MCode *pe = (MCode *)((char *)p + T->szmcode); + MCode *cstart = NULL, *cend = p; + MCode *mcarea = lj_mcode_patch(J, p, 0); + MCode *px = exitstub_addr(J, exitno) - 2; + for (; p < pe; p++) { + /* Look for bl_cc exitstub, replace with b_cc target. */ + uint32_t ins = *p; + if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u && + ((ins ^ (px-p)) & 0x00ffffffu) == 0) { + *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu); + cend = p+1; + if (!cstart) cstart = p; + } + } + lua_assert(cstart != NULL); + asm_cache_flush(cstart, cend); + lj_mcode_patch(J, mcarea, 1); +} + diff --git a/src/luajit2/src/lj_asm_x86.h b/src/luajit2/src/lj_asm_x86.h new file mode 100644 index 0000000000..141957c709 --- /dev/null +++ b/src/luajit2/src/lj_asm_x86.h @@ -0,0 +1,2685 @@ +/* +** x86/x64 IR assembler (SSA IR -> machine code). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Guard handling ------------------------------------------------------ */ + +/* Generate an exit stub group at the bottom of the reserved MCode memory. */ +static MCode *asm_exitstub_gen(ASMState *as, ExitNo group) +{ + ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff; + MCode *mxp = as->mcbot; + MCode *mxpstart = mxp; + if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop) + asm_mclimit(as); + /* Push low byte of exitno for each exit stub. */ + *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs; + for (i = 1; i < EXITSTUBS_PER_GROUP; i++) { + *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2); + *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i); + } + /* Push the high byte of the exitno for each exit stub group. */ + *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8); + /* Store DISPATCH at original stack slot 0. Account for the two push ops. */ + *mxp++ = XI_MOVmi; + *mxp++ = MODRM(XM_OFS8, 0, RID_ESP); + *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP); + *mxp++ = 2*sizeof(void *); + *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4; + /* Jump to exit handler which fills in the ExitState. */ + *mxp++ = XI_JMP; mxp += 4; + *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler); + /* Commit the code for this group (even if assembly fails later on). */ + lj_mcode_commitbot(as->J, mxp); + as->mcbot = mxp; + as->mclim = as->mcbot + MCLIM_REDZONE; + return mxpstart; +} + +/* Setup all needed exit stubs. 
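** [Editorial note, not part of the original patch] Each stub generated
** above is a 2-byte "push exitno&0xff" followed by a 2-byte short jump that
** chains into the common tail of the group; the tail pushes the high byte
** of the exit number, stores DISPATCH at [esp+2*sizeof(void *)] and jumps
** to lj_vm_exit_handler to fill in the ExitState. The address of an
** individual stub is therefore roughly ('group_start' is a placeholder for
** the group base returned by asm_exitstub_gen):
**
**   MCode *stub = group_start + (2+2)*(exitno % EXITSTUBS_PER_GROUP);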
*/ +static void asm_exitstub_setup(ASMState *as, ExitNo nexits) +{ + ExitNo i; + if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) + lj_trace_err(as->J, LJ_TRERR_SNAPOV); + for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++) + if (as->J->exitstubgroup[i] == NULL) + as->J->exitstubgroup[i] = asm_exitstub_gen(as, i); +} + +/* Emit conditional branch to exit for guard. +** It's important to emit this *after* all registers have been allocated, +** because rematerializations may invalidate the flags. +*/ +static void asm_guardcc(ASMState *as, int cc) +{ + MCode *target = exitstub_addr(as->J, as->snapno); + MCode *p = as->mcp; + if (LJ_UNLIKELY(p == as->invmcp)) { + as->loopinv = 1; + *(int32_t *)(p+1) = jmprel(p+5, target); + target = p; + cc ^= 1; + if (as->realign) { + emit_sjcc(as, cc, target); + return; + } + } + emit_jcc(as, cc, target); +} + +/* -- Memory operand fusion ----------------------------------------------- */ + +/* Limit linear search to this distance. Avoids O(n^2) behavior. */ +#define CONFLICT_SEARCH_LIM 31 + +/* Check if a reference is a signed 32 bit constant. */ +static int asm_isk32(ASMState *as, IRRef ref, int32_t *k) +{ + if (irref_isk(ref)) { + IRIns *ir = IR(ref); + if (ir->o != IR_KINT64) { + *k = ir->i; + return 1; + } else if (checki32((int64_t)ir_kint64(ir)->u64)) { + *k = (int32_t)ir_kint64(ir)->u64; + return 1; + } + } + return 0; +} + +/* Check if there's no conflicting instruction between curins and ref. +** Also avoid fusing loads if there are multiple references. +*/ +static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload) +{ + IRIns *ir = as->ir; + IRRef i = as->curins; + if (i > ref + CONFLICT_SEARCH_LIM) + return 0; /* Give up, ref is too far away. */ + while (--i > ref) { + if (ir[i].o == conflict) + return 0; /* Conflict found. */ + else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref)) + return 0; + } + return 1; /* Ok, no conflict. */ +} + +/* Fuse array base into memory operand. */ +static IRRef asm_fuseabase(ASMState *as, IRRef ref) +{ + IRIns *irb = IR(ref); + as->mrm.ofs = 0; + if (irb->o == IR_FLOAD) { + IRIns *ira = IR(irb->op1); + lua_assert(irb->op2 == IRFL_TAB_ARRAY); + /* We can avoid the FLOAD of t->array for colocated arrays. */ + if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE && + noconflict(as, irb->op1, IR_NEWREF, 1)) { + as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */ + return irb->op1; /* Table obj. */ + } + } else if (irb->o == IR_ADD && irref_isk(irb->op2)) { + /* Fuse base offset (vararg load). */ + as->mrm.ofs = IR(irb->op2)->i; + return irb->op1; + } + return ref; /* Otherwise use the given array base. */ +} + +/* Fuse array reference into memory operand. */ +static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow) +{ + IRIns *irx; + lua_assert(ir->o == IR_AREF); + as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow); + irx = IR(ir->op2); + if (irref_isk(ir->op2)) { + as->mrm.ofs += 8*irx->i; + as->mrm.idx = RID_NONE; + } else { + rset_clear(allow, as->mrm.base); + as->mrm.scale = XM_SCALE8; + /* Fuse a constant ADD (e.g. t[i+1]) into the offset. + ** Doesn't help much without ABCelim, but reduces register pressure. + */ + if (!LJ_64 && /* Has bad effects with negative index on x64. 
*/ + mayfuse(as, ir->op2) && ra_noreg(irx->r) && + irx->o == IR_ADD && irref_isk(irx->op2)) { + as->mrm.ofs += 8*IR(irx->op2)->i; + as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow); + } else { + as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow); + } + } +} + +/* Fuse array/hash/upvalue reference into memory operand. +** Caveat: this may allocate GPRs for the base/idx registers. Be sure to +** pass the final allow mask, excluding any GPRs used for other inputs. +** In particular: 2-operand GPR instructions need to call ra_dest() first! +*/ +static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + if (ra_noreg(ir->r)) { + switch ((IROp)ir->o) { + case IR_AREF: + if (mayfuse(as, ref)) { + asm_fusearef(as, ir, allow); + return; + } + break; + case IR_HREFK: + if (mayfuse(as, ref)) { + as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); + as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); + as->mrm.idx = RID_NONE; + return; + } + break; + case IR_UREFC: + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv; + as->mrm.ofs = ptr2addr(&uv->tv); + as->mrm.base = as->mrm.idx = RID_NONE; + return; + } + break; + default: + lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO || + ir->o == IR_KKPTR); + break; + } + } + as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow); + as->mrm.ofs = 0; + as->mrm.idx = RID_NONE; +} + +/* Fuse FLOAD/FREF reference into memory operand. */ +static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow) +{ + lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF); + as->mrm.ofs = field_ofs[ir->op2]; + as->mrm.idx = RID_NONE; + if (irref_isk(ir->op1)) { + as->mrm.ofs += IR(ir->op1)->i; + as->mrm.base = RID_NONE; + } else { + as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); + } +} + +/* Fuse string reference into memory operand. */ +static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow) +{ + IRIns *irr; + lua_assert(ir->o == IR_STRREF); + as->mrm.base = as->mrm.idx = RID_NONE; + as->mrm.scale = XM_SCALE1; + as->mrm.ofs = sizeof(GCstr); + if (irref_isk(ir->op1)) { + as->mrm.ofs += IR(ir->op1)->i; + } else { + Reg r = ra_alloc1(as, ir->op1, allow); + rset_clear(allow, r); + as->mrm.base = (uint8_t)r; + } + irr = IR(ir->op2); + if (irref_isk(ir->op2)) { + as->mrm.ofs += irr->i; + } else { + Reg r; + /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */ + if (!LJ_64 && /* Has bad effects with negative index on x64. */ + mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) { + as->mrm.ofs += IR(irr->op2)->i; + r = ra_alloc1(as, irr->op1, allow); + } else { + r = ra_alloc1(as, ir->op2, allow); + } + if (as->mrm.base == RID_NONE) + as->mrm.base = (uint8_t)r; + else + as->mrm.idx = (uint8_t)r; + } +} + +static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + as->mrm.idx = RID_NONE; + if (ir->o == IR_KPTR || ir->o == IR_KKPTR) { + as->mrm.ofs = ir->i; + as->mrm.base = RID_NONE; + } else if (ir->o == IR_STRREF) { + asm_fusestrref(as, ir, allow); + } else { + as->mrm.ofs = 0; + if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) { + /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */ + IRIns *irx; + IRRef idx; + Reg r; + if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. 
*/ + ref = ir->op1; + ir = IR(ref); + if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r))) + goto noadd; + } + as->mrm.scale = XM_SCALE1; + idx = ir->op1; + ref = ir->op2; + irx = IR(idx); + if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) { /* Try other operand. */ + idx = ir->op2; + ref = ir->op1; + irx = IR(idx); + } + if (canfuse(as, irx) && ra_noreg(irx->r)) { + if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) { + /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */ + idx = irx->op1; + as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6); + } else if (irx->o == IR_ADD && irx->op1 == irx->op2) { + /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */ + idx = irx->op1; + as->mrm.scale = XM_SCALE2; + } + } + r = ra_alloc1(as, idx, allow); + rset_clear(allow, r); + as->mrm.idx = (uint8_t)r; + } + noadd: + as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow); + } +} + +/* Fuse load into memory operand. */ +static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow) +{ + IRIns *ir = IR(ref); + if (ra_hasreg(ir->r)) { + if (allow != RSET_EMPTY) { /* Fast path. */ + ra_noweak(as, ir->r); + return ir->r; + } + fusespill: + /* Force a spill if only memory operands are allowed (asm_x87load). */ + as->mrm.base = RID_ESP; + as->mrm.ofs = ra_spill(as, ir); + as->mrm.idx = RID_NONE; + return RID_MRM; + } + if (ir->o == IR_KNUM) { + RegSet avail = as->freeset & ~as->modset & RSET_FPR; + lua_assert(allow != RSET_EMPTY); + if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */ + as->mrm.ofs = ptr2addr(ir_knum(ir)); + as->mrm.base = as->mrm.idx = RID_NONE; + return RID_MRM; + } + } else if (mayfuse(as, ref)) { + RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR; + if (ir->o == IR_SLOAD) { + if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) && + noconflict(as, ref, IR_RETF, 0)) { + as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow); + as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0); + as->mrm.idx = RID_NONE; + return RID_MRM; + } + } else if (ir->o == IR_FLOAD) { + /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */ + if ((irt_isint(ir->t) || irt_isaddr(ir->t)) && + noconflict(as, ref, IR_FSTORE, 0)) { + asm_fusefref(as, ir, xallow); + return RID_MRM; + } + } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) { + if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) { + asm_fuseahuref(as, ir->op1, xallow); + return RID_MRM; + } + } else if (ir->o == IR_XLOAD) { + /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp). + ** Fusing unaligned memory operands is ok on x86 (except for SIMD types). + */ + if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) && + noconflict(as, ref, IR_XSTORE, 0)) { + asm_fusexref(as, ir->op1, xallow); + return RID_MRM; + } + } else if (ir->o == IR_VLOAD) { + asm_fuseahuref(as, ir->op1, xallow); + return RID_MRM; + } + } + if (!(as->freeset & allow) && + (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref))) + goto fusespill; + return ra_allocref(as, ref, allow); +} + +/* -- Calls --------------------------------------------------------------- */ + +/* Generate a call to a C function. */ +static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) +{ + uint32_t n, nargs = CCI_NARGS(ci); + int32_t ofs = STACKARG_OFS; + uint32_t gprs = REGARG_GPRS; +#if LJ_64 + Reg fpr = REGARG_FIRSTFPR; +#endif + lua_assert(!(nargs > 2 && (ci->flags&CCI_FASTCALL))); /* Avoid stack adj. 
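** [Editorial note, not part of the original patch] REGARG_GPRS packs the
** argument GPRs five bits per register (the two fastcall registers on x86,
** up to six registers on POSIX/x64), so the argument loop below just peels
** them off:
**
**   uint32_t gprs = REGARG_GPRS;
**   Reg r = gprs & 31; gprs >>= 5;
**
** A result of r == 0 means the argument registers are exhausted and the
** value goes to a stack slot instead.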
*/ + if ((void *)ci->func) + emit_call(as, ci->func); + for (n = 0; n < nargs; n++) { /* Setup args. */ + IRRef ref = args[n]; + IRIns *ir = IR(ref); + Reg r; +#if LJ_64 && LJ_ABI_WIN + /* Windows/x64 argument registers are strictly positional. */ + r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31); + fpr++; gprs >>= 5; +#elif LJ_64 + /* POSIX/x64 argument registers are used in order of appearance. */ + if (irt_isfp(ir->t)) { + r = fpr <= REGARG_LASTFPR ? fpr : 0; fpr++; + } else { + r = gprs & 31; gprs >>= 5; + } +#else + if (irt_isfp(ir->t) || !(ci->flags & CCI_FASTCALL)) { + r = 0; + } else { + r = gprs & 31; gprs >>= 5; + } +#endif + if (r) { /* Argument is in a register. */ + if (r < RID_MAX_GPR && ref < ASMREF_TMP1) { +#if LJ_64 + if (ir->o == IR_KINT64) + emit_loadu64(as, r, ir_kint64(ir)->u64); + else +#endif + emit_loadi(as, r, ir->i); + } else { + lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */ + if (ra_hasreg(ir->r)) { + ra_noweak(as, ir->r); + emit_movrr(as, ir, r, ir->r); + } else { + ra_allocref(as, ref, RID2RSET(r)); + } + } + } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */ + lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. */ + if (LJ_32 && (ofs & 4) && irref_isk(ref)) { + /* Split stores for unaligned FP consts. */ + emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo); + emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi); + } else { + r = ra_alloc1(as, ref, RSET_FPR); + emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, + r, RID_ESP, ofs); + } + ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8; + } else { /* Non-FP argument is on stack. */ + if (LJ_32 && ref < ASMREF_TMP1) { + emit_movmroi(as, RID_ESP, ofs, ir->i); + } else { + r = ra_alloc1(as, ref, RSET_GPR); + emit_movtomro(as, REX_64IR(ir, r), RID_ESP, ofs); + } + ofs += sizeof(intptr_t); + } + } +} + +/* Setup result reg/sp for call. Evict scratch regs. */ +static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + RegSet drop = RSET_SCRATCH; + if ((ci->flags & CCI_NOFPRCLOBBER)) + drop &= ~RSET_FPR; + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); /* Evictions must be performed first. */ + if (ra_used(ir)) { + if (irt_isfp(ir->t)) { + int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ +#if LJ_64 + if ((ci->flags & CCI_CASTU64)) { + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */ + } else { + emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs); + } + } else { + ra_destreg(as, ir, RID_FPRET); + } +#else + /* Number result is in x87 st0 for x86 calling convention. */ + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, + dest, RID_ESP, ofs); + } + if ((ci->flags & CCI_CASTU64)) { + emit_movtomro(as, RID_RET, RID_ESP, ofs); + emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4); + } else { + emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, + irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); + } +#endif + } else { + lua_assert(!irt_ispri(ir->t)); + ra_destreg(as, ir, RID_RET); + } + } else if (LJ_32 && irt_isfp(ir->t)) { + emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. 
*/ + } +} + +static void asm_call(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX]; + const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; + asm_collectargs(as, ir, ci, args); + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); +} + +static void asm_callx(ASMState *as, IRIns *ir) +{ + IRRef args[CCI_NARGS_MAX]; + CCallInfo ci; + IRIns *irf; + ci.flags = asm_callx_flags(as, ir); + asm_collectargs(as, ir, &ci, args); + asm_setupresult(as, ir, &ci); + irf = IR(ir->op2); + if (LJ_32 && irref_isk(ir->op2)) { /* Call to constant address on x86. */ + ci.func = (ASMFunction)(void *)(uintptr_t)(uint32_t)irf->i; + } else { + /* Prefer a non-argument register or RID_RET for indirect calls. */ + RegSet allow = (RSET_GPR & ~RSET_SCRATCH)|RID2RSET(RID_RET); + Reg r = ra_alloc1(as, ir->op2, allow); + emit_rr(as, XO_GROUP5, XOg_CALL, r); + ci.func = (ASMFunction)(void *)0; + } + asm_gencall(as, &ci, args); +} + +/* -- Returns ------------------------------------------------------------- */ + +/* Return to lower frame. Guard that it goes to the right spot. */ +static void asm_retf(ASMState *as, IRIns *ir) +{ + Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); + void *pc = ir_kptr(IR(ir->op2)); + int32_t delta = 1+bc_a(*((const BCIns *)pc - 1)); + as->topslot -= (BCReg)delta; + if ((int32_t)as->topslot < 0) as->topslot = 0; + emit_setgl(as, base, jit_base); + emit_addptr(as, base, -8*delta); + asm_guardcc(as, CC_NE); + emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc)); +} + +/* -- Type conversions ---------------------------------------------------- */ + +static void asm_tointg(ASMState *as, IRIns *ir, Reg left) +{ + Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_guardcc(as, CC_P); + asm_guardcc(as, CC_NE); + emit_rr(as, XO_UCOMISD, left, tmp); + emit_rr(as, XO_CVTSI2SD, tmp, dest); + if (!(as->flags & JIT_F_SPLIT_XMM)) + emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */ + emit_rr(as, XO_CVTTSD2SI, dest, left); + /* Can't fuse since left is needed twice. */ +} + +static void asm_tobit(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + Reg tmp = ra_noreg(IR(ir->op1)->r) ? + ra_alloc1(as, ir->op1, RSET_FPR) : + ra_scratch(as, RSET_FPR); + Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp)); + emit_rr(as, XO_MOVDto, tmp, dest); + emit_mrm(as, XO_ADDSD, tmp, right); + ra_left(as, tmp, ir->op1); +} + +static void asm_conv(ASMState *as, IRIns *ir) +{ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); + int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64)); + int stfp = (st == IRT_NUM || st == IRT_FLOAT); + IRRef lref = ir->op1; + lua_assert(irt_type(ir->t) != st); + lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */ + if (irt_isfp(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_FPR); + if (stfp) { /* FP to FP conversion. */ + Reg left = asm_fuseload(as, lref, RSET_FPR); + emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left); + if (left == dest) return; /* Avoid the XO_XORPS. */ + } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */ + /* number = (2^52+2^51 .. u32) - (2^52+2^51) */ + cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000)); + Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); + if (irt_isfloat(ir->t)) + emit_rr(as, XO_CVTSD2SS, dest, dest); + emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */ + emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. 
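** [Editorial note, not part of the original patch] The 'bias' constant
** loaded below is U64x(43380000,00000000), i.e. the double 2^52+2^51, whose
** low 32 bits are free to hold the integer. A plain C sketch of the same
** trick (u32_to_double is a hypothetical helper, for illustration only):
**
**   static double u32_to_double(uint32_t u)
**   {
**     union { uint64_t u64; double d; } x;
**     x.u64 = 0x4338000000000000ULL | u;
**     return x.d - 0x1.8p+52;
**   }
**
** The OR corresponds to the MOVD + XORPS merge emitted here, the
** subtraction of 2^52+2^51 to the SUBSD above it.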
*/ + emit_loadn(as, bias, k); + emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR)); + return; + } else { /* Integer to FP conversion. */ + Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ? + ra_alloc1(as, lref, RSET_GPR) : + asm_fuseload(as, lref, RSET_GPR); + if (LJ_64 && st == IRT_U64) { + MCLabel l_end = emit_label(as); + const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000)); + emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */ + emit_sjcc(as, CC_NS, l_end); + emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */ + } + emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS, + dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left); + } + if (!(as->flags & JIT_F_SPLIT_XMM)) + emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */ + } else if (stfp) { /* FP to integer conversion. */ + if (irt_isguard(ir->t)) { + /* Checked conversions are only supported from number to int. */ + lua_assert(irt_isint(ir->t) && st == IRT_NUM); + asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + x86Op op = st == IRT_NUM ? + ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) : + ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI); + if (LJ_32 && irt_isu32(ir->t)) { /* FP to U32 conversion on x86. */ + /* u32 = (int32_t)(number - 2^31) + 2^31 */ + Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) : + ra_scratch(as, RSET_FPR); + emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000); + emit_rr(as, op, dest, tmp); + if (st == IRT_NUM) + emit_rma(as, XO_ADDSD, tmp, + lj_ir_k64_find(as->J, U64x(c1e00000,00000000))); + else + emit_rma(as, XO_ADDSS, tmp, + lj_ir_k64_find(as->J, U64x(00000000,cf000000))); + ra_left(as, tmp, lref); + } else if (LJ_64 && irt_isu64(ir->t)) { + /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */ + Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) : + ra_scratch(as, RSET_FPR); + MCLabel l_end = emit_label(as); + emit_rr(as, op, dest|REX_64, tmp); + if (st == IRT_NUM) + emit_rma(as, XO_ADDSD, tmp, + lj_ir_k64_find(as->J, U64x(c3f00000,00000000))); + else + emit_rma(as, XO_ADDSS, tmp, + lj_ir_k64_find(as->J, U64x(00000000,df800000))); + emit_sjcc(as, CC_NS, l_end); + emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest < 2^63. */ + emit_rr(as, op, dest|REX_64, tmp); + ra_left(as, tmp, lref); + } else { + Reg left = asm_fuseload(as, lref, RSET_FPR); + if (LJ_64 && irt_isu32(ir->t)) + emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */ + emit_mrm(as, op, + dest|((LJ_64 && + (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0), + left); + } + } + } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ + Reg left, dest = ra_dest(as, ir, RSET_GPR); + RegSet allow = RSET_GPR; + x86Op op; + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); + if (st == IRT_I8) { + op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX; + } else if (st == IRT_U8) { + op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX; + } else if (st == IRT_I16) { + op = XO_MOVSXw; + } else { + op = XO_MOVZXw; + } + left = asm_fuseload(as, lref, allow); + /* Add extra MOV if source is already in wrong register. */ + if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) { + Reg tmp = ra_scratch(as, allow); + emit_rr(as, op, dest, tmp); + emit_rr(as, XO_MOV, tmp, left); + } else { + emit_mrm(as, op, dest, left); + } + } else { /* 32/64 bit integer conversions. 
*/ + if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */ + Reg dest = ra_dest(as, ir, RSET_GPR); + ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ + } else if (irt_is64(ir->t)) { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (st64 || !(ir->op2 & IRCONV_SEXT)) { + /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */ + ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ + } else { /* 32 to 64 bit sign extension. */ + Reg left = asm_fuseload(as, lref, RSET_GPR); + emit_mrm(as, XO_MOVSXd, dest|REX_64, left); + } + } else { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (st64) { + Reg left = asm_fuseload(as, lref, RSET_GPR); + /* This is either a 32 bit reg/reg mov which zeroes the hiword + ** or a load of the loword from a 64 bit address. + */ + emit_mrm(as, XO_MOV, dest, left); + } else { /* 32/32 bit no-op (cast). */ + ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ + } + } + } +} + +#if LJ_32 && LJ_HASFFI +/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */ + +/* 64 bit integer to FP conversion in 32 bit mode. */ +static void asm_conv_fp_int64(ASMState *as, IRIns *ir) +{ + Reg hi = ra_alloc1(as, ir->op1, RSET_GPR); + Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi)); + int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, + dest, RID_ESP, ofs); + } + emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, + irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); + if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) { + /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */ + MCLabel l_end = emit_label(as); + emit_rma(as, XO_FADDq, XOg_FADDq, + lj_ir_k64_find(as->J, U64x(43f00000,00000000))); + emit_sjcc(as, CC_NS, l_end); + emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */ + } else { + lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64); + } + emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0); + /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */ + emit_rmro(as, XO_MOVto, hi, RID_ESP, 4); + emit_rmro(as, XO_MOVto, lo, RID_ESP, 0); +} + +/* FP to 64 bit integer conversion in 32 bit mode. */ +static void asm_conv_int64_fp(ASMState *as, IRIns *ir) +{ + IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); + IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); + Reg lo, hi; + lua_assert(st == IRT_NUM || st == IRT_FLOAT); + lua_assert(dt == IRT_I64 || dt == IRT_U64); + lua_assert(((ir-1)->op2 & IRCONV_TRUNC)); + hi = ra_dest(as, ir, RSET_GPR); + lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi)); + if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0); + /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */ + if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */ + emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4); + emit_rmro(as, XO_MOVto, lo, RID_ESP, 4); + emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff); + } + if (dt == IRT_U64) { + /* For inputs in [2^63,2^64-1] add -2^64 and convert again. 
*/ + MCLabel l_pop, l_end = emit_label(as); + emit_x87op(as, XI_FPOP); + l_pop = emit_label(as); + emit_sjmp(as, l_end); + emit_rmro(as, XO_MOV, hi, RID_ESP, 4); + if ((as->flags & JIT_F_SSE3)) + emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); + else + emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); + emit_rma(as, XO_FADDq, XOg_FADDq, + lj_ir_k64_find(as->J, U64x(c3f00000,00000000))); + emit_sjcc(as, CC_NS, l_pop); + emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */ + } + emit_rmro(as, XO_MOV, hi, RID_ESP, 4); + if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */ + emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); + } else { /* Otherwise set FPU rounding mode to truncate before the store. */ + emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); + emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0); + emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0); + emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0); + emit_loadi(as, lo, 0xc00); + emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0); + } + if (dt == IRT_U64) + emit_x87op(as, XI_FDUP); + emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd, + st == IRT_NUM ? XOg_FLDq: XOg_FLDd, + asm_fuseload(as, ir->op1, RSET_EMPTY)); +} +#endif + +static void asm_strto(ASMState *as, IRIns *ir) +{ + /* Force a spill slot for the destination register (if any). */ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum]; + IRRef args[2]; + RegSet drop = RSET_SCRATCH; + if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r)) + rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */ + ra_evictset(as, drop); + asm_guardcc(as, CC_E); + emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */ + args[0] = ir->op1; /* GCstr *str */ + args[1] = ASMREF_TMP1; /* TValue *n */ + asm_gencall(as, ci, args); + /* Store the result to the spill slot or temp slots. */ + emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64, + RID_ESP, sps_scale(ir->s)); +} + +static void asm_tostr(ASMState *as, IRIns *ir) +{ + IRIns *irl = IR(ir->op1); + IRRef args[2]; + args[0] = ASMREF_L; + as->gcsteps++; + if (irt_isnum(irl->t)) { + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum]; + args[1] = ASMREF_TMP1; /* const lua_Number * */ + asm_setupresult(as, ir, ci); /* GCstr * */ + asm_gencall(as, ci, args); + emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64, + RID_ESP, ra_spill(as, irl)); + } else { + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint]; + args[1] = ir->op1; /* int32_t k */ + asm_setupresult(as, ir, ci); /* GCstr * */ + asm_gencall(as, ci, args); + } +} + +/* -- Memory references --------------------------------------------------- */ + +static void asm_aref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_fusearef(as, ir, RSET_GPR); + if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0)) + emit_mrm(as, XO_LEA, dest, RID_MRM); + else if (as->mrm.base != dest) + emit_rr(as, XO_MOV, dest, as->mrm.base); +} + +/* Merge NE(HREF, niltv) check. */ +static MCode *merge_href_niltv(ASMState *as, IRIns *ir) +{ + /* Assumes nothing else generates NE of HREF. */ + if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins && + ra_hasreg(ir->r)) { + MCode *p = as->mcp; + p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6; + /* Ensure no loop branch inversion happened. */ + if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) { + as->mcp = p; /* Kill cmp reg, imm32 + jz exit. */ + return p + *(int32_t *)(p-4); /* Return exit address. */ + } + } + return NULL; +} + +/* Inlined hash lookup. 
Specialized for key type and for const keys. +** The equivalent C code is: +** Node *n = hashkey(t, key); +** do { +** if (lj_obj_equal(&n->key, key)) return &n->val; +** } while ((n = nextnode(n))); +** return niltv(L); +*/ +static void asm_href(ASMState *as, IRIns *ir) +{ + MCode *nilexit = merge_href_niltv(as, ir); /* Do this before any restores. */ + RegSet allow = RSET_GPR; + Reg dest = ra_dest(as, ir, allow); + Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); + Reg key = RID_NONE, tmp = RID_NONE; + IRIns *irkey = IR(ir->op2); + int isk = irref_isk(ir->op2); + IRType1 kt = irkey->t; + uint32_t khash; + MCLabel l_end, l_loop, l_next; + + if (!isk) { + rset_clear(allow, tab); + key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow); + if (!irt_isstr(kt)) + tmp = ra_scratch(as, rset_exclude(allow, key)); + } + + /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */ + l_end = emit_label(as); + if (nilexit && ir[1].o == IR_NE) { + emit_jcc(as, CC_E, nilexit); /* XI_JMP is not found by lj_asm_patchexit. */ + nilexit = NULL; + } else { + emit_loada(as, dest, niltvg(J2G(as->J))); + } + + /* Follow hash chain until the end. */ + l_loop = emit_sjcc_label(as, CC_NZ); + emit_rr(as, XO_TEST, dest, dest); + emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next)); + l_next = emit_label(as); + + /* Type and value comparison. */ + if (nilexit) + emit_jcc(as, CC_E, nilexit); + else + emit_sjcc(as, CC_E, l_end); + if (irt_isnum(kt)) { + if (isk) { + /* Assumes -0.0 is already canonicalized to +0.0. */ + emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo), + (int32_t)ir_knum(irkey)->u32.lo); + emit_sjcc(as, CC_NE, l_next); + emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi), + (int32_t)ir_knum(irkey)->u32.hi); + } else { + emit_sjcc(as, CC_P, l_next); + emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n)); + emit_sjcc(as, CC_AE, l_next); + /* The type check avoids NaN penalties and complaints from Valgrind. */ +#if LJ_64 + emit_u32(as, LJ_TISNUM); + emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it)); +#else + emit_i8(as, LJ_TISNUM); + emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); +#endif + } +#if LJ_64 + } else if (irt_islightud(kt)) { + emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64)); +#endif + } else { + if (!irt_ispri(kt)) { + lua_assert(irt_isaddr(kt)); + if (isk) + emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr), + ptr2addr(ir_kgc(irkey))); + else + emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr)); + emit_sjcc(as, CC_NE, l_next); + } + lua_assert(!irt_isnil(kt)); + emit_i8(as, irt_toitype(kt)); + emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); + } + emit_sfixup(as, l_loop); + checkmclim(as); + + /* Load main position relative to tab->node into dest. */ + khash = isk ? 
ir_khash(irkey) : 1; + if (khash == 0) { + emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node)); + } else { + emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node)); + if ((as->flags & JIT_F_PREFER_IMUL)) { + emit_i8(as, sizeof(Node)); + emit_rr(as, XO_IMULi8, dest, dest); + } else { + emit_shifti(as, XOg_SHL, dest, 3); + emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0); + } + if (isk) { + emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash); + emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); + } else if (irt_isstr(kt)) { + emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash)); + emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); + } else { /* Must match with hashrot() in lj_tab.c. */ + emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask)); + emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp); + emit_shifti(as, XOg_ROL, tmp, HASH_ROT3); + emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp); + emit_shifti(as, XOg_ROL, dest, HASH_ROT2); + emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest); + emit_shifti(as, XOg_ROL, dest, HASH_ROT1); + emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest); + if (irt_isnum(kt)) { + emit_rr(as, XO_ARITH(XOg_ADD), dest, dest); +#if LJ_64 + emit_shifti(as, XOg_SHR|REX_64, dest, 32); + emit_rr(as, XO_MOV, tmp, dest); + emit_rr(as, XO_MOVDto, key|REX_64, dest); +#else + emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4); + emit_rr(as, XO_MOVDto, key, tmp); +#endif + } else { + emit_rr(as, XO_MOV, tmp, key); + emit_rmro(as, XO_LEA, dest, key, HASH_BIAS); + } + } + } +} + +static void asm_hrefk(ASMState *as, IRIns *ir) +{ + IRIns *kslot = IR(ir->op2); + IRIns *irkey = IR(kslot->op1); + int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); + Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; + Reg node = ra_alloc1(as, ir->op1, RSET_GPR); +#if !LJ_64 + MCLabel l_exit; +#endif + lua_assert(ofs % sizeof(Node) == 0); + if (ra_hasreg(dest)) { + if (ofs != 0) { + if (dest == node && !(as->flags & JIT_F_LEA_AGU)) + emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs); + else + emit_rmro(as, XO_LEA, dest, node, ofs); + } else if (dest != node) { + emit_rr(as, XO_MOV, dest, node); + } + } + asm_guardcc(as, CC_NE); +#if LJ_64 + if (!irt_ispri(irkey->t)) { + Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node)); + emit_rmro(as, XO_CMP, key|REX_64, node, + ofs + (int32_t)offsetof(Node, key.u64)); + lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t)); + /* Assumes -0.0 is already canonicalized to +0.0. */ + emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 : + ((uint64_t)irt_toitype(irkey->t) << 32) | + (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey))); + } else { + lua_assert(!irt_isnil(irkey->t)); + emit_i8(as, irt_toitype(irkey->t)); + emit_rmro(as, XO_ARITHi8, XOg_CMP, node, + ofs + (int32_t)offsetof(Node, key.it)); + } +#else + l_exit = emit_label(as); + if (irt_isnum(irkey->t)) { + /* Assumes -0.0 is already canonicalized to +0.0. 
*/ + emit_gmroi(as, XG_ARITHi(XOg_CMP), node, + ofs + (int32_t)offsetof(Node, key.u32.lo), + (int32_t)ir_knum(irkey)->u32.lo); + emit_sjcc(as, CC_NE, l_exit); + emit_gmroi(as, XG_ARITHi(XOg_CMP), node, + ofs + (int32_t)offsetof(Node, key.u32.hi), + (int32_t)ir_knum(irkey)->u32.hi); + } else { + if (!irt_ispri(irkey->t)) { + lua_assert(irt_isgcv(irkey->t)); + emit_gmroi(as, XG_ARITHi(XOg_CMP), node, + ofs + (int32_t)offsetof(Node, key.gcr), + ptr2addr(ir_kgc(irkey))); + emit_sjcc(as, CC_NE, l_exit); + } + lua_assert(!irt_isnil(irkey->t)); + emit_i8(as, irt_toitype(irkey->t)); + emit_rmro(as, XO_ARITHi8, XOg_CMP, node, + ofs + (int32_t)offsetof(Node, key.it)); + } +#endif +} + +static void asm_newref(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey]; + IRRef args[3]; + IRIns *irkey; + Reg tmp; + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ir->op1; /* GCtab *t */ + args[2] = ASMREF_TMP1; /* cTValue *key */ + asm_setupresult(as, ir, ci); /* TValue * */ + asm_gencall(as, ci, args); + tmp = ra_releasetmp(as, ASMREF_TMP1); + irkey = IR(ir->op2); + if (irt_isnum(irkey->t)) { + /* For numbers use the constant itself or a spill slot as a TValue. */ + if (irref_isk(ir->op2)) + emit_loada(as, tmp, ir_knum(irkey)); + else + emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey)); + } else { + /* Otherwise use g->tmptv to hold the TValue. */ + if (!irref_isk(ir->op2)) { + Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp)); + emit_movtomro(as, REX_64IR(irkey, src), tmp, 0); + } else if (!irt_ispri(irkey->t)) { + emit_movmroi(as, tmp, 0, irkey->i); + } + if (!(LJ_64 && irt_islightud(irkey->t))) + emit_movmroi(as, tmp, 4, irt_toitype(irkey->t)); + emit_loada(as, tmp, &J2G(as->J)->tmptv); + } +} + +static void asm_uref(ASMState *as, IRIns *ir) +{ + /* NYI: Check that UREFO is still open and not aliasing a slot. */ + Reg dest = ra_dest(as, ir, RSET_GPR); + if (irref_isk(ir->op1)) { + GCfunc *fn = ir_kfunc(IR(ir->op1)); + MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; + emit_rma(as, XO_MOV, dest, v); + } else { + Reg uv = ra_scratch(as, RSET_GPR); + Reg func = ra_alloc1(as, ir->op1, RSET_GPR); + if (ir->o == IR_UREFC) { + emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv)); + asm_guardcc(as, CC_NE); + emit_i8(as, 1); + emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed)); + } else { + emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v)); + } + emit_rmro(as, XO_MOV, uv, func, + (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); + } +} + +static void asm_fref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_fusefref(as, ir, RSET_GPR); + emit_mrm(as, XO_LEA, dest, RID_MRM); +} + +static void asm_strref(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + asm_fusestrref(as, ir, RSET_GPR); + if (as->mrm.base == RID_NONE) + emit_loadi(as, dest, as->mrm.ofs); + else if (as->mrm.base == dest && as->mrm.idx == RID_NONE) + emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs); + else + emit_mrm(as, XO_LEA, dest, RID_MRM); +} + +/* -- Loads and stores ---------------------------------------------------- */ + +static void asm_fxload(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR); + x86Op xo; + if (ir->o == IR_FLOAD) + asm_fusefref(as, ir, RSET_GPR); + else + asm_fusexref(as, ir->op1, RSET_GPR); + /* ir->op2 is ignored -- unaligned loads are ok on x86. 
*/ + switch (irt_type(ir->t)) { + case IRT_I8: xo = XO_MOVSXb; break; + case IRT_U8: xo = XO_MOVZXb; break; + case IRT_I16: xo = XO_MOVSXw; break; + case IRT_U16: xo = XO_MOVZXw; break; + case IRT_NUM: xo = XMM_MOVRM(as); break; + case IRT_FLOAT: xo = XO_MOVSS; break; + default: + if (LJ_64 && irt_is64(ir->t)) + dest |= REX_64; + else + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); + xo = XO_MOV; + break; + } + emit_mrm(as, xo, dest, RID_MRM); +} + +static void asm_fxstore(ASMState *as, IRIns *ir) +{ + RegSet allow = RSET_GPR; + Reg src = RID_NONE, osrc = RID_NONE; + int32_t k = 0; + /* The IRT_I16/IRT_U16 stores should never be simplified for constant + ** values since mov word [mem], imm16 has a length-changing prefix. + */ + if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) || + !asm_isk32(as, ir->op2, &k)) { + RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR : + (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR; + src = osrc = ra_alloc1(as, ir->op2, allow8); + if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */ + rset_clear(allow, osrc); + src = ra_scratch(as, allow8); + } + rset_clear(allow, src); + } + if (ir->o == IR_FSTORE) + asm_fusefref(as, IR(ir->op1), allow); + else + asm_fusexref(as, ir->op1, allow); + /* ir->op2 is ignored -- unaligned stores are ok on x86. */ + if (ra_hasreg(src)) { + x86Op xo; + switch (irt_type(ir->t)) { + case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break; + case IRT_I16: case IRT_U16: xo = XO_MOVtow; break; + case IRT_NUM: xo = XO_MOVSDto; break; + case IRT_FLOAT: xo = XO_MOVSSto; break; +#if LJ_64 + case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */ +#endif + default: + if (LJ_64 && irt_is64(ir->t)) + src |= REX_64; + else + lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); + xo = XO_MOVto; + break; + } + emit_mrm(as, xo, src, RID_MRM); + if (!LJ_64 && src != osrc) { + ra_noweak(as, osrc); + emit_rr(as, XO_MOV, src, osrc); + } + } else { + if (irt_isi8(ir->t) || irt_isu8(ir->t)) { + emit_i8(as, k); + emit_mrm(as, XO_MOVmib, 0, RID_MRM); + } else { + lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || + irt_isaddr(ir->t)); + emit_i32(as, k); + emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); + } + } +} + +#if LJ_64 +static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck) +{ + if (ra_used(ir) || typecheck) { + Reg dest = ra_dest(as, ir, RSET_GPR); + if (typecheck) { + Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest)); + asm_guardcc(as, CC_NE); + emit_i8(as, -2); + emit_rr(as, XO_ARITHi8, XOg_CMP, tmp); + emit_shifti(as, XOg_SAR|REX_64, tmp, 47); + emit_rr(as, XO_MOV, tmp|REX_64, dest); + } + return dest; + } else { + return RID_NONE; + } +} +#endif + +static void asm_ahuvload(ASMState *as, IRIns *ir) +{ + lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || + (LJ_DUALNUM && irt_isint(ir->t))); +#if LJ_64 + if (irt_islightud(ir->t)) { + Reg dest = asm_load_lightud64(as, ir, 1); + if (ra_hasreg(dest)) { + asm_fuseahuref(as, ir->op1, RSET_GPR); + emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM); + } + return; + } else +#endif + if (ra_used(ir)) { + RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; + Reg dest = ra_dest(as, ir, allow); + asm_fuseahuref(as, ir->op1, RSET_GPR); + emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM); + } else { + asm_fuseahuref(as, ir->op1, RSET_GPR); + } + /* Always do the type check, even if the load result is unused. 
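** [Editorial note, not part of the original patch] A stack/table slot is an
** 8-byte TValue with the type tag in its upper 4 bytes, hence the ofs += 4
** below. The guard is roughly as follows, with 'slot', 'exit_trace' and
** TAG_LIMIT as placeholders (TAG_LIMIT stands for the constant the cmp
** below actually uses, LJ_TISNUM on x64):
**
**   uint32_t it = *(uint32_t *)((char *)slot + 4);
**   if (irt_isnum(t) ? it >= TAG_LIMIT : it != (uint32_t)irt_toitype(t))
**     goto exit_trace;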
*/ + as->mrm.ofs += 4; + asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); + if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { + lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); + emit_u32(as, LJ_TISNUM); + emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); + } else { + emit_i8(as, irt_toitype(ir->t)); + emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM); + } +} + +static void asm_ahustore(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) { + Reg src = ra_alloc1(as, ir->op2, RSET_FPR); + asm_fuseahuref(as, ir->op1, RSET_GPR); + emit_mrm(as, XO_MOVSDto, src, RID_MRM); +#if LJ_64 + } else if (irt_islightud(ir->t)) { + Reg src = ra_alloc1(as, ir->op2, RSET_GPR); + asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src)); + emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); +#endif + } else { + IRIns *irr = IR(ir->op2); + RegSet allow = RSET_GPR; + Reg src = RID_NONE; + if (!irref_isk(ir->op2)) { + src = ra_alloc1(as, ir->op2, allow); + rset_clear(allow, src); + } + asm_fuseahuref(as, ir->op1, allow); + if (ra_hasreg(src)) { + emit_mrm(as, XO_MOVto, src, RID_MRM); + } else if (!irt_ispri(irr->t)) { + lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t))); + emit_i32(as, irr->i); + emit_mrm(as, XO_MOVmi, 0, RID_MRM); + } + as->mrm.ofs += 4; + emit_i32(as, (int32_t)irt_toitype(ir->t)); + emit_mrm(as, XO_MOVmi, 0, RID_MRM); + } +} + +static void asm_sload(ASMState *as, IRIns *ir) +{ + int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); + IRType1 t = ir->t; + Reg base; + lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ + lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); + lua_assert(LJ_DUALNUM || + !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); + if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { + Reg left = ra_scratch(as, RSET_FPR); + asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ + base = ra_alloc1(as, REF_BASE, RSET_GPR); + emit_rmro(as, XMM_MOVRM(as), left, base, ofs); + t.irt = IRT_NUM; /* Continue with a regular number type check. */ +#if LJ_64 + } else if (irt_islightud(t)) { + Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK)); + if (ra_hasreg(dest)) { + base = ra_alloc1(as, REF_BASE, RSET_GPR); + emit_rmro(as, XO_MOV, dest|REX_64, base, ofs); + } + return; +#endif + } else if (ra_used(ir)) { + RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; + Reg dest = ra_dest(as, ir, allow); + base = ra_alloc1(as, REF_BASE, RSET_GPR); + lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); + if ((ir->op2 & IRSLOAD_CONVERT)) { + t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ + emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs); + } else if (irt_isnum(t)) { + emit_rmro(as, XMM_MOVRM(as), dest, base, ofs); + } else { + emit_rmro(as, XO_MOV, dest, base, ofs); + } + } else { + if (!(ir->op2 & IRSLOAD_TYPECHECK)) + return; /* No type check: avoid base alloc. */ + base = ra_alloc1(as, REF_BASE, RSET_GPR); + } + if ((ir->op2 & IRSLOAD_TYPECHECK)) { + /* Need type check, even if the load result is unused. */ + asm_guardcc(as, irt_isnum(t) ? 
CC_AE : CC_NE); + if (LJ_64 && irt_type(t) >= IRT_NUM) { + lua_assert(irt_isinteger(t) || irt_isnum(t)); + emit_u32(as, LJ_TISNUM); + emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); + } else { + emit_i8(as, irt_toitype(t)); + emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4); + } + } +} + +/* -- Allocations --------------------------------------------------------- */ + +#if LJ_HASFFI +static void asm_cnew(ASMState *as, IRIns *ir) +{ + CTState *cts = ctype_ctsG(J2G(as->J)); + CTypeID typeid = (CTypeID)IR(ir->op1)->i; + CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? + lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i; + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; + IRRef args[2]; + lua_assert(sz != CTSIZE_INVALID); + + args[0] = ASMREF_L; /* lua_State *L */ + args[1] = ASMREF_TMP1; /* MSize size */ + as->gcsteps++; + asm_setupresult(as, ir, ci); /* GCcdata * */ + + /* Initialize immutable cdata object. */ + if (ir->o == IR_CNEWI) { + RegSet allow = (RSET_GPR & ~RSET_SCRATCH); +#if LJ_64 + Reg r64 = sz == 8 ? REX_64 : 0; + if (irref_isk(ir->op2)) { + IRIns *irk = IR(ir->op2); + uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 : + (uint64_t)(uint32_t)irk->i; + if (sz == 4 || checki32((int64_t)k)) { + emit_i32(as, (int32_t)k); + emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata)); + } else { + emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata)); + emit_loadu64(as, RID_ECX, k); + } + } else { + Reg r = ra_alloc1(as, ir->op2, allow); + emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata)); + } +#else + int32_t ofs = sizeof(GCcdata); + if (sz == 8) { + ofs += 4; ir++; + lua_assert(ir->o == IR_HIOP); + } + do { + if (irref_isk(ir->op2)) { + emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i); + } else { + Reg r = ra_alloc1(as, ir->op2, allow); + emit_movtomro(as, r, RID_RET, ofs); + rset_clear(allow, r); + } + if (ofs == sizeof(GCcdata)) break; + ofs -= 4; ir--; + } while (1); +#endif + lua_assert(sz == 4 || sz == 8); + } + + /* Combine initialization of marked, gct and typeid. */ + emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked)); + emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX, + (int32_t)((~LJ_TCDATA<<8)+(typeid<<16))); + emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES); + emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite); + + asm_gencall(as, ci, args); + emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata))); +} +#else +#define asm_cnew(as, ir) ((void)0) +#endif + +/* -- Write barriers ------------------------------------------------------ */ + +static void asm_tbar(ASMState *as, IRIns *ir) +{ + Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); + Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab)); + MCLabel l_end = emit_label(as); + emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist)); + emit_setgl(as, tab, gc.grayagain); + emit_getgl(as, tmp, gc.grayagain); + emit_i8(as, ~LJ_GC_BLACK); + emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked)); + emit_sjcc(as, CC_Z, l_end); + emit_i8(as, LJ_GC_BLACK); + emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked)); +} + +static void asm_obar(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; + IRRef args[2]; + MCLabel l_end; + Reg obj; + /* No need for other object barriers (yet). 
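** [Editorial note, not part of the original patch] What gets emitted below
** is the usual incremental-GC write barrier check, specialized to closed
** upvalues (uv is the upvalue referenced by IR_UREFC, val the value being
** stored):
**
**   if ((uv->marked & LJ_GC_BLACK) && (val->marked & LJ_GC_WHITES))
**     lj_gc_barrieruv(g, &uv->tv);
**
** The two CC_Z short jumps to l_end skip the call when either test fails.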
*/ + lua_assert(IR(ir->op1)->o == IR_UREFC); + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ir->op1; /* TValue *tv */ + asm_gencall(as, ci, args); + emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J)); + obj = IR(ir->op1)->r; + emit_sjcc(as, CC_Z, l_end); + emit_i8(as, LJ_GC_WHITES); + if (irref_isk(ir->op2)) { + GCobj *vp = ir_kgc(IR(ir->op2)); + emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked); + } else { + Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj)); + emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked)); + } + emit_sjcc(as, CC_Z, l_end); + emit_i8(as, LJ_GC_BLACK); + emit_rmro(as, XO_GROUP3b, XOg_TEST, obj, + (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); +} + +/* -- FP/int arithmetic and logic operations ------------------------------ */ + +/* Load reference onto x87 stack. Force a spill to memory if needed. */ +static void asm_x87load(ASMState *as, IRRef ref) +{ + IRIns *ir = IR(ref); + if (ir->o == IR_KNUM) { + cTValue *tv = ir_knum(ir); + if (tvispzero(tv)) /* Use fldz only for +0. */ + emit_x87op(as, XI_FLDZ); + else if (tvispone(tv)) + emit_x87op(as, XI_FLD1); + else + emit_rma(as, XO_FLDq, XOg_FLDq, tv); + } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) && + !irref_isk(ir->op1) && mayfuse(as, ir->op1)) { + IRIns *iri = IR(ir->op1); + emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri)); + } else { + emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY)); + } +} + +/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */ +static int fpmjoin_pow(ASMState *as, IRIns *ir) +{ + IRIns *irp = IR(ir->op1); + if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { + IRIns *irpp = IR(irp->op1); + if (irpp == ir-2 && irpp->o == IR_FPMATH && + irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) { + /* The modified regs must match with the *.dasc implementation. */ + RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX); + IRIns *irx; + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); + ra_destreg(as, ir, RID_XMM0); + emit_call(as, lj_vm_pow_sse); + irx = IR(irpp->op1); + if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1) + irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */ + ra_left(as, RID_XMM0, irpp->op1); + ra_left(as, RID_XMM1, irp->op2); + return 1; + } + } + return 0; +} + +static void asm_fpmath(ASMState *as, IRIns *ir) +{ + IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER; + if (fpm == IRFPM_SQRT) { + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg left = asm_fuseload(as, ir->op1, RSET_FPR); + emit_mrm(as, XO_SQRTSD, dest, left); + } else if (fpm <= IRFPM_TRUNC) { + if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */ + Reg dest = ra_dest(as, ir, RSET_FPR); + Reg left = asm_fuseload(as, ir->op1, RSET_FPR); + /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op. + ** Let's pretend it's a 3-byte opcode, and compensate afterwards. + ** This is atrocious, but the alternatives are much worse. + */ + /* Round down/up/trunc == 1001/1010/1011. */ + emit_i8(as, 0x09 + fpm); + emit_mrm(as, XO_ROUNDSD, dest, left); + if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) { + as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */ + } + *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */ + } else { /* Call helper functions for SSE2 variant. 
*/ + /* The modified regs must match with the *.dasc implementation. */ + RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); + ra_destreg(as, ir, RID_XMM0); + emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse : + fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse); + ra_left(as, RID_XMM0, ir->op1); + } + } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) { + /* Rejoined to pow(). */ + } else { /* Handle x87 ops. */ + int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ + Reg dest = ir->r; + if (ra_hasreg(dest)) { + ra_free(as, dest); + ra_modified(as, dest); + emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs); + } + emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs); + switch (fpm) { /* st0 = lj_vm_*(st0) */ + case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break; + case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break; + case IRFPM_SIN: emit_x87op(as, XI_FSIN); break; + case IRFPM_COS: emit_x87op(as, XI_FCOS); break; + case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break; + case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10: + /* Note: the use of fyl2xp1 would be pointless here. When computing + ** log(1.0+eps) the precision is already lost after 1.0 is added. + ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense. + */ + emit_x87op(as, XI_FYL2X); break; + case IRFPM_OTHER: + switch (ir->o) { + case IR_ATAN2: + emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break; + case IR_LDEXP: + emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break; + default: lua_assert(0); break; + } + break; + default: lua_assert(0); break; + } + asm_x87load(as, ir->op1); + switch (fpm) { + case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break; + case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break; + case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break; + case IRFPM_OTHER: + if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2); + break; + default: break; + } + } +} + +static void asm_fppowi(ASMState *as, IRIns *ir) +{ + /* The modified regs must match with the *.dasc implementation. */ + RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX); + if (ra_hasreg(ir->r)) + rset_clear(drop, ir->r); /* Dest reg handled below. */ + ra_evictset(as, drop); + ra_destreg(as, ir, RID_XMM0); + emit_call(as, lj_vm_powi_sse); + ra_left(as, RID_XMM0, ir->op1); + ra_left(as, RID_EAX, ir->op2); +} + +#if LJ_64 && LJ_HASFFI +static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id) +{ + const CCallInfo *ci = &lj_ir_callinfo[id]; + IRRef args[2]; + args[0] = ir->op1; + args[1] = ir->op2; + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); +} +#endif + +static void asm_intmod(ASMState *as, IRIns *ir) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi]; + IRRef args[2]; + args[0] = ir->op1; + args[1] = ir->op2; + asm_setupresult(as, ir, ci); + asm_gencall(as, ci, args); +} + +static int asm_swapops(ASMState *as, IRIns *ir) +{ + IRIns *irl = IR(ir->op1); + IRIns *irr = IR(ir->op2); + lua_assert(ra_noreg(irr->r)); + if (!irm_iscomm(lj_ir_mode[ir->o])) + return 0; /* Can't swap non-commutative operations. */ + if (irref_isk(ir->op2)) + return 0; /* Don't swap constants to the left. */ + if (ra_hasreg(irl->r)) + return 1; /* Swap if left already has a register. */ + if (ra_samehint(ir->r, irr->r)) + return 1; /* Swap if dest and right have matching hints. */ + if (as->curins > as->loopref) { /* In variant part? 
*/ + if (ir->op2 < as->loopref && !irt_isphi(irr->t)) + return 0; /* Keep invariants on the right. */ + if (ir->op1 < as->loopref && !irt_isphi(irl->t)) + return 1; /* Swap invariants to the right. */ + } + if (opisfusableload(irl->o)) + return 1; /* Swap fusable loads to the right. */ + return 0; /* Otherwise don't swap. */ +} + +static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo) +{ + IRRef lref = ir->op1; + IRRef rref = ir->op2; + RegSet allow = RSET_FPR; + Reg dest; + Reg right = IR(rref)->r; + if (ra_hasreg(right)) { + rset_clear(allow, right); + ra_noweak(as, right); + } + dest = ra_dest(as, ir, allow); + if (lref == rref) { + right = dest; + } else if (ra_noreg(right)) { + if (asm_swapops(as, ir)) { + IRRef tmp = lref; lref = rref; rref = tmp; + } + right = asm_fuseload(as, rref, rset_clear(allow, dest)); + } + emit_mrm(as, xo, dest, right); + ra_left(as, dest, lref); +} + +static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa) +{ + IRRef lref = ir->op1; + IRRef rref = ir->op2; + RegSet allow = RSET_GPR; + Reg dest, right; + int32_t k = 0; + if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */ + as->flagmcp = NULL; + as->mcp += (LJ_64 && *as->mcp != XI_TEST) ? 3 : 2; + } + right = IR(rref)->r; + if (ra_hasreg(right)) { + rset_clear(allow, right); + ra_noweak(as, right); + } + dest = ra_dest(as, ir, allow); + if (lref == rref) { + right = dest; + } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) { + if (asm_swapops(as, ir)) { + IRRef tmp = lref; lref = rref; rref = tmp; + } + right = asm_fuseload(as, rref, rset_clear(allow, dest)); + } + if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */ + asm_guardcc(as, CC_O); + if (xa != XOg_X_IMUL) { + if (ra_hasreg(right)) + emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right); + else + emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k); + } else if (ra_hasreg(right)) { /* IMUL r, mrm. */ + emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right); + } else { /* IMUL r, r, k. */ + /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */ + Reg left = asm_fuseload(as, lref, RSET_GPR); + x86Op xo; + if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8; + } else { emit_i32(as, k); xo = XO_IMULi; } + emit_mrm(as, xo, REX_64IR(ir, dest), left); + return; + } + ra_left(as, dest, lref); +} + +/* LEA is really a 4-operand ADD with an independent destination register, +** up to two source registers and an immediate. One register can be scaled +** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several +** instructions. +** +** Currently only a few common cases are supported: +** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated +** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b +** - Right ADD fusion: y = a+(b+k) +** The ommited variants have already been reduced by FOLD. +** +** There are more fusion opportunities, like gathering shifts or joining +** common references. But these are probably not worth the trouble, since +** array indexing is not decomposed and already makes use of all fields +** of the ModRM operand. +*/ +static int asm_lea(ASMState *as, IRIns *ir) +{ + IRIns *irl = IR(ir->op1); + IRIns *irr = IR(ir->op2); + RegSet allow = RSET_GPR; + Reg dest; + as->mrm.base = as->mrm.idx = RID_NONE; + as->mrm.scale = XM_SCALE1; + as->mrm.ofs = 0; + if (ra_hasreg(irl->r)) { + rset_clear(allow, irl->r); + ra_noweak(as, irl->r); + as->mrm.base = irl->r; + if (irref_isk(ir->op2) || ra_hasreg(irr->r)) { + /* The PHI renaming logic does a better job in some cases. 
*/ + if (ra_hasreg(ir->r) && + ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) || + (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2))) + return 0; + if (irref_isk(ir->op2)) { + as->mrm.ofs = irr->i; + } else { + rset_clear(allow, irr->r); + ra_noweak(as, irr->r); + as->mrm.idx = irr->r; + } + } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) && + irref_isk(irr->op2)) { + Reg idx = ra_alloc1(as, irr->op1, allow); + rset_clear(allow, idx); + as->mrm.idx = (uint8_t)idx; + as->mrm.ofs = IR(irr->op2)->i; + } else { + return 0; + } + } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) && + (irref_isk(ir->op2) || irref_isk(irl->op2))) { + Reg idx, base = ra_alloc1(as, irl->op1, allow); + rset_clear(allow, base); + as->mrm.base = (uint8_t)base; + if (irref_isk(ir->op2)) { + as->mrm.ofs = irr->i; + idx = ra_alloc1(as, irl->op2, allow); + } else { + as->mrm.ofs = IR(irl->op2)->i; + idx = ra_alloc1(as, ir->op2, allow); + } + rset_clear(allow, idx); + as->mrm.idx = (uint8_t)idx; + } else { + return 0; + } + dest = ra_dest(as, ir, allow); + emit_mrm(as, XO_LEA, dest, RID_MRM); + return 1; /* Success. */ +} + +static void asm_add(ASMState *as, IRIns *ir) +{ + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_ADDSD); + else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp || + irt_is64(ir->t) || !asm_lea(as, ir)) + asm_intarith(as, ir, XOg_ADD); +} + +static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest); + ra_left(as, dest, ir->op1); +} + +static void asm_min_max(ASMState *as, IRIns *ir, int cc) +{ + Reg right, dest = ra_dest(as, ir, RSET_GPR); + IRRef lref = ir->op1, rref = ir->op2; + if (irref_isk(rref)) { lref = rref; rref = ir->op1; } + right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest)); + emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right); + emit_rr(as, XO_CMP, REX_64IR(ir, dest), right); + ra_left(as, dest, lref); +} + +static void asm_bitswap(ASMState *as, IRIns *ir) +{ + Reg dest = ra_dest(as, ir, RSET_GPR); + as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24), + REX_64IR(ir, dest), 0, 0, as->mcp, 1); + ra_left(as, dest, ir->op1); +} + +static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs) +{ + IRRef rref = ir->op2; + IRIns *irr = IR(rref); + Reg dest; + if (irref_isk(rref)) { /* Constant shifts. */ + int shift; + dest = ra_dest(as, ir, RSET_GPR); + shift = irr->i & (irt_is64(ir->t) ? 63 : 31); + switch (shift) { + case 0: break; + case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break; + default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break; + } + } else { /* Variable shifts implicitly use register cl (i.e. ecx). */ + Reg right; + dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX)); + if (dest == RID_ECX) { + dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX)); + emit_rr(as, XO_MOV, RID_ECX, dest); + } + right = irr->r; + if (ra_noreg(right)) + right = ra_allocref(as, rref, RID2RSET(RID_ECX)); + else if (right != RID_ECX) + ra_scratch(as, RID2RSET(RID_ECX)); + emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest); + if (right != RID_ECX) { + ra_noweak(as, right); + emit_rr(as, XO_MOV, RID_ECX, right); + } + } + ra_left(as, dest, ir->op1); + /* + ** Note: avoid using the flags resulting from a shift or rotate! + ** All of them cause a partial flag stall, except for r,1 shifts + ** (but not rotates). And a shift count of 0 leaves the flags unmodified. 
+ */ +} + +/* -- Comparisons --------------------------------------------------------- */ + +/* Virtual flags for unordered FP comparisons. */ +#define VCC_U 0x1000 /* Unordered. */ +#define VCC_P 0x2000 /* Needs extra CC_P branch. */ +#define VCC_S 0x4000 /* Swap avoids CC_P branch. */ +#define VCC_PS (VCC_P|VCC_S) + +/* Map of comparisons to flags. ORDER IR. */ +#define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf)) +static const uint16_t asm_compmap[IR_ABC+1] = { + /* signed non-eq unsigned flags */ + /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS), + /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0), + /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS), + /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0), + /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U), + /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS), + /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U), + /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS), + /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P), + /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P), + /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */ +}; + +/* FP and integer comparisons. */ +static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc) +{ + if (irt_isnum(ir->t)) { + IRRef lref = ir->op1; + IRRef rref = ir->op2; + Reg left, right; + MCLabel l_around; + /* + ** An extra CC_P branch is required to preserve ordered/unordered + ** semantics for FP comparisons. This can be avoided by swapping + ** the operands and inverting the condition (except for EQ and UNE). + ** So always try to swap if possible. + ** + ** Another option would be to swap operands to achieve better memory + ** operand fusion. But it's unlikely that this outweighs the cost + ** of the extra branches. + */ + if (cc & VCC_S) { /* Swap? */ + IRRef tmp = lref; lref = rref; rref = tmp; + cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */ + } + left = ra_alloc1(as, lref, RSET_FPR); + right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left)); + l_around = emit_label(as); + asm_guardcc(as, cc >> 4); + if (cc & VCC_P) { /* Extra CC_P branch required? */ + if (!(cc & VCC_U)) { + asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */ + } else if (l_around != as->invmcp) { + emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */ + } else { + /* Patched to mcloop by asm_loop_fixup. */ + as->loopinv = 2; + if (as->realign) + emit_sjcc(as, CC_P, as->mcp); + else + emit_jcc(as, CC_P, as->mcp); + } + } + emit_mrm(as, XO_UCOMISD, left, right); + } else { + IRRef lref = ir->op1, rref = ir->op2; + IROp leftop = (IROp)(IR(lref)->o); + Reg r64 = REX_64IR(ir, 0); + int32_t imm = 0; + lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isaddr(ir->t)); + /* Swap constants (only for ABC) and fusable loads to the right. */ + if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { + if ((cc & 0xc) == 0xc) cc ^= 3; /* L <-> G, LE <-> GE */ + else if ((cc & 0xa) == 0x2) cc ^= 5; /* A <-> B, AE <-> BE */ + lref = ir->op2; rref = ir->op1; + } + if (asm_isk32(as, rref, &imm)) { + IRIns *irl = IR(lref); + /* Check wether we can use test ins. Not for unsigned, since CF=0. */ + int usetest = (imm == 0 && (cc & 0xa) != 0x2); + if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) { + /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. 
*/ + Reg right, left = RID_NONE; + RegSet allow = RSET_GPR; + if (!asm_isk32(as, irl->op2, &imm)) { + left = ra_alloc1(as, irl->op2, allow); + rset_clear(allow, left); + } else { /* Try to Fuse IRT_I8/IRT_U8 loads, too. See below. */ + IRIns *irll = IR(irl->op1); + if (opisfusableload((IROp)irll->o) && + (irt_isi8(irll->t) || irt_isu8(irll->t))) { + IRType1 origt = irll->t; /* Temporarily flip types. */ + irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT; + as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ + right = asm_fuseload(as, irl->op1, RSET_GPR); + as->curins++; + irll->t = origt; + if (right != RID_MRM) goto test_nofuse; + /* Fusion succeeded, emit test byte mrm, imm8. */ + asm_guardcc(as, cc); + emit_i8(as, (imm & 0xff)); + emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM); + return; + } + } + as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ + right = asm_fuseload(as, irl->op1, allow); + as->curins++; /* Undo the above. */ + test_nofuse: + asm_guardcc(as, cc); + if (ra_noreg(left)) { + emit_i32(as, imm); + emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right); + } else { + emit_mrm(as, XO_TEST, r64 + left, right); + } + } else { + Reg left; + if (opisfusableload((IROp)irl->o) && + ((irt_isu8(irl->t) && checku8(imm)) || + ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) || + (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) { + /* Only the IRT_INT case is fused by asm_fuseload. + ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads + ** are handled here. + ** Note that cmp word [mem], imm16 should not be generated, + ** since it has a length-changing prefix. Compares of a word + ** against a sign-extended imm8 are ok, however. + */ + IRType1 origt = irl->t; /* Temporarily flip types. */ + irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT; + left = asm_fuseload(as, lref, RSET_GPR); + irl->t = origt; + if (left == RID_MRM) { /* Fusion succeeded? */ + if (irt_isu8(irl->t) || irt_isu16(irl->t)) + cc >>= 4; /* Need unsigned compare. */ + asm_guardcc(as, cc); + emit_i8(as, imm); + emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ? + XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM); + return; + } /* Otherwise handle register case as usual. */ + } else { + left = asm_fuseload(as, lref, RSET_GPR); + } + asm_guardcc(as, cc); + if (usetest && left != RID_MRM) { + /* Use test r,r instead of cmp r,0. */ + emit_rr(as, XO_TEST, r64 + left, left); + if (irl+1 == ir) /* Referencing previous ins? */ + as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */ + } else { + emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm); + } + } + } else { + Reg left = ra_alloc1(as, lref, RSET_GPR); + Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left)); + asm_guardcc(as, cc); + emit_mrm(as, XO_CMP, r64 + left, right); + } + } +} + +#if LJ_32 && LJ_HASFFI +/* 64 bit integer comparisons in 32 bit mode. */ +static void asm_comp_int64(ASMState *as, IRIns *ir) +{ + uint32_t cc = asm_compmap[(ir-1)->o]; + RegSet allow = RSET_GPR; + Reg lefthi = RID_NONE, leftlo = RID_NONE; + Reg righthi = RID_NONE, rightlo = RID_NONE; + MCLabel l_around; + x86ModRM mrm; + + as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */ + + /* Allocate/fuse hiword operands. 
*/ + if (irref_isk(ir->op2)) { + lefthi = asm_fuseload(as, ir->op1, allow); + } else { + lefthi = ra_alloc1(as, ir->op1, allow); + righthi = asm_fuseload(as, ir->op2, allow); + if (righthi == RID_MRM) { + if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); + if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); + } else { + rset_clear(allow, righthi); + } + } + mrm = as->mrm; /* Save state for hiword instruction. */ + + /* Allocate/fuse loword operands. */ + if (irref_isk((ir-1)->op2)) { + leftlo = asm_fuseload(as, (ir-1)->op1, allow); + } else { + leftlo = ra_alloc1(as, (ir-1)->op1, allow); + rightlo = asm_fuseload(as, (ir-1)->op2, allow); + if (rightlo == RID_MRM) { + if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); + if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); + } else { + rset_clear(allow, rightlo); + } + } + + /* All register allocations must be performed _before_ this point. */ + l_around = emit_label(as); + as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */ + + /* Loword comparison and branch. */ + asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */ + if (ra_noreg(rightlo)) { + int32_t imm = IR((ir-1)->op2)->i; + if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM) + emit_rr(as, XO_TEST, leftlo, leftlo); + else + emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm); + } else { + emit_mrm(as, XO_CMP, leftlo, rightlo); + } + + /* Hiword comparison and branches. */ + if ((cc & 15) != CC_NE) + emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */ + if ((cc & 15) != CC_E) + asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */ + as->mrm = mrm; /* Restore state. */ + if (ra_noreg(righthi)) { + int32_t imm = IR(ir->op2)->i; + if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM) + emit_rr(as, XO_TEST, lefthi, lefthi); + else + emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm); + } else { + emit_mrm(as, XO_CMP, lefthi, righthi); + } +} +#endif + +/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ + +/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ +static void asm_hiop(ASMState *as, IRIns *ir) +{ +#if LJ_32 && LJ_HASFFI + /* HIOP is marked as a store because it needs its own DCE logic. */ + int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ + if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; + if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ + if (usehi || uselo) { + if (irt_isfp(ir->t)) + asm_conv_fp_int64(as, ir); + else + asm_conv_int64_fp(as, ir); + } + as->curins--; /* Always skip the CONV. */ + return; + } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ + asm_comp_int64(as, ir); + return; + } + if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ + switch ((ir-1)->o) { + case IR_ADD: + asm_intarith(as, ir, uselo ? XOg_ADC : XOg_ADD); + break; + case IR_SUB: + asm_intarith(as, ir, uselo ? XOg_SBB : XOg_SUB); + break; + case IR_NEG: { + Reg dest = ra_dest(as, ir, RSET_GPR); + emit_rr(as, XO_GROUP3, XOg_NEG, dest); + if (uselo) { + emit_i8(as, 0); + emit_rr(as, XO_ARITHi8, XOg_ADC, dest); + } + ra_left(as, dest, ir->op1); + break; + } + case IR_CALLN: + case IR_CALLXS: + ra_destreg(as, ir, RID_RETHI); + if (!uselo) + ra_allocref(as, ir->op1, RID2RSET(RID_RET)); /* Mark call as used. */ + break; + case IR_CNEWI: + /* Nothing to do here. Handled by CNEWI itself. 
*/ + break; + default: lua_assert(0); break; + } +#else + UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */ +#endif +} + +/* -- Stack handling ------------------------------------------------------ */ + +/* Check Lua stack size for overflow. Use exit handler as fallback. */ +static void asm_stack_check(ASMState *as, BCReg topslot, + IRIns *irp, RegSet allow, ExitNo exitno) +{ + /* Try to get an unused temp. register, otherwise spill/restore eax. */ + Reg pbase = irp ? irp->r : RID_BASE; + Reg r = allow ? rset_pickbot(allow) : RID_EAX; + emit_jcc(as, CC_B, exitstub_addr(as->J, exitno)); + if (allow == RSET_EMPTY) /* Restore temp. register. */ + emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0); + else + ra_modified(as, r); + emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot)); + if (ra_hasreg(pbase) && pbase != r) + emit_rr(as, XO_ARITH(XOg_SUB), r, pbase); + else + emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE, + ptr2addr(&J2G(as->J)->jit_base)); + emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack)); + emit_getgl(as, r, jit_L); + if (allow == RSET_EMPTY) /* Spill temp. register. */ + emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0); +} + +/* Restore Lua stack from on-trace state. */ +static void asm_stack_restore(ASMState *as, SnapShot *snap) +{ + SnapEntry *map = &as->T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + SnapEntry *flinks = map + nent + snap->depth; + /* Store the value of all modified slots to the Lua stack. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + BCReg s = snap_slot(sn); + int32_t ofs = 8*((int32_t)s-1); + IRRef ref = snap_ref(sn); + IRIns *ir = IR(ref); + if ((sn & SNAP_NORESTORE)) + continue; + if (irt_isnum(ir->t)) { + Reg src = ra_alloc1(as, ref, RSET_FPR); + emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); + } else { + lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || + (LJ_DUALNUM && irt_isinteger(ir->t))); + if (!irref_isk(ref)) { + Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); + emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs); + } else if (!irt_ispri(ir->t)) { + emit_movmroi(as, RID_BASE, ofs, ir->i); + } + if ((sn & (SNAP_CONT|SNAP_FRAME))) { + if (s != 0) /* Do not overwrite link to previous frame. */ + emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--)); + } else { + if (!(LJ_64 && irt_islightud(ir->t))) + emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t)); + } + } + checkmclim(as); + } + lua_assert(map + nent == flinks); +} + +/* -- GC handling --------------------------------------------------------- */ + +/* Check GC threshold and do one or more GC steps. */ +static void asm_gc_check(ASMState *as) +{ + const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; + IRRef args[2]; + MCLabel l_end; + Reg tmp; + ra_evictset(as, RSET_SCRATCH); + l_end = emit_label(as); + /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ + asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ + emit_rr(as, XO_TEST, RID_RET, RID_RET); + args[0] = ASMREF_TMP1; /* global_State *g */ + args[1] = ASMREF_TMP2; /* MSize steps */ + asm_gencall(as, ci, args); + tmp = ra_releasetmp(as, ASMREF_TMP1); + emit_loada(as, tmp, J2G(as->J)); + emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), (int32_t)as->gcsteps); + /* Jump around GC step if GC total < GC threshold. 
*/ + emit_sjcc(as, CC_B, l_end); + emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold); + emit_getgl(as, tmp, gc.total); + as->gcsteps = 0; + checkmclim(as); +} + +/* -- Loop handling ------------------------------------------------------- */ + +/* Fixup the loop branch. */ +static void asm_loop_fixup(ASMState *as) +{ + MCode *p = as->mctop; + MCode *target = as->mcp; + if (as->realign) { /* Realigned loops use short jumps. */ + as->realign = NULL; /* Stop another retry. */ + lua_assert(((intptr_t)target & 15) == 0); + if (as->loopinv) { /* Inverted loop branch? */ + p -= 5; + p[0] = XI_JMP; + lua_assert(target - p >= -128); + p[-1] = (MCode)(target - p); /* Patch sjcc. */ + if (as->loopinv == 2) + p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ + } else { + lua_assert(target - p >= -128); + p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ + p[-2] = XI_JMPs; + } + } else { + MCode *newloop; + p[-5] = XI_JMP; + if (as->loopinv) { /* Inverted loop branch? */ + /* asm_guardcc already inverted the jcc and patched the jmp. */ + p -= 5; + newloop = target+4; + *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */ + if (as->loopinv == 2) { + *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */ + newloop = target+8; + } + } else { /* Otherwise just patch jmp. */ + *(int32_t *)(p-4) = (int32_t)(target - p); + newloop = target+3; + } + /* Realign small loops and shorten the loop branch. */ + if (newloop >= p - 128) { + as->realign = newloop; /* Force a retry and remember alignment. */ + as->curins = as->stopins; /* Abort asm_trace now. */ + as->T->nins = as->orignins; /* Remove any added renames. */ + } + } +} + +/* -- Head of trace ------------------------------------------------------- */ + +/* Coalesce BASE register for a root trace. */ +static void asm_head_root_base(ASMState *as) +{ + IRIns *ir = IR(REF_BASE); + Reg r = ir->r; + if (ra_hasreg(r)) { + ra_free(as, r); + if (rset_test(as->modset, r)) + ir->r = RID_INIT; /* No inheritance for modified BASE register. */ + if (r != RID_BASE) + emit_rr(as, XO_MOV, r, RID_BASE); + } +} + +/* Coalesce or reload BASE register for a side trace. */ +static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) +{ + IRIns *ir = IR(REF_BASE); + Reg r = ir->r; + if (ra_hasreg(r)) { + ra_free(as, r); + if (rset_test(as->modset, r)) + ir->r = RID_INIT; /* No inheritance for modified BASE register. */ + if (irp->r == r) { + rset_clear(allow, r); /* Mark same BASE register as coalesced. */ + } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { + rset_clear(allow, irp->r); + emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */ + } else { + emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ + } + } + return allow; +} + +/* -- Tail of trace ------------------------------------------------------- */ + +/* Fixup the tail code. */ +static void asm_tail_fixup(ASMState *as, TraceNo lnk) +{ + /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */ + MCode *p = as->mctop; + MCode *target, *q; + int32_t spadj = as->T->spadjust; + if (spadj == 0) { + p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0); + } else { + MCode *p1; + /* Patch stack adjustment. */ + if (checki8(spadj)) { + p -= 3; + p1 = p-6; + *p1 = (MCode)spadj; + } else { + p1 = p-9; + *(int32_t *)p1 = spadj; + } + if ((as->flags & JIT_F_LEA_AGU)) { +#if LJ_64 + p1[-4] = 0x48; +#endif + p1[-3] = (MCode)XI_LEA; + p1[-2] = MODRM(checki8(spadj) ? 
XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP); + p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP); + } else { +#if LJ_64 + p1[-3] = 0x48; +#endif + p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi); + p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP); + } + } + /* Patch exit branch. */ + target = lnk == TRACE_INTERP ? (MCode *)lj_vm_exit_interp : + traceref(as->J, lnk)->mcode; + *(int32_t *)(p-4) = jmprel(p, target); + p[-5] = XI_JMP; + /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ + for (q = as->mctop-1; q >= p; q--) + *q = XI_NOP; + as->mctop = p; +} + +/* Prepare tail of code. */ +static void asm_tail_prep(ASMState *as) +{ + MCode *p = as->mctop; + /* Realign and leave room for backwards loop branch or exit branch. */ + if (as->realign) { + int i = ((int)(intptr_t)as->realign) & 15; + /* Fill unused mcode tail with NOPs to make the prefetcher happy. */ + while (i-- > 0) + *--p = XI_NOP; + as->mctop = p; + p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */ + } else { + p -= 5; /* Space for exit branch (near jmp). */ + } + if (as->loopref) { + as->invmcp = as->mcp = p; + } else { + /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */ + as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0)); + as->invmcp = NULL; + } +} + +/* -- Instruction dispatch ------------------------------------------------ */ + +/* Assemble a single instruction. */ +static void asm_ir(ASMState *as, IRIns *ir) +{ + switch ((IROp)ir->o) { + /* Miscellaneous ops. */ + case IR_LOOP: asm_loop(as); break; + case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break; + case IR_USE: + ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break; + case IR_PHI: asm_phi(as, ir); break; + case IR_HIOP: asm_hiop(as, ir); break; + + /* Guarded assertions. */ + case IR_LT: case IR_GE: case IR_LE: case IR_GT: + case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT: + case IR_EQ: case IR_NE: case IR_ABC: + asm_comp(as, ir, asm_compmap[ir->o]); + break; + + case IR_RETF: asm_retf(as, ir); break; + + /* Bit ops. */ + case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break; + case IR_BSWAP: asm_bitswap(as, ir); break; + + case IR_BAND: asm_intarith(as, ir, XOg_AND); break; + case IR_BOR: asm_intarith(as, ir, XOg_OR); break; + case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break; + + case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break; + case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break; + case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break; + case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break; + case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break; + + /* Arithmetic ops. */ + case IR_ADD: asm_add(as, ir); break; + case IR_SUB: + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_SUBSD); + else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */ + asm_intarith(as, ir, XOg_SUB); + break; + case IR_MUL: + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_MULSD); + else + asm_intarith(as, ir, XOg_X_IMUL); + break; + case IR_DIV: +#if LJ_64 && LJ_HASFFI + if (!irt_isnum(ir->t)) + asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : + IRCALL_lj_carith_divu64); + else +#endif + asm_fparith(as, ir, XO_DIVSD); + break; + case IR_MOD: +#if LJ_64 && LJ_HASFFI + if (!irt_isint(ir->t)) + asm_arith64(as, ir, irt_isi64(ir->t) ? 
IRCALL_lj_carith_modi64 : + IRCALL_lj_carith_modu64); + else +#endif + asm_intmod(as, ir); + break; + + case IR_NEG: + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_XORPS); + else + asm_neg_not(as, ir, XOg_NEG); + break; + case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break; + + case IR_MIN: + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_MINSD); + else + asm_min_max(as, ir, CC_G); + break; + case IR_MAX: + if (irt_isnum(ir->t)) + asm_fparith(as, ir, XO_MAXSD); + else + asm_min_max(as, ir, CC_L); + break; + + case IR_FPMATH: case IR_ATAN2: case IR_LDEXP: + asm_fpmath(as, ir); + break; + case IR_POW: +#if LJ_64 && LJ_HASFFI + if (!irt_isnum(ir->t)) + asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 : + IRCALL_lj_carith_powu64); + else +#endif + asm_fppowi(as, ir); + break; + + /* Overflow-checking arithmetic ops. Note: don't use LEA here! */ + case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break; + case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break; + case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break; + + /* Memory references. */ + case IR_AREF: asm_aref(as, ir); break; + case IR_HREF: asm_href(as, ir); break; + case IR_HREFK: asm_hrefk(as, ir); break; + case IR_NEWREF: asm_newref(as, ir); break; + case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break; + case IR_FREF: asm_fref(as, ir); break; + case IR_STRREF: asm_strref(as, ir); break; + + /* Loads and stores. */ + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + asm_ahuvload(as, ir); + break; + case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break; + case IR_SLOAD: asm_sload(as, ir); break; + + case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break; + case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break; + + /* Allocations. */ + case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break; + case IR_TNEW: asm_tnew(as, ir); break; + case IR_TDUP: asm_tdup(as, ir); break; + case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break; + + /* Write barriers. */ + case IR_TBAR: asm_tbar(as, ir); break; + case IR_OBAR: asm_obar(as, ir); break; + + /* Type conversions. */ + case IR_TOBIT: asm_tobit(as, ir); break; + case IR_CONV: asm_conv(as, ir); break; + case IR_TOSTR: asm_tostr(as, ir); break; + case IR_STRTO: asm_strto(as, ir); break; + + /* Calls. */ + case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break; + case IR_CALLXS: asm_callx(as, ir); break; + case IR_CARG: break; + + default: + setintV(&as->J->errinfo, ir->o); + lj_trace_err_info(as->J, LJ_TRERR_NYIIR); + break; + } +} + +/* -- Trace setup --------------------------------------------------------- */ + +/* Ensure there are enough stack slots for call arguments. */ +static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) +{ + IRRef args[CCI_NARGS_MAX]; + uint32_t nargs = (int)CCI_NARGS(ci); + int nslots = 0; + asm_collectargs(as, ir, ci, args); +#if LJ_64 + if (LJ_ABI_WIN) { + nslots = (int)(nargs*2); /* Only matters for more than four args. */ + } else { + uint32_t i; + int ngpr = 6, nfpr = 8; + for (i = 0; i < nargs; i++) + if (irt_isfp(IR(args[i])->t)) { + if (nfpr > 0) nfpr--; else nslots += 2; + } else { + if (ngpr > 0) ngpr--; else nslots += 2; + } + } + if (nslots > as->evenspill) /* Leave room for args in stack slots. */ + as->evenspill = nslots; + return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); +#else + if ((ci->flags & CCI_FASTCALL)) { + lua_assert(nargs <= 2); + } else { + uint32_t i; + for (i = 0; i < nargs; i++) + nslots += irt_isnum(IR(args[i])->t) ? 
2 : 1; + if (nslots > as->evenspill) /* Leave room for args. */ + as->evenspill = nslots; + } + return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET); +#endif +} + +/* Target-specific setup. */ +static void asm_setup_target(ASMState *as) +{ + asm_exitstub_setup(as, as->T->nsnap); +} + +/* -- Trace patching ------------------------------------------------------ */ + +/* Patch exit jumps of existing machine code to a new target. */ +void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) +{ + MCode *p = T->mcode; + MCode *mcarea = lj_mcode_patch(J, p, 0); + MSize len = T->szmcode; + MCode *px = exitstub_addr(J, exitno) - 6; + MCode *pe = p+len-6; + uint32_t stateaddr = u32ptr(&J2G(J)->vmstate); + if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) + *(int32_t *)(p+len-4) = jmprel(p+len, target); + /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ + for (; p < pe; p++) + if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) { + p += LJ_64 ? 11 : 10; + break; + } + lua_assert(p < pe); + for (; p < pe; p++) { + if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) { + *(int32_t *)(p+2) = jmprel(p+6, target); + p += 5; + } + } + lj_mcode_patch(J, mcarea, 1); + VG_INVALIDATE(T->mcode, T->szmcode); +} + diff --git a/src/luajit2/src/lj_bc.c b/src/luajit2/src/lj_bc.c new file mode 100644 index 0000000000..feb8e44d5f --- /dev/null +++ b/src/luajit2/src/lj_bc.c @@ -0,0 +1,14 @@ +/* +** Bytecode instruction modes. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_bc_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_bc.h" + +/* Bytecode offsets and bytecode instruction modes. */ +#include "lj_bcdef.h" + diff --git a/src/luajit2/src/lj_bc.h b/src/luajit2/src/lj_bc.h new file mode 100644 index 0000000000..97e4d92643 --- /dev/null +++ b/src/luajit2/src/lj_bc.h @@ -0,0 +1,261 @@ +/* +** Bytecode instruction format. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_BC_H +#define _LJ_BC_H + +#include "lj_def.h" +#include "lj_arch.h" + +/* Bytecode instruction format, 32 bit wide, fields of 8 or 16 bit: +** +** +----+----+----+----+ +** | B | C | A | OP | Format ABC +** +----+----+----+----+ +** | D | A | OP | Format AD +** +-------------------- +** MSB LSB +** +** In-memory instructions are always stored in host byte order. +*/ + +/* Operand ranges and related constants. */ +#define BCMAX_A 0xff +#define BCMAX_B 0xff +#define BCMAX_C 0xff +#define BCMAX_D 0xffff +#define BCBIAS_J 0x8000 +#define NO_REG BCMAX_A +#define NO_JMP (~(BCPos)0) + +/* Macros to get instruction fields. */ +#define bc_op(i) ((BCOp)((i)&0xff)) +#define bc_a(i) ((BCReg)(((i)>>8)&0xff)) +#define bc_b(i) ((BCReg)((i)>>24)) +#define bc_c(i) ((BCReg)(((i)>>16)&0xff)) +#define bc_d(i) ((BCReg)((i)>>16)) +#define bc_j(i) ((ptrdiff_t)bc_d(i)-BCBIAS_J) + +/* Macros to set instruction fields. */ +#define setbc_byte(p, x, ofs) \ + ((uint8_t *)(p))[LJ_ENDIAN_SELECT(ofs, 3-ofs)] = (uint8_t)(x) +#define setbc_op(p, x) setbc_byte(p, (x), 0) +#define setbc_a(p, x) setbc_byte(p, (x), 1) +#define setbc_b(p, x) setbc_byte(p, (x), 3) +#define setbc_c(p, x) setbc_byte(p, (x), 2) +#define setbc_d(p, x) \ + ((uint16_t *)(p))[LJ_ENDIAN_SELECT(1, 0)] = (uint16_t)(x) +#define setbc_j(p, x) setbc_d(p, (BCPos)((int32_t)(x)+BCBIAS_J)) + +/* Macros to compose instructions. 
*/ +#define BCINS_ABC(o, a, b, c) \ + (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(b)<<24)|((BCIns)(c)<<16)) +#define BCINS_AD(o, a, d) \ + (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(d)<<16)) +#define BCINS_AJ(o, a, j) BCINS_AD(o, a, (BCPos)((int32_t)(j)+BCBIAS_J)) + +/* Bytecode instruction definition. Order matters, see below. +** +** (name, filler, Amode, Bmode, Cmode or Dmode, metamethod) +** +** The opcode name suffixes specify the type for RB/RC or RD: +** V = variable slot +** S = string const +** N = number const +** P = primitive type (~itype) +** B = unsigned byte literal +** M = multiple args/results +*/ +#define BCDEF(_) \ + /* Comparison ops. ORDER OPR. */ \ + _(ISLT, var, ___, var, lt) \ + _(ISGE, var, ___, var, lt) \ + _(ISLE, var, ___, var, le) \ + _(ISGT, var, ___, var, le) \ + \ + _(ISEQV, var, ___, var, eq) \ + _(ISNEV, var, ___, var, eq) \ + _(ISEQS, var, ___, str, eq) \ + _(ISNES, var, ___, str, eq) \ + _(ISEQN, var, ___, num, eq) \ + _(ISNEN, var, ___, num, eq) \ + _(ISEQP, var, ___, pri, eq) \ + _(ISNEP, var, ___, pri, eq) \ + \ + /* Unary test and copy ops. */ \ + _(ISTC, dst, ___, var, ___) \ + _(ISFC, dst, ___, var, ___) \ + _(IST, ___, ___, var, ___) \ + _(ISF, ___, ___, var, ___) \ + \ + /* Unary ops. */ \ + _(MOV, dst, ___, var, ___) \ + _(NOT, dst, ___, var, ___) \ + _(UNM, dst, ___, var, unm) \ + _(LEN, dst, ___, var, len) \ + \ + /* Binary ops. ORDER OPR. VV last, POW must be next. */ \ + _(ADDVN, dst, var, num, add) \ + _(SUBVN, dst, var, num, sub) \ + _(MULVN, dst, var, num, mul) \ + _(DIVVN, dst, var, num, div) \ + _(MODVN, dst, var, num, mod) \ + \ + _(ADDNV, dst, var, num, add) \ + _(SUBNV, dst, var, num, sub) \ + _(MULNV, dst, var, num, mul) \ + _(DIVNV, dst, var, num, div) \ + _(MODNV, dst, var, num, mod) \ + \ + _(ADDVV, dst, var, var, add) \ + _(SUBVV, dst, var, var, sub) \ + _(MULVV, dst, var, var, mul) \ + _(DIVVV, dst, var, var, div) \ + _(MODVV, dst, var, var, mod) \ + \ + _(POW, dst, var, var, pow) \ + _(CAT, dst, rbase, rbase, concat) \ + \ + /* Constant ops. */ \ + _(KSTR, dst, ___, str, ___) \ + _(KCDATA, dst, ___, cdata, ___) \ + _(KSHORT, dst, ___, lits, ___) \ + _(KNUM, dst, ___, num, ___) \ + _(KPRI, dst, ___, pri, ___) \ + _(KNIL, base, ___, base, ___) \ + \ + /* Upvalue and function ops. */ \ + _(UGET, dst, ___, uv, ___) \ + _(USETV, uv, ___, var, ___) \ + _(USETS, uv, ___, str, ___) \ + _(USETN, uv, ___, num, ___) \ + _(USETP, uv, ___, pri, ___) \ + _(UCLO, rbase, ___, jump, ___) \ + _(FNEW, dst, ___, func, gc) \ + \ + /* Table ops. */ \ + _(TNEW, dst, ___, lit, gc) \ + _(TDUP, dst, ___, tab, gc) \ + _(GGET, dst, ___, str, index) \ + _(GSET, var, ___, str, newindex) \ + _(TGETV, dst, var, var, index) \ + _(TGETS, dst, var, str, index) \ + _(TGETB, dst, var, lit, index) \ + _(TSETV, var, var, var, newindex) \ + _(TSETS, var, var, str, newindex) \ + _(TSETB, var, var, lit, newindex) \ + _(TSETM, base, ___, num, newindex) \ + \ + /* Calls and vararg handling. T = tail call. */ \ + _(CALLM, base, lit, lit, call) \ + _(CALL, base, lit, lit, call) \ + _(CALLMT, base, ___, lit, call) \ + _(CALLT, base, ___, lit, call) \ + _(ITERC, base, lit, lit, call) \ + _(ITERN, base, lit, lit, call) \ + _(VARG, base, lit, lit, ___) \ + _(ISNEXT, base, ___, jump, ___) \ + \ + /* Returns. */ \ + _(RETM, base, ___, lit, ___) \ + _(RET, rbase, ___, lit, ___) \ + _(RET0, rbase, ___, lit, ___) \ + _(RET1, rbase, ___, lit, ___) \ + \ + /* Loops and branches. I/J = interp/JIT, I/C/L = init/call/loop. 
*/ \ + _(FORI, base, ___, jump, ___) \ + _(JFORI, base, ___, jump, ___) \ + \ + _(FORL, base, ___, jump, ___) \ + _(IFORL, base, ___, jump, ___) \ + _(JFORL, base, ___, lit, ___) \ + \ + _(ITERL, base, ___, jump, ___) \ + _(IITERL, base, ___, jump, ___) \ + _(JITERL, base, ___, lit, ___) \ + \ + _(LOOP, rbase, ___, jump, ___) \ + _(ILOOP, rbase, ___, jump, ___) \ + _(JLOOP, rbase, ___, lit, ___) \ + \ + _(JMP, rbase, ___, jump, ___) \ + \ + /* Function headers. I/J = interp/JIT, F/V/C = fixarg/vararg/C func. */ \ + _(FUNCF, rbase, ___, ___, ___) \ + _(IFUNCF, rbase, ___, ___, ___) \ + _(JFUNCF, rbase, ___, lit, ___) \ + _(FUNCV, rbase, ___, ___, ___) \ + _(IFUNCV, rbase, ___, ___, ___) \ + _(JFUNCV, rbase, ___, lit, ___) \ + _(FUNCC, rbase, ___, ___, ___) \ + _(FUNCCW, rbase, ___, ___, ___) + +/* Bytecode opcode numbers. */ +typedef enum { +#define BCENUM(name, ma, mb, mc, mt) BC_##name, +BCDEF(BCENUM) +#undef BCENUM + BC__MAX +} BCOp; + +LJ_STATIC_ASSERT((int)BC_ISEQV+1 == (int)BC_ISNEV); +LJ_STATIC_ASSERT(((int)BC_ISEQV^1) == (int)BC_ISNEV); +LJ_STATIC_ASSERT(((int)BC_ISEQS^1) == (int)BC_ISNES); +LJ_STATIC_ASSERT(((int)BC_ISEQN^1) == (int)BC_ISNEN); +LJ_STATIC_ASSERT(((int)BC_ISEQP^1) == (int)BC_ISNEP); +LJ_STATIC_ASSERT(((int)BC_ISLT^1) == (int)BC_ISGE); +LJ_STATIC_ASSERT(((int)BC_ISLE^1) == (int)BC_ISGT); +LJ_STATIC_ASSERT(((int)BC_ISLT^3) == (int)BC_ISGT); +LJ_STATIC_ASSERT((int)BC_IST-(int)BC_ISTC == (int)BC_ISF-(int)BC_ISFC); +LJ_STATIC_ASSERT((int)BC_CALLT-(int)BC_CALL == (int)BC_CALLMT-(int)BC_CALLM); +LJ_STATIC_ASSERT((int)BC_CALLMT + 1 == (int)BC_CALLT); +LJ_STATIC_ASSERT((int)BC_RETM + 1 == (int)BC_RET); +LJ_STATIC_ASSERT((int)BC_FORL + 1 == (int)BC_IFORL); +LJ_STATIC_ASSERT((int)BC_FORL + 2 == (int)BC_JFORL); +LJ_STATIC_ASSERT((int)BC_ITERL + 1 == (int)BC_IITERL); +LJ_STATIC_ASSERT((int)BC_ITERL + 2 == (int)BC_JITERL); +LJ_STATIC_ASSERT((int)BC_LOOP + 1 == (int)BC_ILOOP); +LJ_STATIC_ASSERT((int)BC_LOOP + 2 == (int)BC_JLOOP); +LJ_STATIC_ASSERT((int)BC_FUNCF + 1 == (int)BC_IFUNCF); +LJ_STATIC_ASSERT((int)BC_FUNCF + 2 == (int)BC_JFUNCF); +LJ_STATIC_ASSERT((int)BC_FUNCV + 1 == (int)BC_IFUNCV); +LJ_STATIC_ASSERT((int)BC_FUNCV + 2 == (int)BC_JFUNCV); + +/* This solves a circular dependency problem, change as needed. */ +#define FF_next_N 15 + +/* Stack slots used by FORI/FORL, relative to operand A. */ +enum { + FORL_IDX, FORL_STOP, FORL_STEP, FORL_EXT +}; + +/* Bytecode operand modes. ORDER BCMode */ +typedef enum { + BCMnone, BCMdst, BCMbase, BCMvar, BCMrbase, BCMuv, /* Mode A must be <= 7 */ + BCMlit, BCMlits, BCMpri, BCMnum, BCMstr, BCMtab, BCMfunc, BCMjump, BCMcdata, + BCM_max +} BCMode; +#define BCM___ BCMnone + +#define bcmode_a(op) ((BCMode)(lj_bc_mode[op] & 7)) +#define bcmode_b(op) ((BCMode)((lj_bc_mode[op]>>3) & 15)) +#define bcmode_c(op) ((BCMode)((lj_bc_mode[op]>>7) & 15)) +#define bcmode_d(op) bcmode_c(op) +#define bcmode_hasd(op) ((lj_bc_mode[op] & (15<<3)) == (BCMnone<<3)) +#define bcmode_mm(op) ((MMS)(lj_bc_mode[op]>>11)) + +#define BCMODE(name, ma, mb, mc, mm) \ + (BCM##ma|(BCM##mb<<3)|(BCM##mc<<7)|(MM_##mm<<11)), +#define BCMODE_FF 0 + +static LJ_AINLINE int bc_isret(BCOp op) +{ + return (op == BC_RETM || op == BC_RET || op == BC_RET0 || op == BC_RET1); +} + +LJ_DATA const uint16_t lj_bc_mode[]; +LJ_DATA const uint16_t lj_bc_ofs[]; + +#endif diff --git a/src/luajit2/src/lj_bcdump.h b/src/luajit2/src/lj_bcdump.h new file mode 100644 index 0000000000..49b59e852d --- /dev/null +++ b/src/luajit2/src/lj_bcdump.h @@ -0,0 +1,66 @@ +/* +** Bytecode dump definitions. 
+** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_BCDUMP_H +#define _LJ_BCDUMP_H + +#include "lj_obj.h" +#include "lj_lex.h" + +/* -- Bytecode dump format ------------------------------------------------ */ + +/* +** dump = header proto+ 0U +** header = ESC 'L' 'J' versionB flagsU [namelenU nameB*] +** proto = lengthU pdata +** pdata = phead bcinsW* uvdataH* kgc* knum* [debugB*] +** phead = flagsB numparamsB framesizeB numuvB numkgcU numknU numbcU +** [debuglenU [firstlineU numlineU]] +** kgc = kgctypeU { ktab | (loU hiU) | (rloU rhiU iloU ihiU) | strB* } +** knum = intU0 | (loU1 hiU) +** ktab = narrayU nhashU karray* khash* +** karray = ktabk +** khash = ktabk ktabk +** ktabk = ktabtypeU { intU | (loU hiU) | strB* } +** +** B = 8 bit, H = 16 bit, W = 32 bit, U = ULEB128 of W, U0/U1 = ULEB128 of W+1 +*/ + +/* Bytecode dump header. */ +#define BCDUMP_HEAD1 0x1b +#define BCDUMP_HEAD2 0x4c +#define BCDUMP_HEAD3 0x4a + +/* If you perform *any* kind of private modifications to the bytecode itself +** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher. +*/ +#define BCDUMP_VERSION 1 + +/* Compatibility flags. */ +#define BCDUMP_F_BE 0x01 +#define BCDUMP_F_STRIP 0x02 +#define BCDUMP_F_FFI 0x04 + +#define BCDUMP_F_KNOWN (BCDUMP_F_FFI*2-1) + +/* Type codes for the GC constants of a prototype. Plus length for strings. */ +enum { + BCDUMP_KGC_CHILD, BCDUMP_KGC_TAB, BCDUMP_KGC_I64, BCDUMP_KGC_U64, + BCDUMP_KGC_COMPLEX, BCDUMP_KGC_STR +}; + +/* Type codes for the keys/values of a constant table. */ +enum { + BCDUMP_KTAB_NIL, BCDUMP_KTAB_FALSE, BCDUMP_KTAB_TRUE, + BCDUMP_KTAB_INT, BCDUMP_KTAB_NUM, BCDUMP_KTAB_STR +}; + +/* -- Bytecode reader/writer ---------------------------------------------- */ + +LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, + void *data, int strip); +LJ_FUNC GCproto *lj_bcread(LexState *ls); + +#endif diff --git a/src/luajit2/src/lj_bcread.c b/src/luajit2/src/lj_bcread.c new file mode 100644 index 0000000000..c5d4cd9d76 --- /dev/null +++ b/src/luajit2/src/lj_bcread.c @@ -0,0 +1,466 @@ +/* +** Bytecode reader. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_bcread_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_bc.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_cdata.h" +#endif +#include "lj_lex.h" +#include "lj_bcdump.h" +#include "lj_state.h" + +/* Reuse some lexer fields for our own purposes. */ +#define bcread_flags(ls) ls->level +#define bcread_swap(ls) \ + ((bcread_flags(ls) & BCDUMP_F_BE) != LJ_BE*BCDUMP_F_BE) +#define bcread_oldtop(L, ls) restorestack(L, ls->lastline) +#define bcread_savetop(L, ls, top) \ + ls->lastline = (BCLine)savestack(L, (top)) + +/* -- Input buffer handling ----------------------------------------------- */ + +/* Throw reader error. */ +static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em) +{ + lua_State *L = ls->L; + const char *name = ls->chunkarg; + if (*name == BCDUMP_HEAD1) name = "(binary)"; + else if (*name == '@' || *name == '=') name++; + lj_str_pushf(L, "%s: %s", name, err2msg(em)); + lj_err_throw(L, LUA_ERRSYNTAX); +} + +/* Resize input buffer. */ +static void bcread_resize(LexState *ls, MSize len) +{ + if (ls->sb.sz < len) { + MSize sz = ls->sb.sz * 2; + while (len > sz) sz = sz * 2; + lj_str_resizebuf(ls->L, &ls->sb, sz); + /* Caveat: this may change ls->sb.buf which may affect ls->p. 
*/ + } +} + +/* Refill buffer if needed. */ +static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need) +{ + lua_assert(len != 0); + if (len > LJ_MAX_MEM || ls->current < 0) + bcread_error(ls, LJ_ERR_BCBAD); + do { + const char *buf; + size_t size; + if (ls->n) { /* Copy remainder to buffer. */ + if (ls->sb.n) { /* Move down in buffer. */ + lua_assert(ls->p + ls->n == ls->sb.buf + ls->sb.n); + if (ls->n != ls->sb.n) + memmove(ls->sb.buf, ls->p, ls->n); + } else { /* Copy from buffer provided by reader. */ + bcread_resize(ls, len); + memcpy(ls->sb.buf, ls->p, ls->n); + } + ls->p = ls->sb.buf; + } + ls->sb.n = ls->n; + buf = ls->rfunc(ls->L, ls->rdata, &size); /* Get more data from reader. */ + if (buf == NULL || size == 0) { /* EOF? */ + if (need) bcread_error(ls, LJ_ERR_BCBAD); + ls->current = -1; /* Only bad if we get called again. */ + break; + } + if (ls->sb.n) { /* Append to buffer. */ + MSize n = ls->sb.n + (MSize)size; + bcread_resize(ls, n < len ? len : n); + memcpy(ls->sb.buf + ls->sb.n, buf, size); + ls->n = ls->sb.n = n; + ls->p = ls->sb.buf; + } else { /* Return buffer provided by reader. */ + ls->n = (MSize)size; + ls->p = buf; + } + } while (ls->n < len); +} + +/* Need a certain number of bytes. */ +static LJ_AINLINE void bcread_need(LexState *ls, MSize len) +{ + if (LJ_UNLIKELY(ls->n < len)) + bcread_fill(ls, len, 1); +} + +/* Want to read up to a certain number of bytes, but may need less. */ +static LJ_AINLINE void bcread_want(LexState *ls, MSize len) +{ + if (LJ_UNLIKELY(ls->n < len)) + bcread_fill(ls, len, 0); +} + +#define bcread_dec(ls) check_exp(ls->n > 0, ls->n--) +#define bcread_consume(ls, len) check_exp(ls->n >= (len), ls->n -= (len)) + +/* Return memory block from buffer. */ +static uint8_t *bcread_mem(LexState *ls, MSize len) +{ + uint8_t *p = (uint8_t *)ls->p; + bcread_consume(ls, len); + ls->p = (char *)p + len; + return p; +} + +/* Copy memory block from buffer. */ +static void bcread_block(LexState *ls, void *q, MSize len) +{ + memcpy(q, bcread_mem(ls, len), len); +} + +/* Read byte from buffer. */ +static LJ_AINLINE uint32_t bcread_byte(LexState *ls) +{ + bcread_dec(ls); + return (uint32_t)(uint8_t)*ls->p++; +} + +/* Read ULEB128 value from buffer. */ +static uint32_t bcread_uleb128(LexState *ls) +{ + const uint8_t *p = (const uint8_t *)ls->p; + uint32_t v = *p++; + if (LJ_UNLIKELY(v >= 0x80)) { + int sh = 0; + v &= 0x7f; + do { + v |= ((*p & 0x7f) << (sh += 7)); + bcread_dec(ls); + } while (*p++ >= 0x80); + } + bcread_dec(ls); + ls->p = (char *)p; + return v; +} + +/* Read top 32 bits of 33 bit ULEB128 value from buffer. */ +static uint32_t bcread_uleb128_33(LexState *ls) +{ + const uint8_t *p = (const uint8_t *)ls->p; + uint32_t v = (*p++ >> 1); + if (LJ_UNLIKELY(v >= 0x40)) { + int sh = -1; + v &= 0x3f; + do { + v |= ((*p & 0x7f) << (sh += 7)); + bcread_dec(ls); + } while (*p++ >= 0x80); + } + bcread_dec(ls); + ls->p = (char *)p; + return v; +} + +/* -- Bytecode reader ----------------------------------------------------- */ + +/* Read debug info of a prototype. */ +static void bcread_dbg(LexState *ls, GCproto *pt, MSize sizedbg) +{ + void *lineinfo = (void *)proto_lineinfo(pt); + bcread_block(ls, lineinfo, sizedbg); + /* Swap lineinfo if the endianess differs. 
*/ + if (bcread_swap(ls) && pt->numline >= 256) { + MSize i, n = pt->sizebc-1; + if (pt->numline < 65536) { + uint16_t *p = (uint16_t *)lineinfo; + for (i = 0; i < n; i++) p[i] = (uint16_t)((p[i] >> 8)|(p[i] << 8)); + } else { + uint32_t *p = (uint32_t *)lineinfo; + for (i = 0; i < n; i++) p[i] = lj_bswap(p[i]); + } + } +} + +/* Find pointer to varinfo. */ +static const void *bcread_varinfo(GCproto *pt) +{ + const uint8_t *p = proto_uvinfo(pt); + MSize n = pt->sizeuv; + if (n) while (*p++ || --n) ; + return p; +} + +/* Read a single constant key/value of a template table. */ +static void bcread_ktabk(LexState *ls, TValue *o) +{ + MSize tp = bcread_uleb128(ls); + if (tp >= BCDUMP_KTAB_STR) { + MSize len = tp - BCDUMP_KTAB_STR; + const char *p = (const char *)bcread_mem(ls, len); + setstrV(ls->L, o, lj_str_new(ls->L, p, len)); + } else if (tp == BCDUMP_KTAB_INT) { + setintV(o, (int32_t)bcread_uleb128(ls)); + } else if (tp == BCDUMP_KTAB_NUM) { + o->u32.lo = bcread_uleb128(ls); + o->u32.hi = bcread_uleb128(ls); + } else { + lua_assert(tp <= BCDUMP_KTAB_TRUE); + setitype(o, ~tp); + } +} + +/* Read a template table. */ +static GCtab *bcread_ktab(LexState *ls) +{ + MSize narray = bcread_uleb128(ls); + MSize nhash = bcread_uleb128(ls); + GCtab *t = lj_tab_new(ls->L, narray, hsize2hbits(nhash)); + if (narray) { /* Read array entries. */ + MSize i; + TValue *o = tvref(t->array); + for (i = 0; i < narray; i++, o++) + bcread_ktabk(ls, o); + } + if (nhash) { /* Read hash entries. */ + MSize i; + for (i = 0; i < nhash; i++) { + TValue key; + bcread_ktabk(ls, &key); + lua_assert(!tvisnil(&key)); + bcread_ktabk(ls, lj_tab_set(ls->L, t, &key)); + } + } + return t; +} + +/* Read GC constants of a prototype. */ +static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc) +{ + MSize i; + GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc; + for (i = 0; i < sizekgc; i++, kr++) { + MSize tp = bcread_uleb128(ls); + if (tp >= BCDUMP_KGC_STR) { + MSize len = tp - BCDUMP_KGC_STR; + const char *p = (const char *)bcread_mem(ls, len); + setgcref(*kr, obj2gco(lj_str_new(ls->L, p, len))); + } else if (tp == BCDUMP_KGC_TAB) { + setgcref(*kr, obj2gco(bcread_ktab(ls))); +#if LJ_HASFFI + } else if (tp != BCDUMP_KGC_CHILD) { + CTypeID id = tp == BCDUMP_KGC_COMPLEX ? CTID_COMPLEX_DOUBLE : + tp == BCDUMP_KGC_I64 ? CTID_INT64 : CTID_UINT64; + CTSize sz = tp == BCDUMP_KGC_COMPLEX ? 16 : 8; + GCcdata *cd = lj_cdata_new_(ls->L, id, sz); + TValue *p = (TValue *)cdataptr(cd); + setgcref(*kr, obj2gco(cd)); + p[0].u32.lo = bcread_uleb128(ls); + p[0].u32.hi = bcread_uleb128(ls); + if (tp == BCDUMP_KGC_COMPLEX) { + p[1].u32.lo = bcread_uleb128(ls); + p[1].u32.hi = bcread_uleb128(ls); + } +#endif + } else { + lua_State *L = ls->L; + lua_assert(tp == BCDUMP_KGC_CHILD); + if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */ + bcread_error(ls, LJ_ERR_BCBAD); + L->top--; + setgcref(*kr, obj2gco(protoV(L->top))); + } + } +} + +/* Read number constants of a prototype. */ +static void bcread_knum(LexState *ls, GCproto *pt, MSize sizekn) +{ + MSize i; + TValue *o = mref(pt->k, TValue); + for (i = 0; i < sizekn; i++, o++) { + int isnum = (ls->p[0] & 1); + uint32_t lo = bcread_uleb128_33(ls); + if (isnum) { + o->u32.lo = lo; + o->u32.hi = bcread_uleb128(ls); + } else { + setintV(o, lo); + } + } +} + +/* Read bytecode instructions. */ +static void bcread_bytecode(LexState *ls, GCproto *pt, MSize sizebc) +{ + BCIns *bc = proto_bc(pt); + bc[0] = BCINS_AD((pt->flags & PROTO_VARARG) ? 
BC_FUNCV : BC_FUNCF, + pt->framesize, 0); + bcread_block(ls, bc+1, (sizebc-1)*(MSize)sizeof(BCIns)); + /* Swap bytecode instructions if the endianess differs. */ + if (bcread_swap(ls)) { + MSize i; + for (i = 1; i < sizebc; i++) bc[i] = lj_bswap(bc[i]); + } +} + +/* Read upvalue refs. */ +static void bcread_uv(LexState *ls, GCproto *pt, MSize sizeuv) +{ + if (sizeuv) { + uint16_t *uv = proto_uv(pt); + bcread_block(ls, uv, sizeuv*2); + /* Swap upvalue refs if the endianess differs. */ + if (bcread_swap(ls)) { + MSize i; + for (i = 0; i < sizeuv; i++) + uv[i] = (uint16_t)((uv[i] >> 8)|(uv[i] << 8)); + } + } +} + +/* Read a prototype. */ +static GCproto *bcread_proto(LexState *ls) +{ + GCproto *pt; + MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept; + MSize ofsk, ofsuv, ofsdbg; + MSize sizedbg = 0; + BCLine firstline = 0, numline = 0; + MSize len, startn; + + /* Read length. */ + if (ls->n > 0 && ls->p[0] == 0) { /* Shortcut EOF. */ + ls->n--; ls->p++; + return NULL; + } + bcread_want(ls, 5); + len = bcread_uleb128(ls); + if (!len) return NULL; /* EOF */ + bcread_need(ls, len); + startn = ls->n; + + /* Read prototype header. */ + flags = bcread_byte(ls); + numparams = bcread_byte(ls); + framesize = bcread_byte(ls); + sizeuv = bcread_byte(ls); + sizekgc = bcread_uleb128(ls); + sizekn = bcread_uleb128(ls); + sizebc = bcread_uleb128(ls) + 1; + if (!(bcread_flags(ls) & BCDUMP_F_STRIP)) { + sizedbg = bcread_uleb128(ls); + if (sizedbg) { + firstline = bcread_uleb128(ls); + numline = bcread_uleb128(ls); + } + } + + /* Calculate total size of prototype including all colocated arrays. */ + sizept = (MSize)sizeof(GCproto) + + sizebc*(MSize)sizeof(BCIns) + + sizekgc*(MSize)sizeof(GCRef); + sizept = (sizept + (MSize)sizeof(TValue)-1) & ~((MSize)sizeof(TValue)-1); + ofsk = sizept; sizept += sizekn*(MSize)sizeof(TValue); + ofsuv = sizept; sizept += ((sizeuv+1)&~1)*2; + ofsdbg = sizept; sizept += sizedbg; + + /* Allocate prototype object and initialize its fields. */ + pt = (GCproto *)lj_mem_newgco(ls->L, (MSize)sizept); + pt->gct = ~LJ_TPROTO; + pt->numparams = (uint8_t)numparams; + pt->framesize = (uint8_t)framesize; + pt->sizebc = sizebc; + setmref(pt->k, (char *)pt + ofsk); + setmref(pt->uv, (char *)pt + ofsuv); + pt->sizekgc = 0; /* Set to zero until fully initialized. */ + pt->sizekn = sizekn; + pt->sizept = sizept; + pt->sizeuv = (uint8_t)sizeuv; + pt->flags = (uint8_t)flags; + pt->trace = 0; + setgcref(pt->chunkname, obj2gco(ls->chunkname)); + + /* Close potentially uninitialized gap between bc and kgc. */ + *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(sizekgc+1)) = 0; + + /* Read bytecode instructions and upvalue refs. */ + bcread_bytecode(ls, pt, sizebc); + bcread_uv(ls, pt, sizeuv); + + /* Read constants. */ + bcread_kgc(ls, pt, sizekgc); + pt->sizekgc = sizekgc; + bcread_knum(ls, pt, sizekn); + + /* Read and initialize debug info. */ + pt->firstline = firstline; + pt->numline = numline; + if (sizedbg) { + MSize sizeli = (sizebc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2); + setmref(pt->lineinfo, (char *)pt + ofsdbg); + setmref(pt->uvinfo, (char *)pt + ofsdbg + sizeli); + bcread_dbg(ls, pt, sizedbg); + setmref(pt->varinfo, bcread_varinfo(pt)); + } else { + setmref(pt->lineinfo, NULL); + setmref(pt->uvinfo, NULL); + setmref(pt->varinfo, NULL); + } + + if (len != startn - ls->n) + bcread_error(ls, LJ_ERR_BCBAD); + return pt; +} + +/* Read and check header of bytecode dump. 
*/ +static int bcread_header(LexState *ls) +{ + uint32_t flags; + bcread_want(ls, 3+5+5); + if (bcread_byte(ls) != BCDUMP_HEAD2 || + bcread_byte(ls) != BCDUMP_HEAD3 || + bcread_byte(ls) != BCDUMP_VERSION) return 0; + bcread_flags(ls) = flags = bcread_uleb128(ls); + if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0; +#if !LJ_HASFFI + if ((flags & BCDUMP_F_FFI)) return 0; +#endif + if ((flags & BCDUMP_F_STRIP)) { + ls->chunkname = lj_str_newz(ls->L, ls->chunkarg); + } else { + MSize len = bcread_uleb128(ls); + bcread_need(ls, len); + ls->chunkname = lj_str_new(ls->L, (const char *)bcread_mem(ls, len), len); + } + return 1; /* Ok. */ +} + +/* Read a bytecode dump. */ +GCproto *lj_bcread(LexState *ls) +{ + lua_State *L = ls->L; + lua_assert(ls->current == BCDUMP_HEAD1); + bcread_savetop(L, ls, L->top); + lj_str_resetbuf(&ls->sb); + /* Check for a valid bytecode dump header. */ + if (!bcread_header(ls)) + bcread_error(ls, LJ_ERR_BCFMT); + for (;;) { /* Process all prototypes in the bytecode dump. */ + GCproto *pt = bcread_proto(ls); + if (!pt) break; + setprotoV(L, L->top, pt); + incr_top(L); + } + if (ls->n != 0 || L->top-1 != bcread_oldtop(L, ls)) + bcread_error(ls, LJ_ERR_BCBAD); + /* Pop off last prototype. */ + L->top--; + return protoV(L->top); +} + diff --git a/src/luajit2/src/lj_bcwrite.c b/src/luajit2/src/lj_bcwrite.c new file mode 100644 index 0000000000..de9b4cf29e --- /dev/null +++ b/src/luajit2/src/lj_bcwrite.c @@ -0,0 +1,389 @@ +/* +** Bytecode writer. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_bcwrite_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_str.h" +#include "lj_bc.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#if LJ_HASJIT +#include "lj_dispatch.h" +#include "lj_jit.h" +#endif +#include "lj_bcdump.h" +#include "lj_vm.h" + +/* Context for bytecode writer. */ +typedef struct BCWriteCtx { + SBuf sb; /* Output buffer. */ + lua_State *L; /* Lua state. */ + GCproto *pt; /* Root prototype. */ + lua_Writer wfunc; /* Writer callback. */ + void *wdata; /* Writer callback data. */ + int strip; /* Strip debug info. */ + int status; /* Status from writer callback. */ +} BCWriteCtx; + +/* -- Output buffer handling ---------------------------------------------- */ + +/* Resize buffer if needed. */ +static LJ_NOINLINE void bcwrite_resize(BCWriteCtx *ctx, MSize len) +{ + MSize sz = ctx->sb.sz * 2; + while (ctx->sb.n + len > sz) sz = sz * 2; + lj_str_resizebuf(ctx->L, &ctx->sb, sz); +} + +/* Need a certain amount of buffer space. */ +static LJ_AINLINE void bcwrite_need(BCWriteCtx *ctx, MSize len) +{ + if (LJ_UNLIKELY(ctx->sb.n + len > ctx->sb.sz)) + bcwrite_resize(ctx, len); +} + +/* Add memory block to buffer. */ +static void bcwrite_block(BCWriteCtx *ctx, const void *p, MSize len) +{ + uint8_t *q = (uint8_t *)(ctx->sb.buf + ctx->sb.n); + MSize i; + ctx->sb.n += len; + for (i = 0; i < len; i++) q[i] = ((uint8_t *)p)[i]; +} + +/* Add byte to buffer. */ +static LJ_AINLINE void bcwrite_byte(BCWriteCtx *ctx, uint8_t b) +{ + ctx->sb.buf[ctx->sb.n++] = b; +} + +/* Add ULEB128 value to buffer. */ +static void bcwrite_uleb128(BCWriteCtx *ctx, uint32_t v) +{ + MSize n = ctx->sb.n; + uint8_t *p = (uint8_t *)ctx->sb.buf; + for (; v >= 0x80; v >>= 7) + p[n++] = (uint8_t)((v & 0x7f) | 0x80); + p[n++] = (uint8_t)v; + ctx->sb.n = n; +} + +/* -- Bytecode writer ----------------------------------------------------- */ + +/* Write a single constant key/value of a template table. 
*/ +static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow) +{ + bcwrite_need(ctx, 1+10); + if (tvisstr(o)) { + const GCstr *str = strV(o); + MSize len = str->len; + bcwrite_need(ctx, 5+len); + bcwrite_uleb128(ctx, BCDUMP_KTAB_STR+len); + bcwrite_block(ctx, strdata(str), len); + } else if (tvisint(o)) { + bcwrite_byte(ctx, BCDUMP_KTAB_INT); + bcwrite_uleb128(ctx, intV(o)); + } else if (tvisnum(o)) { + if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */ + lua_Number num = numV(o); + int32_t k = lj_num2int(num); + if (num == (lua_Number)k) { /* -0 is never a constant. */ + bcwrite_byte(ctx, BCDUMP_KTAB_INT); + bcwrite_uleb128(ctx, k); + return; + } + } + bcwrite_byte(ctx, BCDUMP_KTAB_NUM); + bcwrite_uleb128(ctx, o->u32.lo); + bcwrite_uleb128(ctx, o->u32.hi); + } else { + lua_assert(tvispri(o)); + bcwrite_byte(ctx, BCDUMP_KTAB_NIL+~itype(o)); + } +} + +/* Write a template table. */ +static void bcwrite_ktab(BCWriteCtx *ctx, const GCtab *t) +{ + MSize narray = 0, nhash = 0; + if (t->asize > 0) { /* Determine max. length of array part. */ + ptrdiff_t i; + TValue *array = tvref(t->array); + for (i = (ptrdiff_t)t->asize-1; i >= 0; i--) + if (!tvisnil(&array[i])) + break; + narray = (MSize)(i+1); + } + if (t->hmask > 0) { /* Count number of used hash slots. */ + MSize i, hmask = t->hmask; + Node *node = noderef(t->node); + for (i = 0; i <= hmask; i++) + nhash += !tvisnil(&node[i].val); + } + /* Write number of array slots and hash slots. */ + bcwrite_uleb128(ctx, narray); + bcwrite_uleb128(ctx, nhash); + if (narray) { /* Write array entries (may contain nil). */ + MSize i; + TValue *o = tvref(t->array); + for (i = 0; i < narray; i++, o++) + bcwrite_ktabk(ctx, o, 1); + } + if (nhash) { /* Write hash entries. */ + MSize i = nhash; + Node *node = noderef(t->node) + t->hmask; + for (;; node--) + if (!tvisnil(&node->val)) { + bcwrite_ktabk(ctx, &node->key, 0); + bcwrite_ktabk(ctx, &node->val, 1); + if (--i == 0) break; + } + } +} + +/* Write GC constants of a prototype. */ +static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt) +{ + MSize i, sizekgc = pt->sizekgc; + GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc; + for (i = 0; i < sizekgc; i++, kr++) { + GCobj *o = gcref(*kr); + MSize tp, need = 1; + /* Determine constant type and needed size. */ + if (o->gch.gct == ~LJ_TSTR) { + tp = BCDUMP_KGC_STR + gco2str(o)->len; + need = 5+gco2str(o)->len; + } else if (o->gch.gct == ~LJ_TPROTO) { + lua_assert((pt->flags & PROTO_CHILD)); + tp = BCDUMP_KGC_CHILD; +#if LJ_HASFFI + } else if (o->gch.gct == ~LJ_TCDATA) { + CTypeID id = gco2cd(o)->typeid; + need = 1+4*5; + if (id == CTID_INT64) { + tp = BCDUMP_KGC_I64; + } else if (id == CTID_UINT64) { + tp = BCDUMP_KGC_U64; + } else { + lua_assert(id == CTID_COMPLEX_DOUBLE); + tp = BCDUMP_KGC_COMPLEX; + } +#endif + } else { + lua_assert(o->gch.gct == ~LJ_TTAB); + tp = BCDUMP_KGC_TAB; + } + /* Write constant type. */ + bcwrite_need(ctx, need); + bcwrite_uleb128(ctx, tp); + /* Write constant data (if any). */ + if (tp >= BCDUMP_KGC_STR) { + bcwrite_block(ctx, strdata(gco2str(o)), gco2str(o)->len); + } else if (tp == BCDUMP_KGC_TAB) { + bcwrite_ktab(ctx, gco2tab(o)); +#if LJ_HASFFI + } else if (tp != BCDUMP_KGC_CHILD) { + cTValue *p = (TValue *)cdataptr(gco2cd(o)); + bcwrite_uleb128(ctx, p[0].u32.lo); + bcwrite_uleb128(ctx, p[0].u32.hi); + if (tp == BCDUMP_KGC_COMPLEX) { + bcwrite_uleb128(ctx, p[1].u32.lo); + bcwrite_uleb128(ctx, p[1].u32.hi); + } +#endif + } + } +} + +/* Write number constants of a prototype. 
*/ +static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt) +{ + MSize i, sizekn = pt->sizekn; + cTValue *o = mref(pt->k, TValue); + bcwrite_need(ctx, 10*sizekn); + for (i = 0; i < sizekn; i++, o++) { + int32_t k; + if (tvisint(o)) { + k = intV(o); + goto save_int; + } else { + /* Write a 33 bit ULEB128 for the int (lsb=0) or loword (lsb=1). */ + if (!LJ_DUALNUM) { /* Narrow number constants to integers. */ + lua_Number num = numV(o); + k = lj_num2int(num); + if (num == (lua_Number)k) { /* -0 is never a constant. */ + save_int: + bcwrite_uleb128(ctx, 2*(uint32_t)k); + if (k < 0) ctx->sb.buf[ctx->sb.n-1] |= 0x10; + continue; + } + } + bcwrite_uleb128(ctx, 1+2*o->u32.lo); + if (o->u32.lo >= 0x80000000u) ctx->sb.buf[ctx->sb.n-1] |= 0x10; + bcwrite_uleb128(ctx, o->u32.hi); + } + } +} + +/* Write bytecode instructions. */ +static void bcwrite_bytecode(BCWriteCtx *ctx, GCproto *pt) +{ + MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */ +#if LJ_HASJIT + uint8_t *p = (uint8_t *)&ctx->sb.buf[ctx->sb.n]; +#endif + bcwrite_block(ctx, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns)); +#if LJ_HASJIT + /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */ + if ((pt->flags & PROTO_ILOOP) || pt->trace) { + jit_State *J = L2J(ctx->L); + MSize i; + for (i = 0; i < nbc; i++, p += sizeof(BCIns)) { + BCOp op = (BCOp)p[LJ_ENDIAN_SELECT(0, 3)]; + if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP || + op == BC_JFORI) { + p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL); + } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) { + BCReg rd = p[LJ_ENDIAN_SELECT(2, 1)] + (p[LJ_ENDIAN_SELECT(3, 0)] << 8); + BCIns ins = traceref(J, rd)->startins; + p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_JFORL+BC_FORL); + p[LJ_ENDIAN_SELECT(2, 1)] = bc_c(ins); + p[LJ_ENDIAN_SELECT(3, 0)] = bc_b(ins); + } + } + } +#endif +} + +/* Write prototype. */ +static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt) +{ + MSize sizedbg = 0; + + /* Recursively write children of prototype. */ + if ((pt->flags & PROTO_CHILD)) { + ptrdiff_t i, n = pt->sizekgc; + GCRef *kr = mref(pt->k, GCRef) - 1; + for (i = 0; i < n; i++, kr--) { + GCobj *o = gcref(*kr); + if (o->gch.gct == ~LJ_TPROTO) + bcwrite_proto(ctx, gco2pt(o)); + } + } + + /* Start writing the prototype info to a buffer. */ + lj_str_resetbuf(&ctx->sb); + ctx->sb.n = 5; /* Leave room for final size. */ + bcwrite_need(ctx, 4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2); + + /* Write prototype header. */ + bcwrite_byte(ctx, (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI))); + bcwrite_byte(ctx, pt->numparams); + bcwrite_byte(ctx, pt->framesize); + bcwrite_byte(ctx, pt->sizeuv); + bcwrite_uleb128(ctx, pt->sizekgc); + bcwrite_uleb128(ctx, pt->sizekn); + bcwrite_uleb128(ctx, pt->sizebc-1); + if (!ctx->strip) { + if (proto_lineinfo(pt)) + sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt); + bcwrite_uleb128(ctx, sizedbg); + if (sizedbg) { + bcwrite_uleb128(ctx, pt->firstline); + bcwrite_uleb128(ctx, pt->numline); + } + } + + /* Write bytecode instructions and upvalue refs. */ + bcwrite_bytecode(ctx, pt); + bcwrite_block(ctx, proto_uv(pt), pt->sizeuv*2); + + /* Write constants. */ + bcwrite_kgc(ctx, pt); + bcwrite_knum(ctx, pt); + + /* Write debug info, if not stripped. */ + if (sizedbg) { + bcwrite_need(ctx, sizedbg); + bcwrite_block(ctx, proto_lineinfo(pt), sizedbg); + } + + /* Pass buffer to writer function. 
*/ + if (ctx->status == 0) { + MSize n = ctx->sb.n - 5; + MSize nn = 1 + lj_fls(n)/7; + ctx->sb.n = 5 - nn; + bcwrite_uleb128(ctx, n); /* Fill in final size. */ + lua_assert(ctx->sb.n == 5); + ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf+5-nn, nn+n, ctx->wdata); + } +} + +/* Write header of bytecode dump. */ +static void bcwrite_header(BCWriteCtx *ctx) +{ + GCstr *chunkname = proto_chunkname(ctx->pt); + const char *name = strdata(chunkname); + MSize len = chunkname->len; + lj_str_resetbuf(&ctx->sb); + bcwrite_need(ctx, 5+5+len); + bcwrite_byte(ctx, BCDUMP_HEAD1); + bcwrite_byte(ctx, BCDUMP_HEAD2); + bcwrite_byte(ctx, BCDUMP_HEAD3); + bcwrite_byte(ctx, BCDUMP_VERSION); + bcwrite_byte(ctx, (ctx->strip ? BCDUMP_F_STRIP : 0) + + (LJ_BE ? BCDUMP_F_BE : 0) + + ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0)); + if (!ctx->strip) { + bcwrite_uleb128(ctx, len); + bcwrite_block(ctx, name, len); + } + ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf, ctx->sb.n, ctx->wdata); +} + +/* Write footer of bytecode dump. */ +static void bcwrite_footer(BCWriteCtx *ctx) +{ + if (ctx->status == 0) { + uint8_t zero = 0; + ctx->status = ctx->wfunc(ctx->L, &zero, 1, ctx->wdata); + } +} + +/* Protected callback for bytecode writer. */ +static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud) +{ + BCWriteCtx *ctx = (BCWriteCtx *)ud; + UNUSED(dummy); + lj_str_resizebuf(L, &ctx->sb, 1024); /* Avoids resize for most prototypes. */ + bcwrite_header(ctx); + bcwrite_proto(ctx, ctx->pt); + bcwrite_footer(ctx); + return NULL; +} + +/* Write bytecode for a prototype. */ +int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data, + int strip) +{ + BCWriteCtx ctx; + int status; + ctx.L = L; + ctx.pt = pt; + ctx.wfunc = writer; + ctx.wdata = data; + ctx.strip = strip; + ctx.status = 0; + lj_str_initbuf(&ctx.sb); + status = lj_vm_cpcall(L, NULL, &ctx, cpwriter); + if (status == 0) status = ctx.status; + lj_str_freebuf(G(ctx.L), &ctx.sb); + return status; +} + diff --git a/src/luajit2/src/lj_carith.c b/src/luajit2/src/lj_carith.c new file mode 100644 index 0000000000..8f644d8358 --- /dev/null +++ b/src/luajit2/src/lj_carith.c @@ -0,0 +1,309 @@ +/* +** C data arithmetic. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_ctype.h" +#include "lj_cconv.h" +#include "lj_cdata.h" +#include "lj_carith.h" + +/* -- C data arithmetic --------------------------------------------------- */ + +/* Binary operands of an operator converted to ctypes. */ +typedef struct CDArith { + uint8_t *p[2]; + CType *ct[2]; +} CDArith; + +/* Check arguments for arithmetic metamethods. 
*/ +static int carith_checkarg(lua_State *L, CTState *cts, CDArith *ca) +{ + TValue *o = L->base; + int ok = 1; + MSize i; + if (o+1 >= L->top) + lj_err_argt(L, 1, LUA_TCDATA); + for (i = 0; i < 2; i++, o++) { + if (tviscdata(o)) { + GCcdata *cd = cdataV(o); + CType *ct = ctype_raw(cts, (CTypeID)cd->typeid); + uint8_t *p = (uint8_t *)cdataptr(cd); + if (ctype_isptr(ct->info)) { + p = (uint8_t *)cdata_getptr(p, ct->size); + if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct); + } + ca->ct[i] = ct; + ca->p[i] = p; + } else if (tvisint(o)) { + ca->ct[i] = ctype_get(cts, CTID_INT32); + ca->p[i] = (uint8_t *)&o->i; + } else if (tvisnum(o)) { + ca->ct[i] = ctype_get(cts, CTID_DOUBLE); + ca->p[i] = (uint8_t *)&o->n; + } else if (tvisnil(o)) { + ca->ct[i] = ctype_get(cts, CTID_P_VOID); + ca->p[i] = (uint8_t *)0; + } else { + ca->ct[i] = NULL; + ca->p[i] = NULL; + ok = 0; + } + } + return ok; +} + +/* Pointer arithmetic. */ +static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm) +{ + CType *ctp = ca->ct[0]; + uint8_t *pp = ca->p[0]; + ptrdiff_t idx; + CTSize sz; + CTypeID id; + GCcdata *cd; + if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) { + if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) && + (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) { + uint8_t *pp2 = ca->p[1]; + if (mm == MM_eq) { /* Pointer equality. Incompatible pointers are ok. */ + setboolV(L->top-1, (pp == pp2)); + return 1; + } + if (!lj_cconv_compatptr(cts, ctp, ca->ct[1], CCF_IGNQUAL)) + return 0; + if (mm == MM_sub) { /* Pointer difference. */ + intptr_t diff; + sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */ + if (sz == 0 || sz == CTSIZE_INVALID) + return 0; + diff = ((intptr_t)pp - (intptr_t)pp2) / (int32_t)sz; + /* All valid pointer differences on x64 are in (-2^47, +2^47), + ** which fits into a double without loss of precision. + */ + setintptrV(L->top-1, (int32_t)diff); + return 1; + } else if (mm == MM_lt) { /* Pointer comparison (unsigned). */ + setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2)); + return 1; + } else { + lua_assert(mm == MM_le); + setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2)); + return 1; + } + } + if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(ca->ct[1]->info))) + return 0; + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[1], + (uint8_t *)&idx, ca->p[1], 0); + if (mm == MM_sub) idx = -idx; + } else if (mm == MM_add && ctype_isnum(ctp->info) && + (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) { + /* Swap pointer and index. */ + ctp = ca->ct[1]; pp = ca->p[1]; + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[0], + (uint8_t *)&idx, ca->p[0], 0); + } else { + return 0; + } + sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */ + if (sz == CTSIZE_INVALID) + return 0; + pp += idx*(int32_t)sz; /* Compute pointer + index. */ + id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)), + CTSIZE_PTR); + cd = lj_cdata_new(cts, id, CTSIZE_PTR); + *(uint8_t **)cdataptr(cd) = pp; + setcdataV(L, L->top-1, cd); + lj_gc_check(L); + return 1; +} + +/* 64 bit integer arithmetic. */ +static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm) +{ + if (ctype_isnum(ca->ct[0]->info) && ca->ct[0]->size <= 8 && + ctype_isnum(ca->ct[1]->info) && ca->ct[1]->size <= 8) { + CTypeID id = (((ca->ct[0]->info & CTF_UNSIGNED) && ca->ct[0]->size == 8) || + ((ca->ct[1]->info & CTF_UNSIGNED) && ca->ct[1]->size == 8)) ? 
+ CTID_UINT64 : CTID_INT64; + CType *ct = ctype_get(cts, id); + GCcdata *cd; + uint64_t u0, u1, *up; + lj_cconv_ct_ct(cts, ct, ca->ct[0], (uint8_t *)&u0, ca->p[0], 0); + if (mm != MM_unm) + lj_cconv_ct_ct(cts, ct, ca->ct[1], (uint8_t *)&u1, ca->p[1], 0); + switch (mm) { + case MM_eq: + setboolV(L->top-1, (u0 == u1)); + return 1; + case MM_lt: + setboolV(L->top-1, + id == CTID_INT64 ? ((int64_t)u0 < (int64_t)u1) : (u0 < u1)); + return 1; + case MM_le: + setboolV(L->top-1, + id == CTID_INT64 ? ((int64_t)u0 <= (int64_t)u1) : (u0 <= u1)); + return 1; + default: break; + } + cd = lj_cdata_new(cts, id, 8); + up = (uint64_t *)cdataptr(cd); + setcdataV(L, L->top-1, cd); + switch (mm) { + case MM_add: *up = u0 + u1; break; + case MM_sub: *up = u0 - u1; break; + case MM_mul: *up = u0 * u1; break; + case MM_div: + if (id == CTID_INT64) + *up = (uint64_t)lj_carith_divi64((int64_t)u0, (int64_t)u1); + else + *up = lj_carith_divu64(u0, u1); + break; + case MM_mod: + if (id == CTID_INT64) + *up = (uint64_t)lj_carith_modi64((int64_t)u0, (int64_t)u1); + else + *up = lj_carith_modu64(u0, u1); + break; + case MM_pow: + if (id == CTID_INT64) + *up = (uint64_t)lj_carith_powi64((int64_t)u0, (int64_t)u1); + else + *up = lj_carith_powu64(u0, u1); + break; + case MM_unm: *up = (uint64_t)-(int64_t)u0; break; + default: lua_assert(0); break; + } + lj_gc_check(L); + return 1; + } + return 0; +} + +/* Handle ctype arithmetic metamethods. */ +static int lj_carith_meta(lua_State *L, CTState *cts, CDArith *ca, MMS mm) +{ + cTValue *tv = NULL; + if (tviscdata(L->base)) + tv = lj_ctype_meta(cts, cdataV(L->base)->typeid, mm); + if (!tv && L->base+1 < L->top && tviscdata(L->base+1)) + tv = lj_ctype_meta(cts, cdataV(L->base+1)->typeid, mm); + if (!tv) { + const char *repr[2]; + int i; + for (i = 0; i < 2; i++) { + if (ca->ct[i]) + repr[i] = strdata(lj_ctype_repr(L, ctype_typeid(cts, ca->ct[i]), NULL)); + else + repr[i] = typename(&L->base[i]); + } + lj_err_callerv(L, mm == MM_len ? LJ_ERR_FFI_BADLEN : + mm == MM_concat ? LJ_ERR_FFI_BADCONCAT : + mm < MM_add ? LJ_ERR_FFI_BADCOMP : LJ_ERR_FFI_BADARITH, + repr[0], repr[1]); + } + return lj_meta_tailcall(L, tv); +} + +/* Arithmetic operators for cdata. */ +int lj_carith_op(lua_State *L, MMS mm) +{ + CTState *cts = ctype_cts(L); + CDArith ca; + if (carith_checkarg(L, cts, &ca)) { + if (carith_int64(L, cts, &ca, mm) || carith_ptr(L, cts, &ca, mm)) { + copyTV(L, &G(L)->tmptv2, L->top-1); /* Remember for trace recorder. */ + return 1; + } + } + return lj_carith_meta(L, cts, &ca, mm); +} + +/* -- 64 bit integer arithmetic helpers ----------------------------------- */ + +#if LJ_32 && LJ_HASJIT +/* Signed/unsigned 64 bit multiplication. */ +int64_t lj_carith_mul64(int64_t a, int64_t b) +{ + return a * b; +} +#endif + +/* Unsigned 64 bit division. */ +uint64_t lj_carith_divu64(uint64_t a, uint64_t b) +{ + if (b == 0) return U64x(80000000,00000000); + return a / b; +} + +/* Signed 64 bit division. */ +int64_t lj_carith_divi64(int64_t a, int64_t b) +{ + if (b == 0 || (a == (int64_t)U64x(80000000,00000000) && b == -1)) + return U64x(80000000,00000000); + return a / b; +} + +/* Unsigned 64 bit modulo. */ +uint64_t lj_carith_modu64(uint64_t a, uint64_t b) +{ + if (b == 0) return U64x(80000000,00000000); + return a % b; +} + +/* Signed 64 bit modulo. */ +int64_t lj_carith_modi64(int64_t a, int64_t b) +{ + if (b == 0) return U64x(80000000,00000000); + if (a == (int64_t)U64x(80000000,00000000) && b == -1) return 0; + return a % b; +} + +/* Unsigned 64 bit x^k. 
*/ +uint64_t lj_carith_powu64(uint64_t x, uint64_t k) +{ + uint64_t y; + if (k == 0) + return 1; + for (; (k & 1) == 0; k >>= 1) x *= x; + y = x; + if ((k >>= 1) != 0) { + for (;;) { + x *= x; + if (k == 1) break; + if (k & 1) y *= x; + k >>= 1; + } + y *= x; + } + return y; +} + +/* Signed 64 bit x^k. */ +int64_t lj_carith_powi64(int64_t x, int64_t k) +{ + if (k == 0) + return 1; + if (k < 0) { + if (x == 0) + return U64x(7fffffff,ffffffff); + else if (x == 1) + return 1; + else if (x == -1) + return (k & 1) ? -1 : 1; + else + return 0; + } + return (int64_t)lj_carith_powu64((uint64_t)x, (uint64_t)k); +} + +#endif diff --git a/src/luajit2/src/lj_carith.h b/src/luajit2/src/lj_carith.h new file mode 100644 index 0000000000..c39594acd3 --- /dev/null +++ b/src/luajit2/src/lj_carith.h @@ -0,0 +1,27 @@ +/* +** C data arithmetic. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CARITH_H +#define _LJ_CARITH_H + +#include "lj_obj.h" + +#if LJ_HASFFI + +LJ_FUNC int lj_carith_op(lua_State *L, MMS mm); + +#if LJ_32 && LJ_HASJIT +LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k); +#endif +LJ_FUNC uint64_t lj_carith_divu64(uint64_t a, uint64_t b); +LJ_FUNC int64_t lj_carith_divi64(int64_t a, int64_t b); +LJ_FUNC uint64_t lj_carith_modu64(uint64_t a, uint64_t b); +LJ_FUNC int64_t lj_carith_modi64(int64_t a, int64_t b); +LJ_FUNC uint64_t lj_carith_powu64(uint64_t x, uint64_t k); +LJ_FUNC int64_t lj_carith_powi64(int64_t x, int64_t k); + +#endif + +#endif diff --git a/src/luajit2/src/lj_ccall.c b/src/luajit2/src/lj_ccall.c new file mode 100644 index 0000000000..6a163b0e47 --- /dev/null +++ b/src/luajit2/src/lj_ccall.c @@ -0,0 +1,609 @@ +/* +** FFI C call handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_ctype.h" +#include "lj_cconv.h" +#include "lj_cdata.h" +#include "lj_ccall.h" +#include "lj_trace.h" + +/* Target-specific handling of register arguments. */ +#if LJ_TARGET_X86 +/* -- x86 calling conventions --------------------------------------------- */ + +#if LJ_ABI_WIN + +#define CCALL_HANDLE_STRUCTRET \ + /* Return structs bigger than 8 by reference (on stack only). */ \ + cc->retref = (sz > 8); \ + if (cc->retref) cc->stack[nsp++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET + +#else + +#define CCALL_HANDLE_STRUCTRET \ + cc->retref = 1; /* Return all structs by reference (in reg or on stack). */ \ + if (ngpr < maxgpr) \ + cc->gpr[ngpr++] = (GPRArg)dp; \ + else \ + cc->stack[nsp++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET \ + /* Return complex float in GPRs and complex double by reference. */ \ + cc->retref = (sz > 8); \ + if (cc->retref) { \ + if (ngpr < maxgpr) \ + cc->gpr[ngpr++] = (GPRArg)dp; \ + else \ + cc->stack[nsp++] = (GPRArg)dp; \ + } + +#endif + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (!cc->retref) \ + *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */ + +#define CCALL_HANDLE_STRUCTARG \ + ngpr = maxgpr; /* Pass all structs by value on the stack. */ + +#define CCALL_HANDLE_COMPLEXARG \ + isfp = 1; /* Pass complex by value on stack. */ + +#define CCALL_HANDLE_REGARG \ + if (!isfp) { /* Only non-FP values may be passed in registers. */ \ + if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \ + if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. 
*/ \ + } else if (ngpr + 1 <= maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#elif LJ_TARGET_X64 && LJ_ABI_WIN +/* -- Windows/x64 calling conventions ------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + /* Return structs of size 1, 2, 4 or 8 in a GPR. */ \ + cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \ + if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (!cc->retref) \ + *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */ + +#define CCALL_HANDLE_STRUCTARG \ + /* Pass structs of size 1, 2, 4 or 8 in a GPR by value. */ \ + if (!(sz == 1 || sz == 2 || sz == 4 || sz == 8)) { \ + rp = cdataptr(lj_cdata_new(cts, did, sz)); \ + sz = CTSIZE_PTR; /* Pass all other structs by reference. */ \ + } + +#define CCALL_HANDLE_COMPLEXARG \ + /* Pass complex float in a GPR and complex double by reference. */ \ + if (sz != 2*sizeof(float)) { \ + rp = cdataptr(lj_cdata_new(cts, did, sz)); \ + sz = CTSIZE_PTR; \ + } + +/* Windows/x64 argument registers are strictly positional (use ngpr). */ +#define CCALL_HANDLE_REGARG \ + if (isfp) { \ + if (ngpr < 4) { dp = &cc->fpr[ngpr++]; nfpr = ngpr; goto done; } \ + } else { \ + if (ngpr < 4) { dp = &cc->gpr[ngpr++]; goto done; } \ + } + +#elif LJ_TARGET_X64 +/* -- POSIX/x64 calling conventions --------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + int rcl[2]; rcl[0] = rcl[1] = 0; \ + if (ccall_classify_struct(cts, ctr, rcl, 0)) { \ + cc->retref = 1; /* Return struct by reference. */ \ + cc->gpr[ngpr++] = (GPRArg)dp; \ + } else { \ + cc->retref = 0; /* Return small structs in registers. */ \ + } + +#define CCALL_HANDLE_STRUCTRET2 \ + int rcl[2]; rcl[0] = rcl[1] = 0; \ + ccall_classify_struct(cts, ctr, rcl, 0); \ + ccall_struct_ret(cc, rcl, dp, ctr->size); + +#define CCALL_HANDLE_COMPLEXRET \ + /* Complex values are returned in one or two FPRs. */ \ + cc->retref = 0; + +#define CCALL_HANDLE_COMPLEXRET2 \ + if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPR. */ \ + *(int64_t *)dp = cc->fpr[0].l[0]; \ + } else { /* Copy non-contiguous complex double from FPRs. */ \ + ((int64_t *)dp)[0] = cc->fpr[0].l[0]; \ + ((int64_t *)dp)[1] = cc->fpr[1].l[0]; \ + } + +#define CCALL_HANDLE_STRUCTARG \ + int rcl[2]; rcl[0] = rcl[1] = 0; \ + if (!ccall_classify_struct(cts, d, rcl, 0)) { \ + cc->nsp = nsp; cc->ngpr = ngpr; cc->nfpr = nfpr; \ + if (ccall_struct_arg(cc, cts, d, rcl, o, narg)) goto err_nyi; \ + nsp = cc->nsp; ngpr = cc->ngpr; nfpr = cc->nfpr; \ + continue; \ + } /* Pass all other structs by value on stack. */ + +#define CCALL_HANDLE_COMPLEXARG \ + isfp = 2; /* Pass complex in FPRs or on stack. Needs postprocessing. */ + +#define CCALL_HANDLE_REGARG \ + if (isfp) { /* Try to pass argument in FPRs. */ \ + if (nfpr + n <= CCALL_NARG_FPR) { \ + dp = &cc->fpr[nfpr]; \ + nfpr += n; \ + goto done; \ + } \ + } else { /* Try to pass argument in GPRs. */ \ + /* Note that reordering is explicitly allowed in the x64 ABI. */ \ + if (n <= 2 && ngpr + n <= maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } \ + } + +#elif LJ_TARGET_ARM +/* -- ARM calling conventions --------------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + /* Return structs of size <= 4 in a GPR. 
*/ \ + cc->retref = !(sz <= 4); \ + if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET \ + cc->retref = 1; /* Return all complex values by reference. */ \ + cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET2 \ + UNUSED(dp); /* Nothing to do. */ + +#define CCALL_HANDLE_STRUCTARG \ + /* Pass all structs by value in registers and/or on the stack. */ + +#define CCALL_HANDLE_COMPLEXARG \ + /* Pass complex by value in 2 or 4 GPRs. */ + +/* ARM has a softfp ABI. */ +#define CCALL_HANDLE_REGARG \ + if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \ + if (ngpr < maxgpr) \ + ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ + else \ + nsp = (nsp + 1u) & ~1u; /* Align argument on stack. */ \ + } \ + if (ngpr < maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + if (ngpr + n > maxgpr) { \ + nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \ + if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \ + ngpr = maxgpr; \ + } else { \ + ngpr += n; \ + } \ + goto done; \ + } + +#elif LJ_TARGET_PPCSPE +/* -- PPC/SPE calling conventions ----------------------------------------- */ + +#define CCALL_HANDLE_STRUCTRET \ + cc->retref = 1; /* Return all structs by reference. */ \ + cc->gpr[ngpr++] = (GPRArg)dp; + +#define CCALL_HANDLE_COMPLEXRET \ + /* Complex values are returned in 2 or 4 GPRs. */ \ + cc->retref = 0; + +#define CCALL_HANDLE_COMPLEXRET2 \ + memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */ + +#define CCALL_HANDLE_STRUCTARG \ + rp = cdataptr(lj_cdata_new(cts, did, sz)); \ + sz = CTSIZE_PTR; /* Pass all structs by reference. */ + +#define CCALL_HANDLE_COMPLEXARG \ + /* Pass complex by value in 2 or 4 GPRs. */ + +/* PPC/SPE has a softfp ABI. */ +#define CCALL_HANDLE_REGARG \ + if (n > 1) { /* Doesn't fit in a single GPR? */ \ + lua_assert(n == 2 || n == 4); /* int64_t, double or complex (float). */ \ + if (n == 2) \ + ngpr = (ngpr + 1u) & ~1u; /* Only align 64 bit value to regpair. */ \ + else if (ngpr + n > maxgpr) \ + ngpr = maxgpr; /* Prevent reordering. */ \ + } \ + if (ngpr + n <= maxgpr) { \ + dp = &cc->gpr[ngpr]; \ + ngpr += n; \ + goto done; \ + } + +#else +#error "missing calling convention definitions for this architecture" +#endif + +#ifndef CCALL_HANDLE_STRUCTRET2 +#define CCALL_HANDLE_STRUCTRET2 \ + memcpy(dp, sp, ctr->size); /* Copy struct return value from GPRs. */ +#endif + +/* -- x64 struct classification ------------------------------------------- */ + +#if LJ_TARGET_X64 && !LJ_ABI_WIN + +/* Register classes for x64 struct classification. */ +#define CCALL_RCL_INT 1 +#define CCALL_RCL_SSE 2 +#define CCALL_RCL_MEM 4 +/* NYI: classify vectors. */ + +static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs); + +/* Classify a C type. */ +static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs) +{ + if (ctype_isarray(ct->info)) { + CType *cct = ctype_rawchild(cts, ct); + CTSize eofs, esz = cct->size, asz = ct->size; + for (eofs = 0; eofs < asz; eofs += esz) + ccall_classify_ct(cts, cct, rcl, ofs+eofs); + } else if (ctype_isstruct(ct->info)) { + ccall_classify_struct(cts, ct, rcl, ofs); + } else { + int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT; + lua_assert(ctype_hassize(ct->info)); + if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */ + rcl[(ofs >= 8)] |= cl; + } +} + +/* Recursively classify a struct based on its fields. 
*/ +static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs) +{ + if (ct->size > 16) return CCALL_RCL_MEM; /* Too big, gets memory class. */ + while (ct->sib) { + CTSize fofs; + ct = ctype_get(cts, ct->sib); + fofs = ofs+ct->size; + if (ctype_isfield(ct->info)) + ccall_classify_ct(cts, ctype_rawchild(cts, ct), rcl, fofs); + else if (ctype_isbitfield(ct->info)) + rcl[(fofs >= 8)] |= CCALL_RCL_INT; /* NYI: unaligned bitfields? */ + else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) + ccall_classify_struct(cts, ctype_child(cts, ct), rcl, fofs); + } + return ((rcl[0]|rcl[1]) & CCALL_RCL_MEM); /* Memory class? */ +} + +/* Try to split up a small struct into registers. */ +static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl) +{ + MSize ngpr = cc->ngpr, nfpr = cc->nfpr; + uint32_t i; + for (i = 0; i < 2; i++) { + lua_assert(!(rcl[i] & CCALL_RCL_MEM)); + if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ + if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */ + cc->gpr[ngpr++] = dp[i]; + } else if ((rcl[i] & CCALL_RCL_SSE)) { + if (nfpr >= CCALL_NARG_FPR) return 1; /* Register overflow. */ + cc->fpr[nfpr++].l[0] = dp[i]; + } + } + cc->ngpr = ngpr; cc->nfpr = nfpr; + return 0; /* Ok. */ +} + +/* Pass a small struct argument. */ +static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl, + TValue *o, int narg) +{ + GPRArg dp[2]; + dp[0] = dp[1] = 0; + /* Convert to temp. struct. */ + lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); + if (!ccall_struct_reg(cc, dp, rcl)) { /* Register overflow? Pass on stack. */ + MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1; + if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */ + cc->nsp = nsp + n; + memcpy(&cc->stack[nsp], dp, n*CTSIZE_PTR); + } + return 0; /* Ok. */ +} + +/* Combine returned small struct. */ +static void ccall_struct_ret(CCallState *cc, int *rcl, uint8_t *dp, CTSize sz) +{ + GPRArg sp[2]; + MSize ngpr = 0, nfpr = 0; + uint32_t i; + for (i = 0; i < 2; i++) { + if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ + sp[i] = cc->gpr[ngpr++]; + } else if ((rcl[i] & CCALL_RCL_SSE)) { + sp[i] = cc->fpr[nfpr++].l[0]; + } + } + memcpy(dp, sp, sz); +} +#endif + +/* -- Common C call handling ---------------------------------------------- */ + +/* Infer the destination CTypeID for a vararg argument. */ +static CTypeID ccall_ctid_vararg(CTState *cts, cTValue *o) +{ + if (tvisnumber(o)) { + return CTID_DOUBLE; + } else if (tviscdata(o)) { + CTypeID id = cdataV(o)->typeid; + CType *s = ctype_get(cts, id); + if (ctype_isrefarray(s->info)) { + return lj_ctype_intern(cts, + CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(s->info)), CTSIZE_PTR); + } else if (ctype_isstruct(s->info) || ctype_isfunc(s->info)) { + /* NYI: how to pass a struct by value in a vararg argument? */ + return lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR); + } if (ctype_isfp(s->info) && s->size == sizeof(float)) { + return CTID_DOUBLE; + } else { + return id; + } + } else if (tvisstr(o)) { + return CTID_P_CCHAR; + } else if (tvisbool(o)) { + return CTID_BOOL; + } else { + return CTID_P_VOID; + } +} + +/* Setup arguments for C call. */ +static int ccall_set_args(lua_State *L, CTState *cts, CType *ct, + CCallState *cc) +{ + int gcsteps = 0; + TValue *o, *top = L->top; + CTypeID fid; + CType *ctr; + MSize maxgpr, ngpr = 0, nsp = 0, narg; +#if CCALL_NARG_FPR + MSize nfpr = 0; +#endif + + /* Clear unused regs to get some determinism in case of misdeclaration. 
*/ + memset(cc->gpr, 0, sizeof(cc->gpr)); +#if CCALL_NUM_FPR + memset(cc->fpr, 0, sizeof(cc->fpr)); +#endif + +#if LJ_TARGET_X86 + /* x86 has several different calling conventions. */ + cc->resx87 = 0; + switch (ctype_cconv(ct->info)) { + case CTCC_FASTCALL: maxgpr = 2; break; + case CTCC_THISCALL: maxgpr = 1; break; + default: maxgpr = 0; break; + } +#else + maxgpr = CCALL_NARG_GPR; +#endif + + /* Perform required setup for some result types. */ + ctr = ctype_rawchild(cts, ct); + if (ctype_isvector(ctr->info)) { + if (!(CCALL_VECTOR_REG && (ctr->size == 8 || ctr->size == 16))) + goto err_nyi; + } else if (ctype_iscomplex(ctr->info) || ctype_isstruct(ctr->info)) { + /* Preallocate cdata object and anchor it after arguments. */ + CTSize sz = ctr->size; + GCcdata *cd = lj_cdata_new(cts, ctype_cid(ct->info), sz); + void *dp = cdataptr(cd); + setcdataV(L, L->top++, cd); + if (ctype_isstruct(ctr->info)) { + CCALL_HANDLE_STRUCTRET + } else { + CCALL_HANDLE_COMPLEXRET + } +#if LJ_TARGET_X86 + } else if (ctype_isfp(ctr->info)) { + cc->resx87 = ctr->size == sizeof(float) ? 1 : 2; +#endif + } + + /* Skip initial attributes. */ + fid = ct->sib; + while (fid) { + CType *ctf = ctype_get(cts, fid); + if (!ctype_isattrib(ctf->info)) break; + fid = ctf->sib; + } + + /* Walk through all passed arguments. */ + for (o = L->base+1, narg = 1; o < top; o++, narg++) { + CTypeID did; + CType *d; + CTSize sz; + MSize n, isfp = 0, isva = 0; + void *dp, *rp = NULL; + + if (fid) { /* Get argument type from field. */ + CType *ctf = ctype_get(cts, fid); + fid = ctf->sib; + lua_assert(ctype_isfield(ctf->info)); + did = ctype_cid(ctf->info); + } else { + if (!(ct->info & CTF_VARARG)) + lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too many arguments. */ + did = ccall_ctid_vararg(cts, o); /* Infer vararg type. */ + isva = 1; + } + d = ctype_raw(cts, did); + sz = d->size; + + /* Find out how (by value/ref) and where (GPR/FPR) to pass an argument. */ + if (ctype_isnum(d->info)) { + if (sz > 8) goto err_nyi; + if ((d->info & CTF_FP)) { + isfp = 1; + } else if (sz < CTSIZE_PTR) { + d = ctype_get(cts, CTID_INT_PSZ); + } + } else if (ctype_isvector(d->info)) { + if (CCALL_VECTOR_REG && (sz == 8 || sz == 16)) + isfp = 1; + else + goto err_nyi; + } else if (ctype_isstruct(d->info)) { + CCALL_HANDLE_STRUCTARG + } else if (ctype_iscomplex(d->info)) { + CCALL_HANDLE_COMPLEXARG + } else { + sz = CTSIZE_PTR; + } + sz = (sz + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1); + n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */ + + CCALL_HANDLE_REGARG /* Handle register arguments. */ + + /* Otherwise pass argument on stack. */ + if (CCALL_ALIGN_STACKARG && !rp && (d->info & CTF_ALIGN) > CTALIGN_PTR) { + MSize align = (1u << ctype_align(d->info-CTALIGN_PTR)) -1; + nsp = (nsp + align) & ~align; /* Align argument on stack. */ + } + if (nsp + n > CCALL_MAXSTACK) { /* Too many arguments. */ + err_nyi: + lj_err_caller(L, LJ_ERR_FFI_NYICALL); + } + dp = &cc->stack[nsp]; + nsp += n; + isva = 0; + + done: + if (rp) { /* Pass by reference. */ + gcsteps++; + *(void **)dp = rp; + dp = rp; + } + lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); +#if LJ_TARGET_X64 && LJ_ABI_WIN + if (isva) { /* Windows/x64 mirrors varargs in both register sets. 
*/ + if (nfpr == ngpr) + cc->gpr[ngpr-1] = cc->fpr[ngpr-1].l[0]; + else + cc->fpr[ngpr-1].l[0] = cc->gpr[ngpr-1]; + } +#else + UNUSED(isva); +#endif +#if LJ_TARGET_X64 && !LJ_ABI_WIN + if (isfp == 2 && n == 2 && (uint8_t *)dp == (uint8_t *)&cc->fpr[nfpr-2]) { + cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */ + cc->fpr[nfpr-2].d[1] = 0; + } +#else + UNUSED(isfp); +#endif + } + if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */ + +#if LJ_TARGET_X64 + cc->nfpr = nfpr; /* Required for vararg functions. */ +#endif + cc->nsp = nsp; + cc->spadj = (CCALL_SPS_FREE + CCALL_SPS_EXTRA)*CTSIZE_PTR; + if (nsp > CCALL_SPS_FREE) + cc->spadj += (((nsp-CCALL_SPS_FREE)*CTSIZE_PTR + 15u) & ~15u); + return gcsteps; +} + +/* Get results from C call. */ +static int ccall_get_results(lua_State *L, CTState *cts, CType *ct, + CCallState *cc, int *ret) +{ + CType *ctr = ctype_rawchild(cts, ct); + void *sp = &cc->gpr[0]; + if (ctype_isvoid(ctr->info)) { + *ret = 0; /* Zero results. */ + return 0; /* No additional GC step. */ + } + *ret = 1; /* One result. */ + if (ctype_isstruct(ctr->info)) { + /* Return cdata object which is already on top of stack. */ + if (!cc->retref) { + void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */ + CCALL_HANDLE_STRUCTRET2 + } + return 1; /* One GC step. */ + } + if (ctype_iscomplex(ctr->info)) { + /* Return cdata object which is already on top of stack. */ + void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */ + CCALL_HANDLE_COMPLEXRET2 + return 1; /* One GC step. */ + } +#if CCALL_NUM_FPR + if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info)) + sp = &cc->fpr[0]; +#endif + /* No reference types end up here, so there's no need for the CTypeID. */ + lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info))); + if (ctype_isenum(ctr->info)) ctr = ctype_child(cts, ctr); + return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, (uint8_t *)sp); +} + +/* Call C function. */ +int lj_ccall_func(lua_State *L, GCcdata *cd) +{ + CTState *cts = ctype_cts(L); + CType *ct = ctype_raw(cts, cd->typeid); + CTSize sz = CTSIZE_PTR; + if (ctype_isptr(ct->info)) { + sz = ct->size; + ct = ctype_rawchild(cts, ct); + } + if (ctype_isfunc(ct->info)) { + CCallState cc; + int gcsteps, ret; + cc.func = (void (*)(void))cdata_getptr(cdataptr(cd), sz); + gcsteps = ccall_set_args(L, cts, ct, &cc); + lj_vm_ffi_call(&cc); + gcsteps += ccall_get_results(L, cts, ct, &cc, &ret); +#if LJ_TARGET_X86 && LJ_ABI_WIN + /* Automatically detect __stdcall and fix up C function declaration. */ + if (cc.spadj && ctype_cconv(ct->info) == CTCC_CDECL) { + CTF_INSERT(ct->info, CCONV, CTCC_STDCALL); + lj_trace_abort(G(L)); + } +#endif + while (gcsteps-- > 0) + lj_gc_check(L); + return ret; + } + return -1; /* Not a function. */ +} + +#endif diff --git a/src/luajit2/src/lj_ccall.h b/src/luajit2/src/lj_ccall.h new file mode 100644 index 0000000000..d097978109 --- /dev/null +++ b/src/luajit2/src/lj_ccall.h @@ -0,0 +1,119 @@ +/* +** FFI C call handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CCALL_H +#define _LJ_CCALL_H + +#include "lj_obj.h" + +#if LJ_HASFFI + +/* -- C calling conventions ----------------------------------------------- */ + +#if LJ_TARGET_X86ORX64 + +#if LJ_TARGET_X86 +#define CCALL_NARG_GPR 2 /* For fastcall arguments. */ +#define CCALL_NARG_FPR 0 +#define CCALL_NRET_GPR 2 +#define CCALL_NRET_FPR 1 /* For FP results on x87 stack. 
*/ +#define CCALL_ALIGN_STACKARG 0 /* Don't align argument on stack. */ +#elif LJ_ABI_WIN +#define CCALL_NARG_GPR 4 +#define CCALL_NARG_FPR 4 +#define CCALL_NRET_GPR 1 +#define CCALL_NRET_FPR 1 +#define CCALL_SPS_EXTRA 4 +#else +#define CCALL_NARG_GPR 6 +#define CCALL_NARG_FPR 8 +#define CCALL_NRET_GPR 2 +#define CCALL_NRET_FPR 2 +#define CCALL_VECTOR_REG 1 /* Pass vectors in registers. */ +#endif + +#define CCALL_SPS_FREE 1 + +typedef LJ_ALIGN(16) union FPRArg { + double d[2]; + float f[4]; + uint8_t b[16]; + uint16_t s[8]; + int i[4]; + int64_t l[2]; +} FPRArg; + +typedef intptr_t GPRArg; + +#elif LJ_TARGET_ARM + +#define CCALL_NARG_GPR 4 +#define CCALL_NARG_FPR 0 +#define CCALL_NRET_GPR 2 /* For softfp double. */ +#define CCALL_NRET_FPR 0 +#define CCALL_SPS_FREE 0 /* NYI */ + +typedef intptr_t GPRArg; + +#elif LJ_TARGET_PPCSPE + +#define CCALL_NARG_GPR 8 +#define CCALL_NARG_FPR 0 +#define CCALL_NRET_GPR 4 /* For softfp complex double. */ +#define CCALL_NRET_FPR 0 +#define CCALL_SPS_FREE 0 /* NYI */ + +typedef intptr_t GPRArg; + +#else +#error "missing calling convention definitions for this architecture" +#endif + +#ifndef CCALL_SPS_EXTRA +#define CCALL_SPS_EXTRA 0 +#endif +#ifndef CCALL_VECTOR_REG +#define CCALL_VECTOR_REG 0 +#endif +#ifndef CCALL_ALIGN_STACKARG +#define CCALL_ALIGN_STACKARG 1 +#endif + +#define CCALL_NUM_GPR \ + (CCALL_NARG_GPR > CCALL_NRET_GPR ? CCALL_NARG_GPR : CCALL_NRET_GPR) +#define CCALL_NUM_FPR \ + (CCALL_NARG_FPR > CCALL_NRET_FPR ? CCALL_NARG_FPR : CCALL_NRET_FPR) + +#define CCALL_MAXSTACK 32 + +/* -- C call state -------------------------------------------------------- */ + +typedef struct CCallState { + void (*func)(void); /* Pointer to called function. */ + uint32_t spadj; /* Stack pointer adjustment. */ + uint8_t nsp; /* Number of stack slots. */ + uint8_t retref; /* Return value by reference. */ +#if LJ_TARGET_X64 + uint8_t ngpr; /* Number of arguments in GPRs. */ + uint8_t nfpr; /* Number of arguments in FPRs. */ +#elif LJ_TARGET_X86 + uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */ +#endif +#if CCALL_NUM_FPR + FPRArg fpr[CCALL_NUM_FPR]; /* Arguments/results in FPRs. */ +#endif + GPRArg gpr[CCALL_NUM_GPR]; /* Arguments/results in GPRs. */ + GPRArg stack[CCALL_MAXSTACK]; /* Stack slots. */ +} CCallState; + +/* -- C call handling ----------------------------------------------------- */ + +/* Really belongs to lj_vm.h. */ +LJ_ASMF void LJ_FASTCALL lj_vm_ffi_call(CCallState *cc); +LJ_FUNC int lj_ccall_func(lua_State *L, GCcdata *cd); + +#endif + +#endif diff --git a/src/luajit2/src/lj_cconv.c b/src/luajit2/src/lj_cconv.c new file mode 100644 index 0000000000..884edef1a6 --- /dev/null +++ b/src/luajit2/src/lj_cconv.c @@ -0,0 +1,734 @@ +/* +** C type conversions. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_ctype.h" +#include "lj_cdata.h" +#include "lj_cconv.h" + +/* -- Conversion errors --------------------------------------------------- */ + +/* Bad conversion. */ +LJ_NORET static void cconv_err_conv(CTState *cts, CType *d, CType *s, + CTInfo flags) +{ + const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); + const char *src; + if ((flags & CCF_FROMTV)) + src = lj_obj_typename[1+(ctype_isnum(s->info) ? LUA_TNUMBER : + ctype_isarray(s->info) ? 
LUA_TSTRING : LUA_TNIL)]; + else + src = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, s), NULL)); + if (CCF_GETARG(flags)) + lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst); + else + lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst); +} + +/* Bad conversion from TValue. */ +LJ_NORET static void cconv_err_convtv(CTState *cts, CType *d, TValue *o, + CTInfo flags) +{ + const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); + const char *src = typename(o); + if (CCF_GETARG(flags)) + lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst); + else + lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst); +} + +/* Initializer overflow. */ +LJ_NORET static void cconv_err_initov(CTState *cts, CType *d) +{ + const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL)); + lj_err_callerv(cts->L, LJ_ERR_FFI_INITOV, dst); +} + +/* -- C type compatibility checks ----------------------------------------- */ + +/* Get raw type and qualifiers for a child type. Resolves enums, too. */ +static CType *cconv_childqual(CTState *cts, CType *ct, CTInfo *qual) +{ + ct = ctype_child(cts, ct); + for (;;) { + if (ctype_isattrib(ct->info)) { + if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size; + } else if (!ctype_isenum(ct->info)) { + break; + } + ct = ctype_child(cts, ct); + } + *qual |= (ct->info & CTF_QUAL); + return ct; +} + +/* Check for compatible types when converting to a pointer. +** Note: these checks are more relaxed than what C99 mandates. +*/ +int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags) +{ + if (!((flags & CCF_CAST) || d == s)) { + CTInfo dqual = 0, squal = 0; + d = cconv_childqual(cts, d, &dqual); + if (!ctype_isstruct(s->info)) + s = cconv_childqual(cts, s, &squal); + if ((flags & CCF_SAME)) { + if (dqual != squal) + return 0; /* Different qualifiers. */ + } else if (!(flags & CCF_IGNQUAL)) { + if ((dqual & squal) != squal) + return 0; /* Discarded qualifiers. */ + if (ctype_isvoid(d->info) || ctype_isvoid(s->info)) + return 1; /* Converting to/from void * is always ok. */ + } + if (ctype_type(d->info) != ctype_type(s->info) || + d->size != s->size) + return 0; /* Different type or different size. */ + if (ctype_isnum(d->info)) { + if (((d->info ^ s->info) & (CTF_BOOL|CTF_FP))) + return 0; /* Different numeric types. */ + } else if (ctype_ispointer(d->info)) { + /* Check child types for compatibility. */ + return lj_cconv_compatptr(cts, d, s, flags|CCF_SAME); + } else if (ctype_isstruct(d->info)) { + if (d != s) + return 0; /* Must be exact same type for struct/union. */ + } else if (ctype_isfunc(d->info)) { + /* NYI: structural equality of functions. */ + } + } + return 1; /* Types are compatible. */ +} + +/* -- C type to C type conversion ----------------------------------------- */ + +/* Convert C type to C type. Caveat: expects to get the raw CType! +** +** Note: This is only used by the interpreter and not optimized at all. +** The JIT compiler will do a much better job specializing for each case. +*/ +void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s, + uint8_t *dp, uint8_t *sp, CTInfo flags) +{ + CTSize dsize = d->size, ssize = s->size; + CTInfo dinfo = d->info, sinfo = s->info; + void *tmpptr; + + lua_assert(!ctype_isenum(dinfo) && !ctype_isenum(sinfo)); + lua_assert(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo)); + + if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) + goto err_conv; + + /* Some basic sanity checks. 
*/ + lua_assert(!ctype_isnum(dinfo) || dsize > 0); + lua_assert(!ctype_isnum(sinfo) || ssize > 0); + lua_assert(!ctype_isbool(dinfo) || dsize == 1); + lua_assert(!ctype_isbool(sinfo) || ssize == 1); + lua_assert(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize); + lua_assert(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize); + + switch (cconv_idx2(dinfo, sinfo)) { + /* Destination is a bool. */ + case CCX(B, B): + *dp = *sp; /* Source operand is already normalized. */ + break; + case CCX(B, I): { + MSize i; + uint8_t b = 0; + for (i = 0; i < ssize; i++) b |= sp[i]; + *dp = (b != 0); + break; + } + case CCX(B, F): { + uint8_t b; + if (ssize == sizeof(double)) b = (*(double *)sp != 0); + else if (ssize == sizeof(float)) b = (*(float *)sp != 0); + else goto err_conv; /* NYI: long double. */ + *dp = b; + break; + } + + /* Destination is an integer. */ + case CCX(I, B): + case CCX(I, I): + conv_I_I: + if (dsize > ssize) { /* Zero-extend or sign-extend LSB. */ +#if LJ_LE + uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[ssize-1]&0x80)) ? 0xff : 0; + memcpy(dp, sp, ssize); + memset(dp + ssize, fill, dsize-ssize); +#else + uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[0]&0x80)) ? 0xff : 0; + memset(dp, fill, dsize-ssize); + memcpy(dp + (dsize-ssize), sp, ssize); +#endif + } else { /* Copy LSB. */ +#if LJ_LE + memcpy(dp, sp, dsize); +#else + memcpy(dp, sp + (ssize-dsize), dsize); +#endif + } + break; + case CCX(I, F): { + double n; /* Always convert via double. */ + conv_I_F: + /* Convert source to double. */ + if (ssize == sizeof(double)) n = *(double *)sp; + else if (ssize == sizeof(float)) n = (double)*(float *)sp; + else goto err_conv; /* NYI: long double. */ + /* Then convert double to integer. */ + /* The conversion must exactly match the semantics of JIT-compiled code! */ + if (dsize < 4 || (dsize == 4 && !(dinfo & CTF_UNSIGNED))) { + int32_t i = (int32_t)n; + if (dsize == 4) *(int32_t *)dp = i; + else if (dsize == 2) *(int16_t *)dp = (int16_t)i; + else *(int8_t *)dp = (int8_t)i; + } else if (dsize == 4) { + *(uint32_t *)dp = (uint32_t)n; + } else if (dsize == 8) { + if (!(dinfo & CTF_UNSIGNED)) + *(int64_t *)dp = (int64_t)n; + else + *(uint64_t *)dp = lj_num2u64(n); + } else { + goto err_conv; /* NYI: conversion to >64 bit integers. */ + } + break; + } + case CCX(I, C): + s = ctype_child(cts, s); + sinfo = s->info; + ssize = s->size; + goto conv_I_F; /* Just convert re. */ + case CCX(I, P): + if (!(flags & CCF_CAST)) goto err_conv; + sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); + goto conv_I_I; + case CCX(I, A): + if (!(flags & CCF_CAST)) goto err_conv; + sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); + ssize = CTSIZE_PTR; + tmpptr = sp; + sp = (uint8_t *)&tmpptr; + goto conv_I_I; + + /* Destination is a floating-point number. */ + case CCX(F, B): + case CCX(F, I): { + double n; /* Always convert via double. */ + conv_F_I: + /* First convert source to double. */ + /* The conversion must exactly match the semantics of JIT-compiled code! 
*/ + if (ssize < 4 || (ssize == 4 && !(sinfo & CTF_UNSIGNED))) { + int32_t i; + if (ssize == 4) { + i = *(int32_t *)sp; + } else if (!(sinfo & CTF_UNSIGNED)) { + if (ssize == 2) i = *(int16_t *)sp; + else i = *(int8_t *)sp; + } else { + if (ssize == 2) i = *(uint16_t *)sp; + else i = *(uint8_t *)sp; + } + n = (double)i; + } else if (ssize == 4) { + n = (double)*(uint32_t *)sp; + } else if (ssize == 8) { + if (!(sinfo & CTF_UNSIGNED)) n = (double)*(int64_t *)sp; + else n = (double)*(uint64_t *)sp; + } else { + goto err_conv; /* NYI: conversion from >64 bit integers. */ + } + /* Convert double to destination. */ + if (dsize == sizeof(double)) *(double *)dp = n; + else if (dsize == sizeof(float)) *(float *)dp = (float)n; + else goto err_conv; /* NYI: long double. */ + break; + } + case CCX(F, F): { + double n; /* Always convert via double. */ + conv_F_F: + if (ssize == dsize) goto copyval; + /* Convert source to double. */ + if (ssize == sizeof(double)) n = *(double *)sp; + else if (ssize == sizeof(float)) n = (double)*(float *)sp; + else goto err_conv; /* NYI: long double. */ + /* Convert double to destination. */ + if (dsize == sizeof(double)) *(double *)dp = n; + else if (dsize == sizeof(float)) *(float *)dp = (float)n; + else goto err_conv; /* NYI: long double. */ + break; + } + case CCX(F, C): + s = ctype_child(cts, s); + sinfo = s->info; + ssize = s->size; + goto conv_F_F; /* Ignore im, and convert from re. */ + + /* Destination is a complex number. */ + case CCX(C, I): + d = ctype_child(cts, d); + dinfo = d->info; + dsize = d->size; + memset(dp + dsize, 0, dsize); /* Clear im. */ + goto conv_F_I; /* Convert to re. */ + case CCX(C, F): + d = ctype_child(cts, d); + dinfo = d->info; + dsize = d->size; + memset(dp + dsize, 0, dsize); /* Clear im. */ + goto conv_F_F; /* Convert to re. */ + + case CCX(C, C): + if (dsize != ssize) { /* Different types: convert re/im separately. */ + CType *dc = ctype_child(cts, d); + CType *sc = ctype_child(cts, s); + lj_cconv_ct_ct(cts, dc, sc, dp, sp, flags); + lj_cconv_ct_ct(cts, dc, sc, dp + dc->size, sp + sc->size, flags); + return; + } + goto copyval; /* Otherwise this is easy. */ + + /* Destination is a vector. */ + case CCX(V, I): + case CCX(V, F): + case CCX(V, C): { + CType *dc = ctype_child(cts, d); + CTSize esize; + /* First convert the scalar to the first element. */ + lj_cconv_ct_ct(cts, dc, s, dp, sp, flags); + /* Then replicate it to the other elements (splat). */ + for (sp = dp, esize = dc->size; dsize > esize; dsize -= esize) { + dp += esize; + memcpy(dp, sp, esize); + } + break; + } + + case CCX(V, V): + /* Copy same-sized vectors, even for different lengths/element-types. */ + if (dsize != ssize) goto err_conv; + goto copyval; + + /* Destination is a pointer. */ + case CCX(P, I): + if (!(flags & CCF_CAST)) goto err_conv; + dinfo = CTINFO(CT_NUM, CTF_UNSIGNED); + goto conv_I_I; + + case CCX(P, F): + if (!(flags & CCF_CAST) || !(flags & CCF_FROMTV)) goto err_conv; + /* The signed conversion is cheaper. x64 really has 47 bit pointers. */ + dinfo = CTINFO(CT_NUM, (LJ_64 && dsize == 8) ? 0 : CTF_UNSIGNED); + goto conv_I_F; + + case CCX(P, P): + if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv; + cdata_setptr(dp, dsize, cdata_getptr(sp, ssize)); + break; + + case CCX(P, A): + case CCX(P, S): + if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv; + cdata_setptr(dp, dsize, sp); + break; + + /* Destination is an array. 
*/ + case CCX(A, A): + if ((flags & CCF_CAST) || (d->info & CTF_VLA) || dsize != ssize || + d->size == CTSIZE_INVALID || !lj_cconv_compatptr(cts, d, s, flags)) + goto err_conv; + goto copyval; + + /* Destination is a struct/union. */ + case CCX(S, S): + if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s) + goto err_conv; /* Must be exact same type. */ +copyval: /* Copy value. */ + lua_assert(dsize == ssize); + memcpy(dp, sp, dsize); + break; + + default: + err_conv: + cconv_err_conv(cts, d, s, flags); + } +} + +/* -- C type to TValue conversion ----------------------------------------- */ + +/* Convert C type to TValue. Caveat: expects to get the raw CType! */ +int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid, + TValue *o, uint8_t *sp) +{ + CTInfo sinfo = s->info; + lua_assert(!ctype_isenum(sinfo)); + if (ctype_isnum(sinfo)) { + if (!ctype_isbool(sinfo)) { + if (ctype_isinteger(sinfo) && s->size > 4) goto copyval; + if (LJ_DUALNUM && ctype_isinteger(sinfo)) { + int32_t i; + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT32), s, + (uint8_t *)&i, sp, 0); + if ((sinfo & CTF_UNSIGNED) && i < 0) + setnumV(o, (lua_Number)(uint32_t)i); + else + setintV(o, i); + } else { + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s, + (uint8_t *)&o->n, sp, 0); + /* Numbers are NOT canonicalized here! Beware of uninitialized data. */ + lua_assert(tvisnum(o)); + } + } else { + uint32_t b = ((*sp) & 1); + setboolV(o, b); + setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */ + } + return 0; + } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) { + /* Create reference. */ + setcdataV(cts->L, o, lj_cdata_newref(cts, sp, sid)); + return 1; /* Need GC step. */ + } else { + GCcdata *cd; + CTSize sz; + copyval: /* Copy value. */ + sz = s->size; + lua_assert(sz != CTSIZE_INVALID); + /* Attributes are stripped, qualifiers are kept (but mostly ignored). */ + cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz); + setcdataV(cts->L, o, cd); + memcpy(cdataptr(cd), sp, sz); + return 1; /* Need GC step. */ + } +} + +/* Convert bitfield to TValue. */ +int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp) +{ + CTInfo info = s->info; + CTSize pos, bsz; + uint32_t val; + lua_assert(ctype_isbitfield(info)); + /* NYI: packed bitfields may cause misaligned reads. */ + switch (ctype_bitcsz(info)) { + case 4: val = *(uint32_t *)sp; break; + case 2: val = *(uint16_t *)sp; break; + case 1: val = *(uint8_t *)sp; break; + default: lua_assert(0); val = 0; break; + } + /* Check if a packed bitfield crosses a container boundary. */ + pos = ctype_bitpos(info); + bsz = ctype_bitbsz(info); + lua_assert(pos < 8*ctype_bitcsz(info)); + lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); + if (pos + bsz > 8*ctype_bitcsz(info)) + lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); + if (!(info & CTF_BOOL)) { + CTSize shift = 32 - bsz; + if (!(info & CTF_UNSIGNED)) { + setintV(o, (int32_t)(val << (shift-pos)) >> shift); + } else { + val = (val << (shift-pos)) >> shift; + if (!LJ_DUALNUM || (int32_t)val < 0) + setnumV(o, (lua_Number)(uint32_t)val); + else + setintV(o, (int32_t)val); + } + } else { + lua_assert(bsz == 1); + setboolV(o, (val >> pos) & 1); + } + return 0; /* No GC step needed. */ +} + +/* -- TValue to C type conversion ----------------------------------------- */ + +/* Convert table to array. */ +static void cconv_array_tab(CTState *cts, CType *d, + uint8_t *dp, GCtab *t, CTInfo flags) +{ + int32_t i; + CType *dc = ctype_rawchild(cts, d); /* Array element type. 
*/ + CTSize size = d->size, esize = dc->size, ofs = 0; + for (i = 0; ; i++) { + TValue *tv = (TValue *)lj_tab_getint(t, i); + if (!tv || tvisnil(tv)) { + if (i == 0) continue; /* Try again for 1-based tables. */ + break; /* Stop at first nil. */ + } + if (ofs >= size) + cconv_err_initov(cts, d); + lj_cconv_ct_tv(cts, dc, dp + ofs, tv, flags); + ofs += esize; + } + if (size != CTSIZE_INVALID) { /* Only fill up arrays with known size. */ + if (ofs == esize) { /* Replicate a single element. */ + for (; ofs < size; ofs += esize) memcpy(dp + ofs, dp, esize); + } else { /* Otherwise fill the remainder with zero. */ + memset(dp + ofs, 0, size - ofs); + } + } +} + +/* Convert table to sub-struct/union. */ +static void cconv_substruct_tab(CTState *cts, CType *d, uint8_t *dp, + GCtab *t, int32_t *ip, CTInfo flags) +{ + CTypeID id = d->sib; + while (id) { + CType *df = ctype_get(cts, id); + id = df->sib; + if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) { + TValue *tv; + int32_t i = *ip; + if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ + if (i >= 0) { + retry: + tv = (TValue *)lj_tab_getint(t, i); + if (!tv || tvisnil(tv)) { + if (i == 0) { i = 1; goto retry; } /* 1-based tables. */ + break; /* Stop at first nil. */ + } + *ip = i + 1; + } else { + tv = (TValue *)lj_tab_getstr(t, gco2str(gcref(df->name))); + if (!tv || tvisnil(tv)) continue; + } + if (ctype_isfield(df->info)) + lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, tv, flags); + else + lj_cconv_bf_tv(cts, df, dp+df->size, tv); + if ((d->info & CTF_UNION)) break; + } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) { + cconv_substruct_tab(cts, ctype_child(cts, df), dp+df->size, t, ip, flags); + } /* Ignore all other entries in the chain. */ + } +} + +/* Convert table to struct/union. */ +static void cconv_struct_tab(CTState *cts, CType *d, + uint8_t *dp, GCtab *t, CTInfo flags) +{ + int32_t i = 0; + memset(dp, 0, d->size); /* Much simpler to clear the struct first. */ + if (t->hmask) i = -1; else if (t->asize == 0) return; /* Fast exit. */ + cconv_substruct_tab(cts, d, dp, t, &i, flags); +} + +/* Convert TValue to C type. Caveat: expects to get the raw CType! */ +void lj_cconv_ct_tv(CTState *cts, CType *d, + uint8_t *dp, TValue *o, CTInfo flags) +{ + CTypeID sid = CTID_P_VOID; + CType *s; + void *tmpptr; + uint8_t tmpbool, *sp = (uint8_t *)&tmpptr; + if (LJ_LIKELY(tvisint(o))) { + sp = (uint8_t *)&o->i; + sid = CTID_INT32; + flags |= CCF_FROMTV; + } else if (LJ_LIKELY(tvisnum(o))) { + sp = (uint8_t *)&o->n; + sid = CTID_DOUBLE; + flags |= CCF_FROMTV; + } else if (tviscdata(o)) { + sp = cdataptr(cdataV(o)); + sid = cdataV(o)->typeid; + s = ctype_get(cts, sid); + if (ctype_isref(s->info)) { /* Resolve reference for value. */ + lua_assert(s->size == CTSIZE_PTR); + sp = *(void **)sp; + sid = ctype_cid(s->info); + } + s = ctype_raw(cts, sid); + if (ctype_isfunc(s->info)) { + sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR); + } else { + if (ctype_isenum(s->info)) s = ctype_child(cts, s); + goto doconv; + } + } else if (tvisstr(o)) { + GCstr *str = strV(o); + if (ctype_isenum(d->info)) { /* Match string against enum constant. */ + CTSize ofs; + CType *cct = lj_ctype_getfield(cts, d, str, &ofs); + if (!cct || !ctype_isconstval(cct->info)) + goto err_conv; + lua_assert(d->size == 4); + sp = (uint8_t *)&cct->size; + sid = ctype_cid(cct->info); + } else if (ctype_isrefarray(d->info)) { /* Copy string to array. 
*/ + CType *dc = ctype_rawchild(cts, d); + CTSize sz = str->len+1; + if (!ctype_isinteger(dc->info) || dc->size != 1) + goto err_conv; + if (d->size != 0 && d->size < sz) + sz = d->size; + memcpy(dp, strdata(str), sz); + return; + } else { /* Otherwise pass it as a const char[]. */ + sp = (uint8_t *)strdata(str); + sid = CTID_A_CCHAR; + flags |= CCF_FROMTV; + } + } else if (tvistab(o)) { + if (ctype_isarray(d->info)) { + cconv_array_tab(cts, d, dp, tabV(o), flags); + return; + } else if (ctype_isstruct(d->info)) { + cconv_struct_tab(cts, d, dp, tabV(o), flags); + return; + } else { + goto err_conv; + } + } else if (tvisbool(o)) { + tmpbool = boolV(o); + sp = &tmpbool; + sid = CTID_BOOL; + } else if (tvisnil(o)) { + tmpptr = (void *)0; + flags |= CCF_FROMTV; + } else if (tvisudata(o)) { + tmpptr = uddata(udataV(o)); + } else if (tvislightud(o)) { + tmpptr = lightudV(o); + } else { + err_conv: + cconv_err_convtv(cts, d, o, flags); + } + s = ctype_get(cts, sid); +doconv: + if (ctype_isenum(d->info)) d = ctype_child(cts, d); + lj_cconv_ct_ct(cts, d, s, dp, sp, flags); +} + +/* Convert TValue to bitfield. */ +void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o) +{ + CTInfo info = d->info; + CTSize pos, bsz; + uint32_t val, mask; + lua_assert(ctype_isbitfield(info)); + if ((info & CTF_BOOL)) { + uint8_t tmpbool; + lua_assert(ctype_bitbsz(info) == 1); + lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0); + val = tmpbool; + } else { + CTypeID did = (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32; + lj_cconv_ct_tv(cts, ctype_get(cts, did), (uint8_t *)&val, o, 0); + } + pos = ctype_bitpos(info); + bsz = ctype_bitbsz(info); + lua_assert(pos < 8*ctype_bitcsz(info)); + lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); + /* Check if a packed bitfield crosses a container boundary. */ + if (pos + bsz > 8*ctype_bitcsz(info)) + lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); + mask = ((1u << bsz) - 1u) << pos; + val = (val << pos) & mask; + /* NYI: packed bitfields may cause misaligned reads/writes. */ + switch (ctype_bitcsz(info)) { + case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break; + case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break; + case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break; + default: lua_assert(0); break; + } +} + +/* -- Initialize C type with TValues -------------------------------------- */ + +/* Initialize an array with TValues. */ +static void cconv_array_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp, + TValue *o, MSize len) +{ + CType *dc = ctype_rawchild(cts, d); /* Array element type. */ + CTSize ofs, esize = dc->size; + MSize i; + if (len*esize > sz) + cconv_err_initov(cts, d); + for (i = 0, ofs = 0; i < len; i++, ofs += esize) + lj_cconv_ct_tv(cts, dc, dp + ofs, o + i, 0); + if (ofs == esize) { /* Replicate a single element. */ + for (; ofs < sz; ofs += esize) memcpy(dp + ofs, dp, esize); + } else { /* Otherwise fill the remainder with zero. */ + memset(dp + ofs, 0, sz - ofs); + } +} + +/* Initialize a sub-struct/union with TValues. */ +static void cconv_substruct_init(CTState *cts, CType *d, uint8_t *dp, + TValue *o, MSize len, MSize *ip) +{ + CTypeID id = d->sib; + while (id) { + CType *df = ctype_get(cts, id); + id = df->sib; + if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) { + MSize i = *ip; + if (!gcref(df->name)) continue; /* Ignore unnamed fields. 
*/ + if (i >= len) break; + *ip = i + 1; + if (ctype_isfield(df->info)) + lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, o + i, 0); + else + lj_cconv_bf_tv(cts, df, dp+df->size, o + i); + if ((d->info & CTF_UNION)) break; + } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) { + cconv_substruct_init(cts, ctype_child(cts, df), dp+df->size, o, len, ip); + } /* Ignore all other entries in the chain. */ + } +} + +/* Initialize a struct/union with TValues. */ +static void cconv_struct_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp, + TValue *o, MSize len) +{ + MSize i = 0; + memset(dp, 0, sz); /* Much simpler to clear the struct first. */ + cconv_substruct_init(cts, d, dp, o, len, &i); + if (i < len) + cconv_err_initov(cts, d); +} + +/* Check whether to use a multi-value initializer. +** This is true if an aggregate is to be initialized with a value. +** Valarrays are treated as values here so ct_tv handles (V|C, I|F). +*/ +int lj_cconv_multi_init(CType *d, TValue *o) +{ + if (!(ctype_isrefarray(d->info) || ctype_isstruct(d->info))) + return 0; /* Destination is not an aggregate. */ + if (tvistab(o) || (tvisstr(o) && !ctype_isstruct(d->info))) + return 0; /* Initializer is not a value. */ + return 1; /* Otherwise the initializer is a value. */ +} + +/* Initialize C type with TValues. Caveat: expects to get the raw CType! */ +void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz, + uint8_t *dp, TValue *o, MSize len) +{ + if (len == 0) + memset(dp, 0, sz); + else if (len == 1 && !lj_cconv_multi_init(d, o)) + lj_cconv_ct_tv(cts, d, dp, o, 0); + else if (ctype_isarray(d->info)) /* Also handles valarray init with len>1. */ + cconv_array_init(cts, d, sz, dp, o, len); + else if (ctype_isstruct(d->info)) + cconv_struct_init(cts, d, sz, dp, o, len); + else + cconv_err_initov(cts, d); +} + +#endif diff --git a/src/luajit2/src/lj_cconv.h b/src/luajit2/src/lj_cconv.h new file mode 100644 index 0000000000..47c145960d --- /dev/null +++ b/src/luajit2/src/lj_cconv.h @@ -0,0 +1,70 @@ +/* +** C type conversions. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CCONV_H +#define _LJ_CCONV_H + +#include "lj_obj.h" +#include "lj_ctype.h" + +#if LJ_HASFFI + +/* Compressed C type index. ORDER CCX. */ +enum { + CCX_B, /* Bool. */ + CCX_I, /* Integer. */ + CCX_F, /* Floating-point number. */ + CCX_C, /* Complex. */ + CCX_V, /* Vector. */ + CCX_P, /* Pointer. */ + CCX_A, /* Refarray. */ + CCX_S /* Struct/union. */ +}; + +/* Convert C type info to compressed C type index. ORDER CT. ORDER CCX. */ +static LJ_AINLINE uint32_t cconv_idx(CTInfo info) +{ + uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */ + lua_assert(ctype_type(info) <= CT_MAYCONVERT); +#if LJ_64 + idx = ((U64x(f436fff5,fff7f021) >> 4*idx) & 15u); +#else + idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u); +#endif + lua_assert(idx < 8); + return idx; +} + +#define cconv_idx2(dinfo, sinfo) \ + ((cconv_idx((dinfo)) << 3) + cconv_idx((sinfo))) + +#define CCX(dst, src) ((CCX_##dst << 3) + CCX_##src) + +/* Conversion flags. 
*/ +#define CCF_CAST 0x00000001u +#define CCF_FROMTV 0x00000002u +#define CCF_SAME 0x00000004u +#define CCF_IGNQUAL 0x00000008u + +#define CCF_ARG_SHIFT 8 +#define CCF_ARG(n) ((n) << CCF_ARG_SHIFT) +#define CCF_GETARG(f) ((f) >> CCF_ARG_SHIFT) + +LJ_FUNC int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags); +LJ_FUNC void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s, + uint8_t *dp, uint8_t *sp, CTInfo flags); +LJ_FUNC int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid, + TValue *o, uint8_t *sp); +LJ_FUNC int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp); +LJ_FUNC void lj_cconv_ct_tv(CTState *cts, CType *d, + uint8_t *dp, TValue *o, CTInfo flags); +LJ_FUNC void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o); +LJ_FUNC int lj_cconv_multi_init(CType *d, TValue *o); +LJ_FUNC void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz, + uint8_t *dp, TValue *o, MSize len); + +#endif + +#endif diff --git a/src/luajit2/src/lj_cdata.c b/src/luajit2/src/lj_cdata.c new file mode 100644 index 0000000000..497b84ee8b --- /dev/null +++ b/src/luajit2/src/lj_cdata.c @@ -0,0 +1,284 @@ +/* +** C data management. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_ctype.h" +#include "lj_cconv.h" +#include "lj_cdata.h" + +/* -- C data allocation --------------------------------------------------- */ + +/* Allocate a new C data object holding a reference to another object. */ +GCcdata *lj_cdata_newref(CTState *cts, const void *p, CTypeID id) +{ + CTypeID refid = lj_ctype_intern(cts, CTINFO_REF(id), CTSIZE_PTR); + GCcdata *cd = lj_cdata_new(cts, refid, CTSIZE_PTR); + *(const void **)cdataptr(cd) = p; + return cd; +} + +/* Allocate variable-sized or specially aligned C data object. */ +GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz, CTSize align) +{ + global_State *g; + MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) + + (align > CT_MEMALIGN ? (1u<<align) - (1u<<CT_MEMALIGN) : 0); + char *p = lj_mem_newt(cts->L, extra + sz, char); + uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata); + uintptr_t almask = (1u << align) - 1u; + GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata)); + lua_assert((char *)cd - p < 65536); + cdatav(cd)->offset = (uint16_t)((char *)cd - p); + cdatav(cd)->extra = extra; + cdatav(cd)->len = sz; + g = cts->g; + setgcrefr(cd->nextgc, g->gc.root); + setgcref(g->gc.root, obj2gco(cd)); + newwhite(g, obj2gco(cd)); + cd->marked |= 0x80; + cd->gct = ~LJ_TCDATA; + cd->typeid = id; + return cd; +} + +/* Free a C data object. */ +void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd) +{ + if (LJ_UNLIKELY(cd->marked & LJ_GC_CDATA_FIN)) { + GCobj *root; + makewhite(g, obj2gco(cd)); + obj2gco(cd)->gch.marked |= LJ_GC_FINALIZED; + if ((root = gcref(g->gc.mmudata)) != NULL) { + setgcrefr(cd->nextgc, root->gch.nextgc); + setgcref(root->gch.nextgc, obj2gco(cd)); + setgcref(g->gc.mmudata, obj2gco(cd)); + } else { + setgcref(cd->nextgc, obj2gco(cd)); + setgcref(g->gc.mmudata, obj2gco(cd)); + } + } else if (LJ_LIKELY(!cdataisv(cd))) { + CType *ct = ctype_raw(ctype_ctsG(g), cd->typeid); + CTSize sz = ctype_hassize(ct->info) ? 
ct->size : CTSIZE_PTR; + lua_assert(ctype_hassize(ct->info) || ctype_isfunc(ct->info) || + ctype_isextern(ct->info)); + lj_mem_free(g, cd, sizeof(GCcdata) + sz); + } else { + lj_mem_free(g, memcdatav(cd), sizecdatav(cd)); + } +} + +TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd) +{ + global_State *g = G(L); + GCtab *t = ctype_ctsG(g)->finalizer; + if (gcref(t->metatable)) { + /* Add cdata to finalizer table, if still enabled. */ + TValue *tv, tmp; + setcdataV(L, &tmp, cd); + lj_gc_anybarriert(L, t); + tv = lj_tab_set(L, t, &tmp); + cd->marked |= LJ_GC_CDATA_FIN; + return tv; + } else { + /* Otherwise return dummy TValue. */ + return &g->tmptv; + } +} + +/* -- C data indexing ----------------------------------------------------- */ + +/* Index C data by a TValue. Return CType and pointer. */ +CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp, + CTInfo *qual) +{ + uint8_t *p = (uint8_t *)cdataptr(cd); + CType *ct = ctype_get(cts, cd->typeid); + ptrdiff_t idx; + + /* Resolve reference for cdata object. */ + if (ctype_isref(ct->info)) { + lua_assert(ct->size == CTSIZE_PTR); + p = *(uint8_t **)p; + ct = ctype_child(cts, ct); + } + +collect_attrib: + /* Skip attributes and collect qualifiers. */ + while (ctype_isattrib(ct->info)) { + if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size; + ct = ctype_child(cts, ct); + } + lua_assert(!ctype_isref(ct->info)); /* Interning rejects refs to refs. */ + + if (tvisint(key)) { + idx = (ptrdiff_t)intV(key); + goto integer_key; + } else if (tvisnum(key)) { /* Numeric key. */ + idx = LJ_64 ? (ptrdiff_t)numV(key) : (ptrdiff_t)lj_num2int(numV(key)); + integer_key: + if (ctype_ispointer(ct->info)) { + CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info)); /* Element size. */ + if (sz != CTSIZE_INVALID) { + if (ctype_isptr(ct->info)) { + p = (uint8_t *)cdata_getptr(p, ct->size); + } else if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) { + if ((ct->info & CTF_COMPLEX)) idx &= 1; + *qual |= CTF_CONST; /* Valarray elements are constant. */ + } + *pp = p + idx*(int32_t)sz; + return ct; + } + } + } else if (tviscdata(key)) { /* Integer cdata key. */ + GCcdata *cdk = cdataV(key); + CType *ctk = ctype_raw(cts, cdk->typeid); + if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk); + if (ctype_isinteger(ctk->info)) { + lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ctk, + (uint8_t *)&idx, cdataptr(cdk), 0); + goto integer_key; + } + } else if (tvisstr(key)) { /* String key. */ + GCstr *name = strV(key); + if (ctype_isstruct(ct->info)) { + CTSize ofs; + CType *fct = lj_ctype_getfield(cts, ct, name, &ofs); + if (fct) { + *pp = p + ofs; + return fct; + } + } else if (ctype_iscomplex(ct->info)) { + if (name->len == 2) { + *qual |= CTF_CONST; /* Complex fields are constant. */ + if (strdata(name)[0] == 'r' && strdata(name)[1] == 'e') { + *pp = p; + return ct; + } else if (strdata(name)[0] == 'i' && strdata(name)[1] == 'm') { + *pp = p + (ct->size >> 1); + return ct; + } + } + } else if (cd->typeid == CTID_CTYPEID) { + /* Allow indexing a (pointer to) struct constructor to get constants. */ + CType *sct = ctype_raw(cts, *(CTypeID *)p); + if (ctype_isptr(sct->info)) + sct = ctype_rawchild(cts, sct); + if (ctype_isstruct(sct->info)) { + CTSize ofs; + CType *fct = lj_ctype_getfield(cts, sct, name, &ofs); + if (fct && ctype_isconstval(fct->info)) + return fct; + } + } + } + if (ctype_isptr(ct->info)) { /* Automatically perform '->'. 
*/ + if (ctype_isstruct(ctype_rawchild(cts, ct)->info)) { + p = (uint8_t *)cdata_getptr(p, ct->size); + ct = ctype_child(cts, ct); + goto collect_attrib; + } + } + *qual |= 1; /* Lookup failed. */ + return ct; /* But return the resolved raw type. */ +} + +/* -- C data getters ------------------------------------------------------ */ + +/* Get constant value and convert to TValue. */ +static void cdata_getconst(CTState *cts, TValue *o, CType *ct) +{ + CType *ctt = ctype_child(cts, ct); + lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); + /* Constants are already zero-extended/sign-extended to 32 bits. */ + if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) + setnumV(o, (lua_Number)(uint32_t)ct->size); + else + setintV(o, (int32_t)ct->size); +} + +/* Get C data value and convert to TValue. */ +int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp) +{ + CTypeID sid; + + if (ctype_isconstval(s->info)) { + cdata_getconst(cts, o, s); + return 0; /* No GC step needed. */ + } else if (ctype_isbitfield(s->info)) { + return lj_cconv_tv_bf(cts, s, o, sp); + } + + /* Get child type of pointer/array/field. */ + lua_assert(ctype_ispointer(s->info) || ctype_isfield(s->info)); + sid = ctype_cid(s->info); + s = ctype_get(cts, sid); + + /* Resolve reference for field. */ + if (ctype_isref(s->info)) { + lua_assert(s->size == CTSIZE_PTR); + sp = *(uint8_t **)sp; + sid = ctype_cid(s->info); + s = ctype_get(cts, sid); + } + + /* Skip attributes and enums. */ + while (ctype_isattrib(s->info) || ctype_isenum(s->info)) + s = ctype_child(cts, s); + + return lj_cconv_tv_ct(cts, s, sid, o, sp); +} + +/* -- C data setters ------------------------------------------------------ */ + +/* Convert TValue and set C data value. */ +void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual) +{ + if (ctype_isconstval(d->info)) { + goto err_const; + } else if (ctype_isbitfield(d->info)) { + if (((d->info|qual) & CTF_CONST)) goto err_const; + lj_cconv_bf_tv(cts, d, dp, o); + return; + } + + /* Get child type of pointer/array/field. */ + lua_assert(ctype_ispointer(d->info) || ctype_isfield(d->info)); + d = ctype_child(cts, d); + + /* Resolve reference for field. */ + if (ctype_isref(d->info)) { + lua_assert(d->size == CTSIZE_PTR); + dp = *(uint8_t **)dp; + d = ctype_child(cts, d); + } + + /* Skip attributes and collect qualifiers. */ + for (;;) { + if (ctype_isattrib(d->info)) { + if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size; + } else { + break; + } + d = ctype_child(cts, d); + } + + lua_assert(ctype_hassize(d->info) && !ctype_isvoid(d->info)); + + if (((d->info|qual) & CTF_CONST)) { + err_const: + lj_err_caller(cts->L, LJ_ERR_FFI_WRCONST); + } + + lj_cconv_ct_tv(cts, d, dp, o, 0); +} + +#endif diff --git a/src/luajit2/src/lj_cdata.h b/src/luajit2/src/lj_cdata.h new file mode 100644 index 0000000000..feb1bbf2d2 --- /dev/null +++ b/src/luajit2/src/lj_cdata.h @@ -0,0 +1,75 @@ +/* +** C data management. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CDATA_H +#define _LJ_CDATA_H + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_ctype.h" + +#if LJ_HASFFI + +/* Get C data pointer. */ +static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz) +{ + if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ + return ((void *)(uintptr_t)*(uint32_t *)p); + } else { + lua_assert(sz == CTSIZE_PTR); + return *(void **)p; + } +} + +/* Set C data pointer. 
*/ +static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v) +{ + if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ + *(uint32_t *)p = (uint32_t)(uintptr_t)v; + } else { + lua_assert(sz == CTSIZE_PTR); + *(void **)p = (void *)v; + } +} + +/* Allocate fixed-size C data object. */ +static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz) +{ + GCcdata *cd; +#ifdef LUA_USE_ASSERT + CType *ct = ctype_raw(cts, id); + lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz); +#endif + cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz); + cd->gct = ~LJ_TCDATA; + cd->typeid = ctype_check(cts, id); + return cd; +} + +/* Variant which works without a valid CTState. */ +static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz) +{ + GCcdata *cd = (GCcdata *)lj_mem_newgco(L, sizeof(GCcdata) + sz); + cd->gct = ~LJ_TCDATA; + cd->typeid = id; + return cd; +} + +LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id); +LJ_FUNC GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz, + CTSize align); + +LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd); +LJ_FUNCA TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd); + +LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, + uint8_t **pp, CTInfo *qual); +LJ_FUNC int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp); +LJ_FUNC void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, + CTInfo qual); + +#endif + +#endif diff --git a/src/luajit2/src/lj_char.c b/src/luajit2/src/lj_char.c new file mode 100644 index 0000000000..11f23efe44 --- /dev/null +++ b/src/luajit2/src/lj_char.c @@ -0,0 +1,43 @@ +/* +** Character types. +** Donated to the public domain. +** +** This is intended to replace the problematic libc single-byte NLS functions. +** These just don't make sense anymore with UTF-8 locales becoming the norm +** on POSIX systems. It never worked too well on Windows systems since hardly +** anyone bothered to call setlocale(). +** +** This table is hardcoded for ASCII. Identifiers include the characters +** 128-255, too. This allows for the use of all non-ASCII chars as identifiers +** in the lexer. This is a broad definition, but works well in practice +** for both UTF-8 locales and most single-byte locales (such as ISO-8859-*). 
+** +** If you really need proper character types for UTF-8 strings, please use +** an add-on library such as slnunicode: http://luaforge.net/projects/sln/ +*/ + +#define lj_char_c +#define LUA_CORE + +#include "lj_char.h" + +LJ_DATADEF const uint8_t lj_char_bits[257] = { + 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 152,152,152,152,152,152,152,152,152,152, 4, 4, 4, 4, 4, 4, + 4,176,176,176,176,176,176,160,160,160,160,160,160,160,160,160, + 160,160,160,160,160,160,160,160,160,160,160, 4, 4, 4, 4,132, + 4,208,208,208,208,208,208,192,192,192,192,192,192,192,192,192, + 192,192,192,192,192,192,192,192,192,192,192, 4, 4, 4, 4, 1, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128, + 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128 +}; + diff --git a/src/luajit2/src/lj_char.h b/src/luajit2/src/lj_char.h new file mode 100644 index 0000000000..7b7c1322e0 --- /dev/null +++ b/src/luajit2/src/lj_char.h @@ -0,0 +1,42 @@ +/* +** Character types. +** Donated to the public domain. +*/ + +#ifndef _LJ_CHAR_H +#define _LJ_CHAR_H + +#include "lj_def.h" + +#define LJ_CHAR_CNTRL 0x01 +#define LJ_CHAR_SPACE 0x02 +#define LJ_CHAR_PUNCT 0x04 +#define LJ_CHAR_DIGIT 0x08 +#define LJ_CHAR_XDIGIT 0x10 +#define LJ_CHAR_UPPER 0x20 +#define LJ_CHAR_LOWER 0x40 +#define LJ_CHAR_IDENT 0x80 +#define LJ_CHAR_ALPHA (LJ_CHAR_LOWER|LJ_CHAR_UPPER) +#define LJ_CHAR_ALNUM (LJ_CHAR_ALPHA|LJ_CHAR_DIGIT) +#define LJ_CHAR_GRAPH (LJ_CHAR_ALNUM|LJ_CHAR_PUNCT) + +/* Only pass -1 or 0..255 to these macros. Never pass a signed char! */ +#define lj_char_isa(c, t) (lj_char_bits[(c)+1] & t) +#define lj_char_iscntrl(c) lj_char_isa((c), LJ_CHAR_CNTRL) +#define lj_char_isspace(c) lj_char_isa((c), LJ_CHAR_SPACE) +#define lj_char_ispunct(c) lj_char_isa((c), LJ_CHAR_PUNCT) +#define lj_char_isdigit(c) lj_char_isa((c), LJ_CHAR_DIGIT) +#define lj_char_isxdigit(c) lj_char_isa((c), LJ_CHAR_XDIGIT) +#define lj_char_isupper(c) lj_char_isa((c), LJ_CHAR_UPPER) +#define lj_char_islower(c) lj_char_isa((c), LJ_CHAR_LOWER) +#define lj_char_isident(c) lj_char_isa((c), LJ_CHAR_IDENT) +#define lj_char_isalpha(c) lj_char_isa((c), LJ_CHAR_ALPHA) +#define lj_char_isalnum(c) lj_char_isa((c), LJ_CHAR_ALNUM) +#define lj_char_isgraph(c) lj_char_isa((c), LJ_CHAR_GRAPH) + +#define lj_char_toupper(c) ((c) - (lj_char_islower(c) >> 1)) +#define lj_char_tolower(c) ((c) + lj_char_isupper(c)) + +LJ_DATA const uint8_t lj_char_bits[257]; + +#endif diff --git a/src/luajit2/src/lj_clib.c b/src/luajit2/src/lj_clib.c new file mode 100644 index 0000000000..7b9b6c1a29 --- /dev/null +++ b/src/luajit2/src/lj_clib.c @@ -0,0 +1,370 @@ +/* +** FFI C library loader. +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" +#include "lj_str.h" +#include "lj_udata.h" +#include "lj_ctype.h" +#include "lj_cconv.h" +#include "lj_cdata.h" +#include "lj_clib.h" + +/* -- OS-specific functions ----------------------------------------------- */ + +#if LJ_TARGET_DLOPEN + +#include <dlfcn.h> +#include <stdio.h> + +#if defined(RTLD_DEFAULT) +#define CLIB_DEFHANDLE RTLD_DEFAULT +#elif LJ_TARGET_OSX || LJ_TARGET_BSD +#define CLIB_DEFHANDLE ((void *)-2) +#else +#define CLIB_DEFHANDLE NULL +#endif + +LJ_NORET LJ_NOINLINE static void clib_error_(lua_State *L) +{ + lj_err_callermsg(L, dlerror()); +} + +#define clib_error(L, fmt, name) clib_error_(L) + +#if LJ_TARGET_OSX +#define CLIB_SOEXT "%s.dylib" +#else +#define CLIB_SOEXT "%s.so" +#endif + +static const char *clib_extname(lua_State *L, const char *name) +{ + if (!strchr(name, '/')) { + if (!strchr(name, '.')) { + name = lj_str_pushf(L, CLIB_SOEXT, name); + L->top--; + } + if (!(name[0] == 'l' && name[1] == 'i' && name[2] == 'b')) { + name = lj_str_pushf(L, "lib%s", name); + L->top--; + } + } + return name; +} + +/* Quick and dirty solution to resolve shared library name from ld script. */ +static const char *clib_resolve_lds(lua_State *L, const char *name) +{ + FILE *fp = fopen(name, "r"); + if (fp) { + char *p, *e, buf[256]; + if (fgets(buf, sizeof(buf), fp) && !strncmp(buf, "/* GNU ld script", 16)) { + while (fgets(buf, sizeof(buf), fp)) { + if (!strncmp(buf, "GROUP", 5) && (p = strchr(buf, '('))) { + while (*++p == ' ') ; + for (e = p; *e && *e != ' ' && *e != ')'; e++) ; + fclose(fp); + return strdata(lj_str_new(L, p, e-p)); + } + } + } + fclose(fp); + } + return NULL; +} + +static void *clib_loadlib(lua_State *L, const char *name, int global) +{ + void *h = dlopen(clib_extname(L, name), + RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL)); + if (!h) { + const char *e, *err = dlerror(); + if (*err == '/' && (e = strchr(err, ':')) && + (name = clib_resolve_lds(L, strdata(lj_str_new(L, err, e-err))))) { + h = dlopen(name, RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL)); + if (h) return h; + err = dlerror(); + } + lj_err_callermsg(L, err); + } + return h; +} + +static void clib_unloadlib(CLibrary *cl) +{ + if (cl->handle && cl->handle != CLIB_DEFHANDLE) + dlclose(cl->handle); +} + +static void *clib_getsym(CLibrary *cl, const char *name) +{ + void *p = dlsym(cl->handle, name); + return p; +} + +#elif LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#ifndef WINVER +#define WINVER 0x0500 +#endif +#include <windows.h> + +#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS +#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4 +BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*); +#endif + +#define CLIB_DEFHANDLE ((void *)-1) + +/* Default libraries. 
*/ +enum { + CLIB_HANDLE_EXE, + CLIB_HANDLE_DLL, + CLIB_HANDLE_CRT, + CLIB_HANDLE_KERNEL32, + CLIB_HANDLE_USER32, + CLIB_HANDLE_GDI32, + CLIB_HANDLE_MAX +}; + +static void *clib_def_handle[CLIB_HANDLE_MAX]; + +LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt, + const char *name) +{ + DWORD err = GetLastError(); + char buf[128]; + if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM, + NULL, err, 0, buf, sizeof(buf), NULL)) + buf[0] = '\0'; + lj_err_callermsg(L, lj_str_pushf(L, fmt, name, buf)); +} + +static int clib_needext(const char *s) +{ + while (*s) { + if (*s == '/' || *s == '\\' || *s == '.') return 0; + s++; + } + return 1; +} + +static const char *clib_extname(lua_State *L, const char *name) +{ + if (clib_needext(name)) { + name = lj_str_pushf(L, "%s.dll", name); + L->top--; + } + return name; +} + +static void *clib_loadlib(lua_State *L, const char *name, int global) +{ + void *h = (void *)LoadLibraryA(clib_extname(L, name)); + if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name); + UNUSED(global); + return h; +} + +static void clib_unloadlib(CLibrary *cl) +{ + if (cl->handle == CLIB_DEFHANDLE) { + MSize i; + for (i = 0; i < CLIB_HANDLE_MAX; i++) { + void *h = clib_def_handle[i]; + if (h) { + clib_def_handle[i] = NULL; + FreeLibrary((HINSTANCE)h); + } + } + } else if (cl->handle) { + FreeLibrary((HINSTANCE)cl->handle); + } +} + +static void *clib_getsym(CLibrary *cl, const char *name) +{ + void *p = NULL; + if (cl->handle == CLIB_DEFHANDLE) { /* Search default libraries. */ + MSize i; + for (i = 0; i < CLIB_HANDLE_MAX; i++) { + HINSTANCE h = (HINSTANCE)clib_def_handle[i]; + if (!(void *)h) { /* Resolve default library handles (once). */ + switch (i) { + case CLIB_HANDLE_EXE: GetModuleHandleExA(0, NULL, &h); break; + case CLIB_HANDLE_DLL: + GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, + (const char *)clib_def_handle, &h); + break; + case CLIB_HANDLE_CRT: + GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, + (const char *)&_fmode, &h); + break; + case CLIB_HANDLE_KERNEL32: h = LoadLibraryA("kernel32.dll"); break; + case CLIB_HANDLE_USER32: h = LoadLibraryA("user32.dll"); break; + case CLIB_HANDLE_GDI32: h = LoadLibraryA("gdi32.dll"); break; + } + if (!h) continue; + clib_def_handle[i] = (void *)h; + } + p = (void *)GetProcAddress(h, name); + if (p) break; + } + } else { + p = (void *)GetProcAddress((HINSTANCE)cl->handle, name); + } + return p; +} + +#else + +#define CLIB_DEFHANDLE NULL + +LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt, + const char *name) +{ + lj_err_callermsg(L, lj_str_pushf(L, fmt, name, "no support for this OS")); +} + +static void *clib_loadlib(lua_State *L, const char *name, int global) +{ + lj_err_callermsg(L, "no support for loading dynamic libraries for this OS"); + UNUSED(name); UNUSED(global); + return NULL; +} + +static void clib_unloadlib(CLibrary *cl) +{ + UNUSED(cl); +} + +static void *clib_getsym(CLibrary *cl, const char *name) +{ + UNUSED(cl); UNUSED(name); + return NULL; +} + +#endif + +/* -- C library indexing -------------------------------------------------- */ + +#if LJ_TARGET_X86 && LJ_ABI_WIN +/* Compute argument size for fastcall/stdcall functions. 
*/ +static CTSize clib_func_argsize(CTState *cts, CType *ct) +{ + CTSize n = 0; + while (ct->sib) { + CType *d; + ct = ctype_get(cts, ct->sib); + lua_assert(ctype_isfield(ct->info)); + d = ctype_rawchild(cts, ct); + n += ((d->size + 3) & ~3); + } + return n; +} +#endif + +/* Get redirected or mangled external symbol. */ +static const char *clib_extsym(CTState *cts, CType *ct, GCstr *name) +{ + if (ct->sib) { + CType *ctf = ctype_get(cts, ct->sib); + if (ctype_isxattrib(ctf->info, CTA_REDIR)) + return strdata(gco2str(gcref(ctf->name))); + } + return strdata(name); +} + +/* Index a C library by name. */ +TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name) +{ + TValue *tv = lj_tab_setstr(L, cl->cache, name); + if (LJ_UNLIKELY(tvisnil(tv))) { + CTState *cts = ctype_cts(L); + CType *ct; + CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX); + if (!id) + lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name)); + if (ctype_isconstval(ct->info)) { + CType *ctt = ctype_child(cts, ct); + lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); + if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) + setnumV(tv, (lua_Number)(uint32_t)ct->size); + else + setintV(tv, (int32_t)ct->size); + } else { + const char *sym = clib_extsym(cts, ct, name); + void *p = clib_getsym(cl, sym); + GCcdata *cd; + lua_assert(ctype_isfunc(ct->info) || ctype_isextern(ct->info)); +#if LJ_TARGET_X86 && LJ_ABI_WIN + /* Retry with decorated name for fastcall/stdcall functions. */ + if (!p && ctype_isfunc(ct->info)) { + CTInfo cconv = ctype_cconv(ct->info); + if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) { + CTSize sz = clib_func_argsize(cts, ct); + sym = lj_str_pushf(L, cconv == CTCC_FASTCALL ? "@%s@%d" : "_%s@%d", + sym, sz); + L->top--; + p = clib_getsym(cl, sym); + } + } +#endif + if (!p) + clib_error(L, "cannot resolve symbol " LUA_QS ": %s", strdata(name)); + cd = lj_cdata_new(cts, id, CTSIZE_PTR); + *(void **)cdataptr(cd) = p; + setcdataV(L, tv, cd); + } + } + return tv; +} + +/* -- C library management ------------------------------------------------ */ + +/* Create a new CLibrary object and push it on the stack. */ +static CLibrary *clib_new(lua_State *L, GCtab *mt) +{ + GCtab *t = lj_tab_new(L, 0, 0); + GCudata *ud = lj_udata_new(L, sizeof(CLibrary), t); + CLibrary *cl = (CLibrary *)uddata(ud); + cl->cache = t; + ud->udtype = UDTYPE_FFI_CLIB; + /* NOBARRIER: The GCudata is new (marked white). */ + setgcref(ud->metatable, obj2gco(mt)); + setudataV(L, L->top++, ud); + return cl; +} + +/* Load a C library. */ +void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global) +{ + void *handle = clib_loadlib(L, strdata(name), global); + CLibrary *cl = clib_new(L, mt); + cl->handle = handle; +} + +/* Unload a C library. */ +void lj_clib_unload(CLibrary *cl) +{ + clib_unloadlib(cl); + cl->handle = NULL; +} + +/* Create the default C library object. */ +void lj_clib_default(lua_State *L, GCtab *mt) +{ + CLibrary *cl = clib_new(L, mt); + cl->handle = CLIB_DEFHANDLE; +} + +#endif diff --git a/src/luajit2/src/lj_clib.h b/src/luajit2/src/lj_clib.h new file mode 100644 index 0000000000..51c3270379 --- /dev/null +++ b/src/luajit2/src/lj_clib.h @@ -0,0 +1,29 @@ +/* +** FFI C library loader. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CLIB_H +#define _LJ_CLIB_H + +#include "lj_obj.h" + +#if LJ_HASFFI + +/* Namespace for C library indexing. */ +#define CLNS_INDEX ((1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL)) + +/* C library namespace. 
*/ +typedef struct CLibrary { + void *handle; /* Opaque handle for dynamic library loader. */ + GCtab *cache; /* Cache for resolved symbols. Anchored in ud->env. */ +} CLibrary; + +LJ_FUNC TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name); +LJ_FUNC void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global); +LJ_FUNC void lj_clib_unload(CLibrary *cl); +LJ_FUNC void lj_clib_default(lua_State *L, GCtab *mt); + +#endif + +#endif diff --git a/src/luajit2/src/lj_cparse.c b/src/luajit2/src/lj_cparse.c new file mode 100644 index 0000000000..523e51462e --- /dev/null +++ b/src/luajit2/src/lj_cparse.c @@ -0,0 +1,1829 @@ +/* +** C declaration parser. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_ctype.h" +#include "lj_cparse.h" +#include "lj_frame.h" +#include "lj_vm.h" +#include "lj_char.h" + +/* +** Important note: this is NOT a validating C parser! This is a minimal +** C declaration parser, solely for use by the LuaJIT FFI. +** +** It ought to return correct results for properly formed C declarations, +** but it may accept some invalid declarations, too (and return nonsense). +** Also, it shows rather generic error messages to avoid unnecessary bloat. +** If in doubt, please check the input against your favorite C compiler. +*/ + +/* -- C lexer ------------------------------------------------------------- */ + +/* C lexer token names. */ +static const char *const ctoknames[] = { +#define CTOKSTR(name, str) str, +CTOKDEF(CTOKSTR) +#undef CTOKSTR + NULL +}; + +/* Forward declaration. */ +LJ_NORET static void cp_err(CPState *cp, ErrMsg em); + +static const char *cp_tok2str(CPState *cp, CPToken tok) +{ + lua_assert(tok < CTOK_FIRSTDECL); + if (tok > CTOK_OFS) + return ctoknames[tok-CTOK_OFS-1]; + else if (!lj_char_iscntrl(tok)) + return lj_str_pushf(cp->L, "%c", tok); + else + return lj_str_pushf(cp->L, "char(%d)", tok); +} + +/* End-of-line? */ +static LJ_AINLINE int cp_iseol(CPChar c) +{ + return (c == '\n' || c == '\r'); +} + +static LJ_AINLINE CPChar cp_get(CPState *cp); + +/* Peek next raw character. */ +static LJ_AINLINE CPChar cp_rawpeek(CPState *cp) +{ + return (CPChar)(uint8_t)(*cp->p); +} + +/* Transparently skip backslash-escaped line breaks. */ +static LJ_NOINLINE CPChar cp_get_bs(CPState *cp) +{ + CPChar c2, c = cp_rawpeek(cp); + if (!cp_iseol(c)) return cp->c; + cp->p++; + c2 = cp_rawpeek(cp); + if (cp_iseol(c2) && c2 != c) cp->p++; + cp->linenumber++; + return cp_get(cp); +} + +/* Get next character. */ +static LJ_AINLINE CPChar cp_get(CPState *cp) +{ + cp->c = (CPChar)(uint8_t)(*cp->p++); + if (LJ_LIKELY(cp->c != '\\')) return cp->c; + return cp_get_bs(cp); +} + +/* Grow save buffer. */ +static LJ_NOINLINE void cp_save_grow(CPState *cp, CPChar c) +{ + MSize newsize; + if (cp->sb.sz >= CPARSE_MAX_BUF/2) + cp_err(cp, LJ_ERR_XELEM); + newsize = cp->sb.sz * 2; + lj_str_resizebuf(cp->L, &cp->sb, newsize); + cp->sb.buf[cp->sb.n++] = (char)c; +} + +/* Save character in buffer. */ +static LJ_AINLINE void cp_save(CPState *cp, CPChar c) +{ + if (LJ_UNLIKELY(cp->sb.n + 1 > cp->sb.sz)) + cp_save_grow(cp, c); + else + cp->sb.buf[cp->sb.n++] = (char)c; +} + +/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */ +static void cp_newline(CPState *cp) +{ + CPChar c = cp_rawpeek(cp); + if (cp_iseol(c) && c != cp->c) cp->p++; + cp->linenumber++; +} + +LJ_NORET static void cp_errmsg(CPState *cp, CPToken tok, ErrMsg em, ...) 
+{ + const char *msg, *tokstr; + lua_State *L; + va_list argp; + if (tok == 0) { + tokstr = NULL; + } else if (tok == CTOK_IDENT || tok == CTOK_INTEGER || tok == CTOK_STRING || + tok >= CTOK_FIRSTDECL) { + cp_save(cp, '\0'); + tokstr = cp->sb.buf; + } else { + tokstr = cp_tok2str(cp, tok); + } + L = cp->L; + va_start(argp, em); + msg = lj_str_pushvf(L, err2msg(em), argp); + va_end(argp); + if (tokstr) + msg = lj_str_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tokstr); + if (cp->linenumber > 1) + msg = lj_str_pushf(L, "%s at line %d", msg, cp->linenumber); + lj_err_callermsg(L, msg); +} + +LJ_NORET LJ_NOINLINE static void cp_err_token(CPState *cp, CPToken tok) +{ + cp_errmsg(cp, cp->tok, LJ_ERR_XTOKEN, cp_tok2str(cp, tok)); +} + +LJ_NORET LJ_NOINLINE static void cp_err_badidx(CPState *cp, CType *ct) +{ + GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL); + cp_errmsg(cp, 0, LJ_ERR_FFI_BADIDX, strdata(s)); +} + +LJ_NORET LJ_NOINLINE static void cp_err(CPState *cp, ErrMsg em) +{ + cp_errmsg(cp, 0, em); +} + +/* -- Main lexical scanner ------------------------------------------------ */ + +/* Parse integer literal. */ +static CPToken cp_integer(CPState *cp) +{ + uint32_t n = 0; + cp->val.id = CTID_INT32; + if (cp->c != '0') { /* Decimal. */ + do { + n = n*10 + (cp->c - '0'); + } while (lj_char_isdigit(cp_get(cp))); + } else if ((cp_get(cp)& ~0x20) == 'X') { /* Hexadecimal. */ + if (!lj_char_isxdigit(cp_get(cp))) + cp_err(cp, LJ_ERR_XNUMBER); + do { + n = n*16 + (cp->c & 15); + if (!lj_char_isdigit(cp->c)) n += 9; + } while (lj_char_isxdigit(cp_get(cp))); + if (n >= 0x80000000u) cp->val.id = CTID_UINT32; + } else { /* Octal. */ + while (cp->c >= '0' && cp->c <= '7') { + n = n*8 + (cp->c - '0'); + cp_get(cp); + } + if (n >= 0x80000000u) cp->val.id = CTID_UINT32; + } + cp->val.u32 = n; + for (;;) { /* Parse suffixes. */ + if ((cp->c & ~0x20) == 'U') + cp->val.id = CTID_UINT32; + else if ((cp->c & ~0x20) != 'L') + break; + cp_get(cp); + } + if (lj_char_isident(cp->c)) + cp_errmsg(cp, cp->c, LJ_ERR_XNUMBER); + return CTOK_INTEGER; +} + +/* Parse identifier or keyword. */ +static CPToken cp_ident(CPState *cp) +{ + do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp))); + cp->str = lj_str_new(cp->L, cp->sb.buf, cp->sb.n); + cp->val.id = lj_ctype_getname(cp->cts, &cp->ct, cp->str, cp->tmask); + if (ctype_type(cp->ct->info) == CT_KW) + return ctype_cid(cp->ct->info); + return CTOK_IDENT; +} + +/* Parse string or character constant. */ +static CPToken cp_string(CPState *cp) +{ + CPChar delim = cp->c; + cp_get(cp); + while (cp->c != delim) { + CPChar c = cp->c; + if (c == '\0') cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR); + if (c == '\\') { + c = cp_get(cp); + switch (c) { + case '\0': cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR); break; + case 'a': c = '\a'; break; + case 'b': c = '\b'; break; + case 'f': c = '\f'; break; + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + case 'v': c = '\v'; break; + case 'e': c = 27; break; + case 'x': + c = 0; + while (lj_char_isxdigit(cp_get(cp))) + c = (c<<4) + (lj_char_isdigit(cp->c) ? 
cp->c-'0' : (cp->c&15)+9); + cp_save(cp, (c & 0xff)); + continue; + default: + if (lj_char_isdigit(c)) { + c -= '0'; + if (lj_char_isdigit(cp_get(cp))) { + c = c*8 + (cp->c - '0'); + if (lj_char_isdigit(cp_get(cp))) { + c = c*8 + (cp->c - '0'); + cp_get(cp); + } + } + cp_save(cp, (c & 0xff)); + continue; + } + break; + } + } + cp_save(cp, c); + cp_get(cp); + } + cp_get(cp); + if (delim == '"') { + cp->str = lj_str_new(cp->L, cp->sb.buf, cp->sb.n); + return CTOK_STRING; + } else { + if (cp->sb.n != 1) cp_err_token(cp, '\''); + cp->val.i32 = (int32_t)(char)cp->sb.buf[0]; + cp->val.id = CTID_INT32; + return CTOK_INTEGER; + } +} + +/* Skip C comment. */ +static void cp_comment_c(CPState *cp) +{ + do { + if (cp_get(cp) == '*' && cp_get(cp) == '/') { cp_get(cp); break; } + if (cp_iseol(cp->c)) cp_newline(cp); + } while (cp->c != '\0'); +} + +/* Skip C++ comment. */ +static void cp_comment_cpp(CPState *cp) +{ + while (!cp_iseol(cp_get(cp)) && cp->c != '\0') + ; +} + +/* Lexical scanner for C. Only a minimal subset is implemented. */ +static CPToken cp_next_(CPState *cp) +{ + lj_str_resetbuf(&cp->sb); + for (;;) { + if (lj_char_isident(cp->c)) + return lj_char_isdigit(cp->c) ? cp_integer(cp) : cp_ident(cp); + switch (cp->c) { + case '\n': case '\r': cp_newline(cp); /* fallthrough. */ + case ' ': case '\t': case '\v': case '\f': cp_get(cp); break; + case '"': case '\'': return cp_string(cp); + case '/': + cp_get(cp); + if (cp->c == '*') cp_comment_c(cp); + else if (cp->c == '/') cp_comment_cpp(cp); + else return '/'; + break; + case '|': + cp_get(cp); if (cp->c != '|') return '|'; cp_get(cp); return CTOK_OROR; + case '&': + cp_get(cp); if (cp->c != '&') return '&'; cp_get(cp); return CTOK_ANDAND; + case '=': + cp_get(cp); if (cp->c != '=') return '='; cp_get(cp); return CTOK_EQ; + case '!': + cp_get(cp); if (cp->c != '=') return '!'; cp_get(cp); return CTOK_NE; + case '<': + cp_get(cp); + if (cp->c == '=') { cp_get(cp); return CTOK_LE; } + else if (cp->c == '<') { cp_get(cp); return CTOK_SHL; } + return '<'; + case '>': + cp_get(cp); + if (cp->c == '=') { cp_get(cp); return CTOK_GE; } + else if (cp->c == '>') { cp_get(cp); return CTOK_SHR; } + return '>'; + case '-': + cp_get(cp); if (cp->c != '>') return '-'; cp_get(cp); return CTOK_DEREF; + case '\0': return CTOK_EOF; + default: { CPToken c = cp->c; cp_get(cp); return c; } + } + } +} + +static LJ_NOINLINE CPToken cp_next(CPState *cp) +{ + return (cp->tok = cp_next_(cp)); +} + +/* -- C parser ------------------------------------------------------------ */ + +/* Namespaces for resolving identifiers. */ +#define CPNS_DEFAULT \ + ((1u<<CT_KW)|(1u<<CT_TYPEDEF)|(1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL)) +#define CPNS_STRUCT ((1u<<CT_KW)|(1u<<CT_STRUCT)|(1u<<CT_ENUM)) + +typedef CTypeID CPDeclIdx; /* Index into declaration stack. */ +typedef uint32_t CPscl; /* Storage class flags. */ + +/* Type declaration context. */ +typedef struct CPDecl { + CPDeclIdx top; /* Top of declaration stack. */ + CPDeclIdx pos; /* Insertion position in declaration chain. */ + CPDeclIdx specpos; /* Saved position for declaration specifier. */ + uint32_t mode; /* Declarator mode. */ + CPState *cp; /* C parser state. */ + GCstr *name; /* Name of declared identifier (if direct). */ + GCstr *redir; /* Redirected symbol name. */ + CTypeID nameid; /* Existing typedef for declared identifier. */ + CTInfo attr; /* Attributes. */ + CTInfo fattr; /* Function attributes. */ + CTInfo specattr; /* Saved attributes. */ + CTInfo specfattr; /* Saved function attributes. 
*/ + CTSize bits; /* Field size in bits (if any). */ + CType stack[CPARSE_MAX_DECLSTACK]; /* Type declaration stack. */ +} CPDecl; + +/* Forward declarations. */ +static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl); +static void cp_declarator(CPState *cp, CPDecl *decl); +static CTypeID cp_decl_abstract(CPState *cp); + +/* Initialize C parser state. Caller must set up: L, p, srcname, mode. */ +static void cp_init(CPState *cp) +{ + cp->linenumber = 1; + cp->depth = 0; + cp->curpack = 0; + cp->packstack[0] = 255; + lj_str_initbuf(&cp->sb); + lj_str_resizebuf(cp->L, &cp->sb, LJ_MIN_SBUF); + lua_assert(cp->p != NULL); + cp_get(cp); /* Read-ahead first char. */ + cp->tok = 0; + cp->tmask = CPNS_DEFAULT; + cp_next(cp); /* Read-ahead first token. */ +} + +/* Cleanup C parser state. */ +static void cp_cleanup(CPState *cp) +{ + global_State *g = G(cp->L); + lj_str_freebuf(g, &cp->sb); +} + +/* Check and consume optional token. */ +static int cp_opt(CPState *cp, CPToken tok) +{ + if (cp->tok == tok) { cp_next(cp); return 1; } + return 0; +} + +/* Check and consume token. */ +static void cp_check(CPState *cp, CPToken tok) +{ + if (cp->tok != tok) cp_err_token(cp, tok); + cp_next(cp); +} + +/* Check if the next token may start a type declaration. */ +static int cp_istypedecl(CPState *cp) +{ + if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECL) return 1; + if (cp->tok == CTOK_IDENT && ctype_istypedef(cp->ct->info)) return 1; + return 0; +} + +/* -- Constant expression evaluator --------------------------------------- */ + +/* Forward declarations. */ +static void cp_expr_unary(CPState *cp, CPValue *k); +static void cp_expr_sub(CPState *cp, CPValue *k, int pri); + +/* Please note that type handling is very weak here. Most ops simply +** assume integer operands. Accessors are only needed to compute types and +** return synthetic values. The only purpose of the expression evaluator +** is to compute the values of constant expressions one would typically +** find in C header files. And again: this is NOT a validating C parser! +*/ + +/* Parse comma separated expression and return last result. */ +static void cp_expr_comma(CPState *cp, CPValue *k) +{ + do { cp_expr_sub(cp, k, 0); } while (cp_opt(cp, ',')); +} + +/* Parse sizeof/alignof operator. */ +static void cp_expr_sizeof(CPState *cp, CPValue *k, int wantsz) +{ + CTSize sz; + CTInfo info; + if (cp_opt(cp, '(')) { + if (cp_istypedecl(cp)) + k->id = cp_decl_abstract(cp); + else + cp_expr_comma(cp, k); + cp_check(cp, ')'); + } else { + cp_expr_unary(cp, k); + } + info = lj_ctype_info(cp->cts, k->id, &sz); + if (wantsz) { + if (sz != CTSIZE_INVALID) + k->u32 = sz; + else if (k->id != CTID_A_CCHAR) /* Special case for sizeof("string"). */ + cp_err(cp, LJ_ERR_FFI_INVSIZE); + } else { + k->u32 = 1u << ctype_align(info); + } + k->id = CTID_UINT32; /* Really size_t. */ +} + +/* Parse prefix operators. */ +static void cp_expr_prefix(CPState *cp, CPValue *k) +{ + if (cp->tok == CTOK_INTEGER) { + *k = cp->val; cp_next(cp); + } else if (cp_opt(cp, '+')) { + cp_expr_unary(cp, k); /* Nothing to do (well, integer promotion). */ + } else if (cp_opt(cp, '-')) { + cp_expr_unary(cp, k); k->i32 = -k->i32; + } else if (cp_opt(cp, '~')) { + cp_expr_unary(cp, k); k->i32 = ~k->i32; + } else if (cp_opt(cp, '!')) { + cp_expr_unary(cp, k); k->i32 = !k->i32; k->id = CTID_INT32; + } else if (cp_opt(cp, '(')) { + if (cp_istypedecl(cp)) { /* Cast operator. 
*/ + CTypeID id = cp_decl_abstract(cp); + cp_check(cp, ')'); + cp_expr_unary(cp, k); + k->id = id; /* No conversion performed. */ + } else { /* Sub-expression. */ + cp_expr_comma(cp, k); + cp_check(cp, ')'); + } + } else if (cp_opt(cp, '*')) { /* Indirection. */ + CType *ct; + cp_expr_unary(cp, k); + ct = lj_ctype_rawref(cp->cts, k->id); + if (!ctype_ispointer(ct->info)) + cp_err_badidx(cp, ct); + k->u32 = 0; k->id = ctype_cid(ct->info); + } else if (cp_opt(cp, '&')) { /* Address operator. */ + cp_expr_unary(cp, k); + k->id = lj_ctype_intern(cp->cts, CTINFO(CT_PTR, CTALIGN_PTR+k->id), + CTSIZE_PTR); + } else if (cp_opt(cp, CTOK_SIZEOF)) { + cp_expr_sizeof(cp, k, 1); + } else if (cp_opt(cp, CTOK_ALIGNOF)) { + cp_expr_sizeof(cp, k, 0); + } else if (cp->tok == CTOK_IDENT) { + if (ctype_type(cp->ct->info) == CT_CONSTVAL) { + k->u32 = cp->ct->size; k->id = ctype_cid(cp->ct->info); + } else if (ctype_type(cp->ct->info) == CT_EXTERN) { + k->u32 = cp->val.id; k->id = ctype_cid(cp->ct->info); + } else if (ctype_type(cp->ct->info) == CT_FUNC) { + k->u32 = cp->val.id; k->id = cp->val.id; + } else { + goto err_expr; + } + cp_next(cp); + } else if (cp->tok == CTOK_STRING) { + CTSize sz = cp->str->len; + while (cp_next(cp) == CTOK_STRING) + sz += cp->str->len; + k->u32 = sz + 1; + k->id = CTID_A_CCHAR; + } else { + err_expr: + cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); + } +} + +/* Parse postfix operators. */ +static void cp_expr_postfix(CPState *cp, CPValue *k) +{ + for (;;) { + CType *ct; + if (cp_opt(cp, '[')) { /* Array/pointer index. */ + CPValue k2; + cp_expr_comma(cp, &k2); + ct = lj_ctype_rawref(cp->cts, k->id); + if (!ctype_ispointer(ct->info)) { + ct = lj_ctype_rawref(cp->cts, k2.id); + if (!ctype_ispointer(ct->info)) + cp_err_badidx(cp, ct); + } + cp_check(cp, ']'); + k->u32 = 0; + } else if (cp->tok == '.' || cp->tok == CTOK_DEREF) { /* Struct deref. */ + CTSize ofs; + CType *fct; + ct = lj_ctype_rawref(cp->cts, k->id); + if (cp->tok == CTOK_DEREF) { + if (!ctype_ispointer(ct->info)) + cp_err_badidx(cp, ct); + ct = lj_ctype_rawref(cp->cts, ctype_cid(ct->info)); + } + cp_next(cp); + if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); + if (!ctype_isstruct(ct->info) || ct->size == CTSIZE_INVALID || + !(fct = lj_ctype_getfield(cp->cts, ct, cp->str, &ofs)) || + ctype_isbitfield(fct->info)) { + GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL); + cp_errmsg(cp, 0, LJ_ERR_FFI_BADMEMBER, strdata(s), strdata(cp->str)); + } + ct = fct; + k->u32 = ctype_isconstval(ct->info) ? ct->size : 0; + cp_next(cp); + } else { + return; + } + k->id = ctype_cid(ct->info); + } +} + +/* Parse infix operators. */ +static void cp_expr_infix(CPState *cp, CPValue *k, int pri) +{ + CPValue k2; + k2.u32 = 0; k2.id = 0; /* Silence the compiler. */ + for (;;) { + switch (pri) { + case 0: + if (cp_opt(cp, '?')) { + CPValue k3; + cp_expr_comma(cp, &k2); /* Right-associative. */ + cp_check(cp, ':'); + cp_expr_sub(cp, &k3, 0); + k->u32 = k->u32 ? k2.u32 : k3.u32; + k->id = k2.id > k3.id ? 
k2.id : k3.id; + continue; + } + case 1: + if (cp_opt(cp, CTOK_OROR)) { + cp_expr_sub(cp, &k2, 2); k->i32 = k->u32 || k2.u32; k->id = CTID_INT32; + continue; + } + case 2: + if (cp_opt(cp, CTOK_ANDAND)) { + cp_expr_sub(cp, &k2, 3); k->i32 = k->u32 && k2.u32; k->id = CTID_INT32; + continue; + } + case 3: + if (cp_opt(cp, '|')) { + cp_expr_sub(cp, &k2, 4); k->u32 = k->u32 | k2.u32; goto arith_result; + } + case 4: + if (cp_opt(cp, '^')) { + cp_expr_sub(cp, &k2, 5); k->u32 = k->u32 ^ k2.u32; goto arith_result; + } + case 5: + if (cp_opt(cp, '&')) { + cp_expr_sub(cp, &k2, 6); k->u32 = k->u32 & k2.u32; goto arith_result; + } + case 6: + if (cp_opt(cp, CTOK_EQ)) { + cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 == k2.u32; k->id = CTID_INT32; + continue; + } else if (cp_opt(cp, CTOK_NE)) { + cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 != k2.u32; k->id = CTID_INT32; + continue; + } + case 7: + if (cp_opt(cp, '<')) { + cp_expr_sub(cp, &k2, 8); + if (k->id == CTID_INT32 && k2.id == CTID_INT32) + k->i32 = k->i32 < k2.i32; + else + k->i32 = k->u32 < k2.u32; + k->id = CTID_INT32; + continue; + } else if (cp_opt(cp, '>')) { + cp_expr_sub(cp, &k2, 8); + if (k->id == CTID_INT32 && k2.id == CTID_INT32) + k->i32 = k->i32 > k2.i32; + else + k->i32 = k->u32 > k2.u32; + k->id = CTID_INT32; + continue; + } else if (cp_opt(cp, CTOK_LE)) { + cp_expr_sub(cp, &k2, 8); + if (k->id == CTID_INT32 && k2.id == CTID_INT32) + k->i32 = k->i32 <= k2.i32; + else + k->i32 = k->u32 <= k2.u32; + k->id = CTID_INT32; + continue; + } else if (cp_opt(cp, CTOK_GE)) { + cp_expr_sub(cp, &k2, 8); + if (k->id == CTID_INT32 && k2.id == CTID_INT32) + k->i32 = k->i32 >= k2.i32; + else + k->i32 = k->u32 >= k2.u32; + k->id = CTID_INT32; + continue; + } + case 8: + if (cp_opt(cp, CTOK_SHL)) { + cp_expr_sub(cp, &k2, 9); k->u32 = k->u32 << k2.u32; + continue; + } else if (cp_opt(cp, CTOK_SHR)) { + cp_expr_sub(cp, &k2, 9); + if (k->id == CTID_INT32) + k->i32 = k->i32 >> k2.i32; + else + k->u32 = k->u32 >> k2.u32; + continue; + } + case 9: + if (cp_opt(cp, '+')) { + cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 + k2.u32; + arith_result: + if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */ + continue; + } else if (cp_opt(cp, '-')) { + cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 - k2.u32; goto arith_result; + } + case 10: + if (cp_opt(cp, '*')) { + cp_expr_unary(cp, &k2); k->u32 = k->u32 * k2.u32; goto arith_result; + } else if (cp_opt(cp, '/')) { + cp_expr_unary(cp, &k2); + if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */ + if (k2.u32 == 0 || + (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1)) + cp_err(cp, LJ_ERR_BADVAL); + if (k->id == CTID_INT32) + k->i32 = k->i32 / k2.i32; + else + k->u32 = k->u32 / k2.u32; + continue; + } else if (cp_opt(cp, '%')) { + cp_expr_unary(cp, &k2); + if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */ + if (k2.u32 == 0 || + (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1)) + cp_err(cp, LJ_ERR_BADVAL); + if (k->id == CTID_INT32) + k->i32 = k->i32 % k2.i32; + else + k->u32 = k->u32 % k2.u32; + continue; + } + default: + return; + } + } +} + +/* Parse and evaluate unary expression. */ +static void cp_expr_unary(CPState *cp, CPValue *k) +{ + if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS); + cp_expr_prefix(cp, k); + cp_expr_postfix(cp, k); + cp->depth--; +} + +/* Parse and evaluate sub-expression. 
*/ +static void cp_expr_sub(CPState *cp, CPValue *k, int pri) +{ + cp_expr_unary(cp, k); + cp_expr_infix(cp, k, pri); +} + +/* Parse constant integer expression. */ +static void cp_expr_kint(CPState *cp, CPValue *k) +{ + CType *ct; + cp_expr_sub(cp, k, 0); + ct = ctype_raw(cp->cts, k->id); + if (!ctype_isinteger(ct->info)) cp_err(cp, LJ_ERR_BADVAL); +} + +/* Parse (non-negative) size expression. */ +static CTSize cp_expr_ksize(CPState *cp) +{ + CPValue k; + cp_expr_kint(cp, &k); + if (k.u32 >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE); + return k.u32; +} + +/* -- Type declaration stack management ----------------------------------- */ + +/* Add declaration element behind the insertion position. */ +static CPDeclIdx cp_add(CPDecl *decl, CTInfo info, CTSize size) +{ + CPDeclIdx top = decl->top; + if (top >= CPARSE_MAX_DECLSTACK) cp_err(decl->cp, LJ_ERR_XLEVELS); + decl->stack[top].info = info; + decl->stack[top].size = size; + decl->stack[top].sib = 0; + setgcrefnull(decl->stack[top].name); + decl->stack[top].next = decl->stack[decl->pos].next; + decl->stack[decl->pos].next = (CTypeID1)top; + decl->top = top+1; + return top; +} + +/* Push declaration element before the insertion position. */ +static CPDeclIdx cp_push(CPDecl *decl, CTInfo info, CTSize size) +{ + return (decl->pos = cp_add(decl, info, size)); +} + +/* Push or merge attributes. */ +static void cp_push_attributes(CPDecl *decl) +{ + CType *ct = &decl->stack[decl->pos]; + if (ctype_isfunc(ct->info)) { /* Ok to modify in-place. */ +#if LJ_TARGET_X86 + if ((decl->fattr & CTFP_CCONV)) + ct->info = (ct->info & (CTMASK_NUM|CTF_VARARG|CTMASK_CID)) + + (decl->fattr & ~CTMASK_CID); +#endif + } else { + if ((decl->attr & CTFP_ALIGNED) && !(decl->mode & CPARSE_MODE_FIELD)) + cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_ALIGN)), + ctype_align(decl->attr)); + } +} + +/* Push unrolled type to declaration stack and merge qualifiers. */ +static void cp_push_type(CPDecl *decl, CTypeID id) +{ + CType *ct = ctype_get(decl->cp->cts, id); + CTInfo info = ct->info; + CTSize size = ct->size; + switch (ctype_type(info)) { + case CT_STRUCT: case CT_ENUM: + cp_push(decl, CTINFO(CT_TYPEDEF, id), 0); /* Don't copy unique types. */ + if ((decl->attr & CTF_QUAL)) { /* Push unmerged qualifiers. */ + cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_QUAL)), + (decl->attr & CTF_QUAL)); + decl->attr &= ~CTF_QUAL; + } + break; + case CT_ATTRIB: + if (ctype_isxattrib(info, CTA_QUAL)) + decl->attr &= ~size; /* Remove redundant qualifiers. */ + cp_push_type(decl, ctype_cid(info)); /* Unroll. */ + cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */ + break; + case CT_ARRAY: + cp_push_type(decl, ctype_cid(info)); /* Unroll. */ + cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */ + decl->stack[decl->pos].sib = 1; /* Mark as already checked and sized. */ + /* Note: this is not copied to the ct->sib in the C type table. */ + break; + case CT_FUNC: + /* Copy type, link parameters (shared). */ + decl->stack[cp_push(decl, info, size)].sib = ct->sib; + break; + default: + /* Copy type, merge common qualifiers. */ + cp_push(decl, info|(decl->attr & CTF_QUAL), size); + decl->attr &= ~CTF_QUAL; + break; + } +} + +/* Consume the declaration element chain and intern the C type. 
*/ +static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl) +{ + CTypeID id = 0; + CPDeclIdx idx = 0; + CTSize csize = CTSIZE_INVALID; + CTSize cinfo = 0; + do { + CType *ct = &decl->stack[idx]; + CTInfo info = ct->info; + CTInfo size = ct->size; + /* The cid is already part of info for copies of pointers/functions. */ + idx = ct->next; + if (ctype_istypedef(info)) { + lua_assert(id == 0); + id = ctype_cid(info); + /* Always refetch info/size, since struct/enum may have been completed. */ + cinfo = ctype_get(cp->cts, id)->info; + csize = ctype_get(cp->cts, id)->size; + lua_assert(ctype_isstruct(cinfo) || ctype_isenum(cinfo)); + } else if (ctype_isfunc(info)) { /* Intern function. */ + CType *fct; + CTypeID fid; + CTypeID sib; + if (id) { + CType *refct = ctype_raw(cp->cts, id); + /* Reject function or refarray return types. */ + if (ctype_isfunc(refct->info) || ctype_isrefarray(refct->info)) + cp_err(cp, LJ_ERR_FFI_INVTYPE); + } + /* No intervening attributes allowed, skip forward. */ + while (idx) { + CType *ctn = &decl->stack[idx]; + if (!ctype_isattrib(ctn->info)) break; + idx = ctn->next; /* Skip attribute. */ + } + sib = ct->sib; /* Next line may reallocate the C type table. */ + fid = lj_ctype_new(cp->cts, &fct); + csize = CTSIZE_INVALID; + fct->info = cinfo = info + id; + fct->size = size; + fct->sib = sib; + id = fid; + } else if (ctype_isattrib(info)) { + if (ctype_isxattrib(info, CTA_QUAL)) + cinfo |= size; + else if (ctype_isxattrib(info, CTA_ALIGN)) + CTF_INSERT(cinfo, ALIGN, size); + id = lj_ctype_intern(cp->cts, info+id, size); + /* Inherit csize/cinfo from original type. */ + } else { + if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */ + lua_assert(id == 0); + if (!(info & CTF_BOOL)) { + CTSize msize = ctype_msizeP(decl->attr); + CTSize vsize = ctype_vsizeP(decl->attr); + if (msize && (!(info & CTF_FP) || (msize == 4 || msize == 8))) { + CTSize malign = lj_fls(msize); + if (malign > 4) malign = 4; /* Limit alignment. */ + CTF_INSERT(info, ALIGN, malign); + size = msize; /* Override size via mode. */ + } + if (vsize) { /* Vector size set? */ + CTSize esize = lj_fls(size); + if (vsize >= esize) { + /* Intern the element type first. */ + id = lj_ctype_intern(cp->cts, info, size); + /* Then create a vector (array) with vsize alignment. */ + size = (1u << vsize); + if (vsize > 4) vsize = 4; /* Limit alignment. */ + if (ctype_align(info) > vsize) vsize = ctype_align(info); + info = CTINFO(CT_ARRAY, (info & CTF_QUAL) + CTF_VECTOR + + CTALIGN(vsize)); + } + } + } + } else if (ctype_isptr(info)) { + /* Reject pointer/ref to ref. */ + if (id && ctype_isref(ctype_raw(cp->cts, id)->info)) + cp_err(cp, LJ_ERR_FFI_INVTYPE); + if (ctype_isref(info)) { + info &= ~CTF_VOLATILE; /* Refs are always const, never volatile. */ + /* No intervening attributes allowed, skip forward. */ + while (idx) { + CType *ctn = &decl->stack[idx]; + if (!ctype_isattrib(ctn->info)) break; + idx = ctn->next; /* Skip attribute. */ + } + } + } else if (ctype_isarray(info)) { /* Check for valid array size etc. */ + if (ct->sib == 0) { /* Only check/size arrays not copied by unroll. */ + if (ctype_isref(cinfo)) /* Reject arrays of refs. */ + cp_err(cp, LJ_ERR_FFI_INVTYPE); + /* Reject VLS or unknown-sized types. */ + if (ctype_isvltype(cinfo) || csize == CTSIZE_INVALID) + cp_err(cp, LJ_ERR_FFI_INVSIZE); + /* a[] and a[?] keep their invalid size. 
*/ + if (size != CTSIZE_INVALID) { + uint64_t xsz = (uint64_t)size * csize; + if (xsz >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE); + size = (CTSize)xsz; + } + } + info |= (cinfo & (CTF_QUAL|CTF_ALIGN)); /* Inherit qual and align. */ + } else { + lua_assert(ctype_isvoid(info)); + } + csize = size; + cinfo = info+id; + id = lj_ctype_intern(cp->cts, info+id, size); + } + } while (idx); + return id; +} + +/* -- C declaration parser ------------------------------------------------ */ + +#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be) + +/* Reset declaration state to declaration specifier. */ +static void cp_decl_reset(CPDecl *decl) +{ + decl->pos = decl->specpos; + decl->top = decl->specpos+1; + decl->stack[decl->specpos].next = 0; + decl->attr = decl->specattr; + decl->fattr = decl->specfattr; + decl->name = NULL; + decl->redir = NULL; +} + +/* Parse constant initializer. */ +/* NYI: FP constants and strings as initializers. */ +static CTypeID cp_decl_constinit(CPState *cp, CType **ctp, CTypeID typeid) +{ + CType *ctt = ctype_get(cp->cts, typeid); + CTInfo info; + CTSize size; + CPValue k; + CTypeID constid; + while (ctype_isattrib(ctt->info)) { /* Skip attributes. */ + typeid = ctype_cid(ctt->info); /* Update ID, too. */ + ctt = ctype_get(cp->cts, typeid); + } + info = ctt->info; + size = ctt->size; + if (!ctype_isinteger(info) || !(info & CTF_CONST) || size > 4) + cp_err(cp, LJ_ERR_FFI_INVTYPE); + cp_check(cp, '='); + cp_expr_sub(cp, &k, 0); + constid = lj_ctype_new(cp->cts, ctp); + (*ctp)->info = CTINFO(CT_CONSTVAL, CTF_CONST|typeid); + k.u32 <<= 8*(4-size); + if ((info & CTF_UNSIGNED)) + k.u32 >>= 8*(4-size); + else + k.u32 = (uint32_t)((int32_t)k.u32 >> 8*(4-size)); + (*ctp)->size = k.u32; + return constid; +} + +/* Parse size in parentheses as part of attribute. */ +static CTSize cp_decl_sizeattr(CPState *cp) +{ + CTSize sz; + uint32_t oldtmask = cp->tmask; + cp->tmask = CPNS_DEFAULT; /* Required for expression evaluator. */ + cp_check(cp, '('); + sz = cp_expr_ksize(cp); + cp->tmask = oldtmask; + cp_check(cp, ')'); + return sz; +} + +/* Parse alignment attribute. */ +static void cp_decl_align(CPState *cp, CPDecl *decl) +{ + CTSize al = 4; /* Unspecified alignment is 16 bytes. */ + if (cp->tok == '(') { + al = cp_decl_sizeattr(cp); + al = al ? lj_fls(al) : 0; + } + CTF_INSERT(decl->attr, ALIGN, al); + decl->attr |= CTFP_ALIGNED; +} + +/* Parse GCC asm("name") redirect. */ +static void cp_decl_asm(CPState *cp, CPDecl *decl) +{ + UNUSED(decl); + cp_next(cp); + cp_check(cp, '('); + if (cp->tok == CTOK_STRING) { + GCstr *str = cp->str; + while (cp_next(cp) == CTOK_STRING) { + lj_str_pushf(cp->L, "%s%s", strdata(str), strdata(cp->str)); + cp->L->top--; + str = strV(cp->L->top); + } + decl->redir = str; + } + cp_check(cp, ')'); +} + +/* Parse GCC __attribute__((mode(...))). 
*/ +static void cp_decl_mode(CPState *cp, CPDecl *decl) +{ + cp_check(cp, '('); + if (cp->tok == CTOK_IDENT) { + const char *s = strdata(cp->str); + CTSize sz = 0, vlen = 0; + if (s[0] == '_' && s[1] == '_') s += 2; + if (*s == 'V') { + s++; + vlen = *s++ - '0'; + if (*s >= '0' && *s <= '9') + vlen = vlen*10 + (*s++ - '0'); + } + switch (*s++) { + case 'Q': sz = 1; break; + case 'H': sz = 2; break; + case 'S': sz = 4; break; + case 'D': sz = 8; break; + case 'T': sz = 16; break; + case 'O': sz = 32; break; + default: goto bad_size; + } + if (*s == 'I' || *s == 'F') { + CTF_INSERT(decl->attr, MSIZEP, sz); + if (vlen) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vlen*sz)); + } + bad_size: + cp_next(cp); + } + cp_check(cp, ')'); +} + +/* Parse GCC __attribute__((...)). */ +static void cp_decl_gccattribute(CPState *cp, CPDecl *decl) +{ + cp_next(cp); + cp_check(cp, '('); + cp_check(cp, '('); + while (cp->tok != ')') { + if (cp->tok == CTOK_IDENT) { + GCstr *attrstr = cp->str; + cp_next(cp); + switch (attrstr->hash) { + case H_(64a9208e,8ce14319): case H_(8e6331b2,95a282af): /* aligned */ + cp_decl_align(cp, decl); + break; + case H_(42eb47de,f0ede26c): case H_(29f48a09,cf383e0c): /* packed */ + decl->attr |= CTFP_PACKED; + break; + case H_(0a84eef6,8dfab04c): case H_(995cf92c,d5696591): /* mode */ + cp_decl_mode(cp, decl); + break; + case H_(0ab31997,2d5213fa): case H_(bf875611,200e9990): /* vector_size */ + { + CTSize vsize = cp_decl_sizeattr(cp); + if (vsize) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vsize)); + } + break; +#if LJ_TARGET_X86 + case H_(5ad22db8,c689b848): case H_(439150fa,65ea78cb): /* regparm */ + CTF_INSERT(decl->fattr, REGPARM, cp_decl_sizeattr(cp)); + decl->fattr |= CTFP_CCONV; + break; + case H_(18fc0b98,7ff4c074): case H_(4e62abed,0a747424): /* cdecl */ + CTF_INSERT(decl->fattr, CCONV, CTCC_CDECL); + decl->fattr |= CTFP_CCONV; + break; + case H_(72b2e41b,494c5a44): case H_(f2356d59,f25fc9bd): /* thiscall */ + CTF_INSERT(decl->fattr, CCONV, CTCC_THISCALL); + decl->fattr |= CTFP_CCONV; + break; + case H_(0d0ffc42,ab746f88): case H_(21c54ba1,7f0ca7e3): /* fastcall */ + CTF_INSERT(decl->fattr, CCONV, CTCC_FASTCALL); + decl->fattr |= CTFP_CCONV; + break; + case H_(ef76b040,9412e06a): case H_(de56697b,c750e6e1): /* stdcall */ + CTF_INSERT(decl->fattr, CCONV, CTCC_STDCALL); + decl->fattr |= CTFP_CCONV; + break; + case H_(ea78b622,f234bd8e): case H_(252ffb06,8d50f34b): /* sseregparm */ + decl->fattr |= CTF_SSEREGPARM; + decl->fattr |= CTFP_CCONV; + break; +#endif + default: /* Skip all other attributes. */ + goto skip_attr; + } + } else if (cp->tok >= CTOK_FIRSTDECL) { /* For __attribute((const)) etc. */ + cp_next(cp); + skip_attr: + if (cp_opt(cp, '(')) { + while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp); + cp_check(cp, ')'); + } + } else { + break; + } + if (!cp_opt(cp, ',')) break; + } + cp_check(cp, ')'); + cp_check(cp, ')'); +} + +/* Parse MSVC __declspec(...). */ +static void cp_decl_msvcattribute(CPState *cp, CPDecl *decl) +{ + cp_next(cp); + cp_check(cp, '('); + while (cp->tok == CTOK_IDENT) { + GCstr *attrstr = cp->str; + cp_next(cp); + switch (attrstr->hash) { + case H_(bc2395fa,98f267f8): /* align */ + cp_decl_align(cp, decl); + break; + default: /* Ignore all other attributes. */ + if (cp_opt(cp, '(')) { + while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp); + cp_check(cp, ')'); + } + break; + } + } + cp_check(cp, ')'); +} + +/* Parse declaration attributes (and common qualifiers). 
*/ +static void cp_decl_attributes(CPState *cp, CPDecl *decl) +{ + for (;;) { + switch (cp->tok) { + case CTOK_CONST: decl->attr |= CTF_CONST; break; + case CTOK_VOLATILE: decl->attr |= CTF_VOLATILE; break; + case CTOK_RESTRICT: break; /* Ignore. */ + case CTOK_EXTENSION: break; /* Ignore. */ + case CTOK_ATTRIBUTE: cp_decl_gccattribute(cp, decl); continue; + case CTOK_ASM: cp_decl_asm(cp, decl); continue; + case CTOK_DECLSPEC: cp_decl_msvcattribute(cp, decl); continue; + case CTOK_CCDECL: +#if LJ_TARGET_X86 + CTF_INSERT(decl->fattr, CCONV, cp->ct->size); + decl->fattr |= CTFP_CCONV; +#endif + break; + case CTOK_PTRSZ: +#if LJ_64 + CTF_INSERT(decl->attr, MSIZEP, cp->ct->size); +#endif + break; + default: return; + } + cp_next(cp); + } +} + +/* Parse struct/union/enum name. */ +static CTypeID cp_struct_name(CPState *cp, CPDecl *sdecl, CTInfo info) +{ + CTypeID sid; + CType *ct; + cp->tmask = CPNS_STRUCT; + cp_next(cp); + cp_decl_attributes(cp, sdecl); + cp->tmask = CPNS_DEFAULT; + if (cp->tok != '{') { + if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); + if (cp->val.id) { /* Name of existing struct/union/enum. */ + sid = cp->val.id; + ct = cp->ct; + if ((ct->info ^ info) & (CTMASK_NUM|CTF_UNION)) /* Wrong type. */ + cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name)))); + } else { /* Create named, incomplete struct/union/enum. */ + if ((cp->mode & CPARSE_MODE_NOIMPLICIT)) + cp_errmsg(cp, 0, LJ_ERR_FFI_BADTAG, strdata(cp->str)); + sid = lj_ctype_new(cp->cts, &ct); + ct->info = info; + ct->size = CTSIZE_INVALID; + ctype_setname(ct, cp->str); + lj_ctype_addname(cp->cts, ct, sid); + } + cp_next(cp); + } else { /* Create anonymous, incomplete struct/union/enum. */ + sid = lj_ctype_new(cp->cts, &ct); + ct->info = info; + ct->size = CTSIZE_INVALID; + } + if (cp->tok == '{') { + if (ct->size != CTSIZE_INVALID || ct->sib) + cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name)))); + ct->sib = 1; /* Indicate the type is currently being defined. */ + } + return sid; +} + +/* Determine field alignment. */ +static CTSize cp_field_align(CPState *cp, CType *ct, CTInfo info) +{ + CTSize align = ctype_align(info); + UNUSED(cp); UNUSED(ct); +#if (LJ_TARGET_X86 && !LJ_ABI_WIN) || (LJ_TARGET_ARM && __APPLE__) + /* The SYSV i386 and iOS ABIs limit alignment of non-vector fields to 2^2. */ + if (align > 2 && !(info & CTFP_ALIGNED)) { + if (ctype_isarray(info) && !(info & CTF_VECTOR)) { + do { + ct = ctype_rawchild(cp->cts, ct); + info = ct->info; + } while (ctype_isarray(info) && !(info & CTF_VECTOR)); + } + if (ctype_isnum(info) || ctype_isenum(info)) + align = 2; + } +#endif + return align; +} + +/* Layout struct/union fields. */ +static void cp_struct_layout(CPState *cp, CTypeID sid, CTInfo sattr) +{ + CTSize bofs = 0, bmaxofs = 0; /* Bit offset and max. bit offset. */ + CTSize maxalign = ctype_align(sattr); + CType *sct = ctype_get(cp->cts, sid); + CTInfo sinfo = sct->info; + CTypeID fieldid = sct->sib; + while (fieldid) { + CType *ct = ctype_get(cp->cts, fieldid); + CTInfo attr = ct->size; /* Field declaration attributes (temp.). */ + + if (ctype_isfield(ct->info) || + (ctype_isxattrib(ct->info, CTA_SUBTYPE) && attr)) { + CTSize align, amask; /* Alignment (pow2) and alignment mask (bits). */ + CTSize sz; + CTInfo info = lj_ctype_info(cp->cts, ctype_cid(ct->info), &sz); + CTSize bsz, csz = 8*sz; /* Field size and container size (in bits). */ + sinfo |= (info & (CTF_QUAL|CTF_VLA)); /* Merge pseudo-qualifiers. */ + + /* Check for size overflow and determine alignment. 
*/ + if (sz >= 0x20000000u || bofs + csz < bofs) { + if (!(sz == CTSIZE_INVALID && ctype_isarray(info) && + !(sinfo & CTF_UNION))) + cp_err(cp, LJ_ERR_FFI_INVSIZE); + csz = sz = 0; /* Treat a[] and a[?] as zero-sized. */ + } + align = cp_field_align(cp, ct, info); + if (((attr|sattr) & CTFP_PACKED) || + ((attr & CTFP_ALIGNED) && ctype_align(attr) > align)) + align = ctype_align(attr); + if (cp->packstack[cp->curpack] < align) + align = cp->packstack[cp->curpack]; + if (align > maxalign) maxalign = align; + amask = (8u << align) - 1; + + bsz = ctype_bitcsz(ct->info); /* Bitfield size (temp.). */ + if (bsz == CTBSZ_FIELD || !ctype_isfield(ct->info)) { + bsz = csz; /* Regular fields or subtypes always fill the container. */ + bofs = (bofs + amask) & ~amask; /* Start new aligned field. */ + ct->size = (bofs >> 3); /* Store field offset. */ + } else { /* Bitfield. */ + if (bsz == 0 || (attr & CTFP_ALIGNED) || + (!((attr|sattr) & CTFP_PACKED) && (bofs & amask) + bsz > csz)) + bofs = (bofs + amask) & ~amask; /* Start new aligned field. */ + + /* Prefer regular field over bitfield. */ + if (bsz == csz && (bofs & amask) == 0) { + ct->info = CTINFO(CT_FIELD, ctype_cid(ct->info)); + ct->size = (bofs >> 3); /* Store field offset. */ + } else { + ct->info = CTINFO(CT_BITFIELD, + (info & (CTF_QUAL|CTF_UNSIGNED|CTF_BOOL)) + + (csz << (CTSHIFT_BITCSZ-3)) + (bsz << CTSHIFT_BITBSZ)); +#if LJ_BE + ct->info += ((csz - (bofs & (csz-1)) - bsz) << CTSHIFT_BITPOS); +#else + ct->info += ((bofs & (csz-1)) << CTSHIFT_BITPOS); +#endif + ct->size = ((bofs & ~(csz-1)) >> 3); /* Store container offset. */ + } + } + + /* Determine next offset or max. offset. */ + if ((sinfo & CTF_UNION)) { + if (bsz > bmaxofs) bmaxofs = bsz; + } else { + bofs += bsz; + } + } /* All other fields in the chain are already set up. */ + + fieldid = ct->sib; + } + + /* Complete struct/union. */ + sct->info = sinfo + CTALIGN(maxalign); + bofs = (sinfo & CTF_UNION) ? bmaxofs : bofs; + maxalign = (8u << maxalign) - 1; + sct->size = (((bofs + maxalign) & ~maxalign) >> 3); +} + +/* Parse struct/union declaration. */ +static CTypeID cp_decl_struct(CPState *cp, CPDecl *sdecl, CTInfo sinfo) +{ + CTypeID sid = cp_struct_name(cp, sdecl, sinfo); + if (cp_opt(cp, '{')) { /* Struct/union definition. */ + CTypeID lastid = sid; + int lastdecl = 0; + while (cp->tok != '}') { + CPDecl decl; + CPscl scl = cp_decl_spec(cp, &decl, CDF_STATIC); + decl.mode = scl ? CPARSE_MODE_DIRECT : + CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT|CPARSE_MODE_FIELD; + + for (;;) { + CTypeID typeid; + + if (lastdecl) cp_err_token(cp, '}'); + + /* Parse field declarator. */ + decl.bits = CTSIZE_INVALID; + cp_declarator(cp, &decl); + typeid = cp_decl_intern(cp, &decl); + + if ((scl & CDF_STATIC)) { /* Static constant in struct namespace. */ + CType *ct; + CTypeID fieldid = cp_decl_constinit(cp, &ct, typeid); + ctype_get(cp->cts, lastid)->sib = fieldid; + lastid = fieldid; + ctype_setname(ct, decl.name); + } else { + CTSize bsz = CTBSZ_FIELD; /* Temp. for layout phase. */ + CType *ct; + CTypeID fieldid = lj_ctype_new(cp->cts, &ct); /* Do this first. */ + CType *tct = ctype_raw(cp->cts, typeid); + + if (decl.bits == CTSIZE_INVALID) { /* Regular field. */ + if (ctype_isarray(tct->info) && tct->size == CTSIZE_INVALID) + lastdecl = 1; /* a[] or a[?] must be the last declared field. */ + + /* Accept transparent struct/union/enum. 
*/ + if (!decl.name) { + if (!((ctype_isstruct(tct->info) && !(tct->info & CTF_VLA)) || + ctype_isenum(tct->info))) + cp_err_token(cp, CTOK_IDENT); + ct->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_SUBTYPE) + typeid); + ct->size = ctype_isstruct(tct->info) ? + (decl.attr|0x80000000u) : 0; /* For layout phase. */ + goto add_field; + } + } else { /* Bitfield. */ + bsz = decl.bits; + if (!ctype_isinteger_or_bool(tct->info) || + (bsz == 0 && decl.name) || 8*tct->size > CTBSZ_MAX || + bsz > ((tct->info & CTF_BOOL) ? 1 : 8*tct->size)) + cp_errmsg(cp, ':', LJ_ERR_BADVAL); + } + + /* Create temporary field for layout phase. */ + ct->info = CTINFO(CT_FIELD, typeid + (bsz << CTSHIFT_BITCSZ)); + ct->size = decl.attr; + if (decl.name) ctype_setname(ct, decl.name); + + add_field: + ctype_get(cp->cts, lastid)->sib = fieldid; + lastid = fieldid; + } + if (!cp_opt(cp, ',')) break; + cp_decl_reset(&decl); + } + cp_check(cp, ';'); + } + cp_check(cp, '}'); + ctype_get(cp->cts, lastid)->sib = 0; /* Drop sib = 1 for empty structs. */ + cp_decl_attributes(cp, sdecl); /* Layout phase needs postfix attributes. */ + cp_struct_layout(cp, sid, sdecl->attr); + } + return sid; +} + +/* Parse enum declaration. */ +static CTypeID cp_decl_enum(CPState *cp, CPDecl *sdecl) +{ + CTypeID eid = cp_struct_name(cp, sdecl, CTINFO(CT_ENUM, CTID_VOID)); + CTInfo einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_UINT32); + CTSize esize = 4; /* Only 32 bit enums are supported. */ + if (cp_opt(cp, '{')) { /* Enum definition. */ + CPValue k; + CTypeID lastid = eid; + k.u32 = 0; + k.id = CTID_INT32; + do { + GCstr *name = cp->str; + if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT); + if (cp->val.id) cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(name)); + cp_next(cp); + if (cp_opt(cp, '=')) { + cp_expr_kint(cp, &k); + if (k.id == CTID_UINT32) { + /* C99 says that enum constants are always (signed) integers. + ** But since unsigned constants like 0x80000000 are quite common, + ** those are left as uint32_t. + */ + if (k.i32 >= 0) k.id = CTID_INT32; + } else { + /* OTOH it's common practice and even mandated by some ABIs + ** that the enum type itself is unsigned, unless there are any + ** negative constants. + */ + k.id = CTID_INT32; + if (k.i32 < 0) einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_INT32); + } + } + /* Add named enum constant. */ + { + CType *ct; + CTypeID constid = lj_ctype_new(cp->cts, &ct); + ctype_get(cp->cts, lastid)->sib = constid; + lastid = constid; + ctype_setname(ct, name); + ct->info = CTINFO(CT_CONSTVAL, CTF_CONST|k.id); + ct->size = k.u32++; + if (k.u32 == 0x80000000u) k.id = CTID_UINT32; + lj_ctype_addname(cp->cts, ct, constid); + } + if (!cp_opt(cp, ',')) break; + } while (cp->tok != '}'); /* Trailing ',' is ok. */ + cp_check(cp, '}'); + /* Complete enum. */ + ctype_get(cp->cts, eid)->info = einfo; + ctype_get(cp->cts, eid)->size = esize; + } + return eid; +} + +/* Parse declaration specifiers. */ +static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl) +{ + uint32_t cds = 0, sz = 0; + CTInfo tdef = 0; + + decl->cp = cp; + decl->mode = cp->mode; + decl->name = NULL; + decl->redir = NULL; + decl->attr = 0; + decl->fattr = 0; + decl->pos = decl->top = 0; + decl->stack[0].next = 0; + + for (;;) { /* Parse basic types. 
*/ + cp_decl_attributes(cp, decl); + switch (cp->tok) { + case CTOK_STRUCT: + tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, 0)); + break; + case CTOK_UNION: + tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, CTF_UNION)); + break; + case CTOK_ENUM: + tdef = cp_decl_enum(cp, decl); + break; + case CTOK_IDENT: + if (!ctype_istypedef(cp->ct->info) || sz || tdef || + (cds & (CDF_SHORT|CDF_LONG|CDF_SIGNED|CDF_UNSIGNED|CDF_COMPLEX))) + goto end_decl; + tdef = ctype_cid(cp->ct->info); /* Get typedef. */ + cp_next(cp); + break; + default: + if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECLFLAG) { + uint32_t cbit; + if (cp->ct->size) { + if (sz) goto end_decl; + sz = cp->ct->size; + } + cbit = (1u << (cp->tok - CTOK_FIRSTDECL)); + cds = cds | cbit | ((cbit & cds & CDF_LONG) << 1); + if (cp->tok >= CTOK_FIRSTSCL && !(scl & cbit)) + cp_errmsg(cp, cp->tok, LJ_ERR_FFI_BADSCL); + cp_next(cp); + break; + } + goto end_decl; + } + } +end_decl: + + if ((cds & CDF_COMPLEX)) /* Use predefined complex types. */ + tdef = sz == 4 ? CTID_COMPLEX_FLOAT : CTID_COMPLEX_DOUBLE; + + if (tdef) { + cp_push_type(decl, tdef); + } else if ((cds & CDF_VOID)) { + cp_push(decl, CTINFO(CT_VOID, (decl->attr & CTF_QUAL)), CTSIZE_INVALID); + decl->attr &= ~CTF_QUAL; + } else { + /* Determine type info and size. */ + CTInfo info = CTINFO(CT_NUM, (cds & CDF_UNSIGNED) ? CTF_UNSIGNED : 0); + if ((cds & CDF_BOOL)) { + info = CTINFO(CT_NUM, CTF_UNSIGNED|CTF_BOOL); + lua_assert(sz == 1); + } else if ((cds & CDF_FP)) { + info = CTINFO(CT_NUM, CTF_FP); + if ((cds & CDF_LONG)) sz = sizeof(long double); + } else if ((cds & CDF_CHAR)) { + if ((cds & (CDF_CHAR|CDF_SIGNED|CDF_UNSIGNED)) == CDF_CHAR) + info |= CTF_UCHAR; /* Handle platforms where char is unsigned. */ + } else if ((cds & CDF_SHORT)) { + sz = sizeof(short); + } else if ((cds & CDF_LONGLONG)) { + sz = 8; + } else if ((cds & CDF_LONG)) { + info |= CTF_LONG; + sz = sizeof(long); + } else if (!sz) { + if (!(cds & (CDF_SIGNED|CDF_UNSIGNED))) + cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC); + sz = sizeof(int); + } + lua_assert(sz != 0); + info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */ + info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */ + cp_push(decl, info, sz); + decl->attr &= ~CTF_QUAL; + } + decl->specpos = decl->pos; + decl->specattr = decl->attr; + decl->specfattr = decl->fattr; + return (cds & CDF_SCL); /* Return storage class. */ +} + +/* Parse array declaration. */ +static void cp_decl_array(CPState *cp, CPDecl *decl) +{ + CTInfo info = CTINFO(CT_ARRAY, 0); + CTSize nelem = CTSIZE_INVALID; /* Default size for a[] or a[?]. */ + cp_decl_attributes(cp, decl); + if (cp_opt(cp, '?')) + info |= CTF_VLA; /* Create variable-length array a[?]. */ + else if (cp->tok != ']') + nelem = cp_expr_ksize(cp); + cp_check(cp, ']'); + cp_add(decl, info, nelem); +} + +/* Parse function declaration. */ +static void cp_decl_func(CPState *cp, CPDecl *fdecl) +{ + CTSize nargs = 0; + CTInfo info = CTINFO(CT_FUNC, 0); + CTypeID lastid = 0, anchor = 0; + if (cp->tok != ')') { + do { + CPDecl decl; + CTypeID typeid, fieldid; + CType *ct; + if (cp_opt(cp, '.')) { /* Vararg function. */ + cp_check(cp, '.'); /* Workaround for the minimalistic lexer. 
*/ + cp_check(cp, '.'); + info |= CTF_VARARG; + break; + } + cp_decl_spec(cp, &decl, CDF_REGISTER); + decl.mode = CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT; + cp_declarator(cp, &decl); + typeid = cp_decl_intern(cp, &decl); + ct = ctype_raw(cp->cts, typeid); + if (ctype_isvoid(ct->info)) + break; + else if (ctype_isrefarray(ct->info)) + typeid = lj_ctype_intern(cp->cts, + CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ct->info)), CTSIZE_PTR); + else if (ctype_isfunc(ct->info)) + typeid = lj_ctype_intern(cp->cts, + CTINFO(CT_PTR, CTALIGN_PTR|typeid), CTSIZE_PTR); + /* Add new parameter. */ + fieldid = lj_ctype_new(cp->cts, &ct); + if (anchor) + ctype_get(cp->cts, lastid)->sib = fieldid; + else + anchor = fieldid; + lastid = fieldid; + if (decl.name) ctype_setname(ct, decl.name); + ct->info = CTINFO(CT_FIELD, typeid); + ct->size = nargs++; + } while (cp_opt(cp, ',')); + } + cp_check(cp, ')'); + if (cp_opt(cp, '{')) { /* Skip function definition. */ + int level = 1; + for (;;) { + if (cp->tok == '{') level++; + else if (cp->tok == '}' && --level == 0) break; + else if (cp->tok == CTOK_EOF) cp_err_token(cp, '}'); + cp_next(cp); + } + cp->tok = ';'; /* Ok for cp_decl_multi(), error in cp_decl_single(). */ + } + info |= (fdecl->fattr & ~CTMASK_CID); + fdecl->fattr = 0; + fdecl->stack[cp_add(fdecl, info, nargs)].sib = anchor; +} + +/* Parse declarator. */ +static void cp_declarator(CPState *cp, CPDecl *decl) +{ + if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS); + + for (;;) { /* Head of declarator. */ + if (cp_opt(cp, '*')) { /* Pointer. */ + CTSize sz; + CTInfo info; + cp_decl_attributes(cp, decl); + sz = CTSIZE_PTR; + info = CTINFO(CT_PTR, CTALIGN_PTR); +#if LJ_64 + if (ctype_msizeP(decl->attr) == 4) { + sz = 4; + info = CTINFO(CT_PTR, CTALIGN(2)); + } +#endif + info += (decl->attr & (CTF_QUAL|CTF_REF)); + decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP<<CTSHIFT_MSIZEP)); + cp_push(decl, info, sz); + } else if (cp_opt(cp, '&') || cp_opt(cp, CTOK_ANDAND)) { /* Reference. */ + decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP<<CTSHIFT_MSIZEP)); + cp_push(decl, CTINFO_REF(0), CTSIZE_PTR); + } else { + break; + } + } + + if (cp_opt(cp, '(')) { /* Inner declarator. */ + CPDeclIdx pos; + cp_decl_attributes(cp, decl); + /* Resolve ambiguity between inner declarator and 1st function parameter. */ + if ((decl->mode & CPARSE_MODE_ABSTRACT) && + (cp->tok == ')' || cp_istypedecl(cp))) goto func_decl; + pos = decl->pos; + cp_declarator(cp, decl); + cp_check(cp, ')'); + decl->pos = pos; + } else if (cp->tok == CTOK_IDENT) { /* Direct declarator. */ + if (!(decl->mode & CPARSE_MODE_DIRECT)) cp_err_token(cp, CTOK_EOF); + decl->name = cp->str; + decl->nameid = cp->val.id; + cp_next(cp); + } else { /* Abstract declarator. */ + if (!(decl->mode & CPARSE_MODE_ABSTRACT)) cp_err_token(cp, CTOK_IDENT); + } + + for (;;) { /* Tail of declarator. */ + if (cp_opt(cp, '[')) { /* Array. */ + cp_decl_array(cp, decl); + } else if (cp_opt(cp, '(')) { /* Function. */ + func_decl: + cp_decl_func(cp, decl); + } else { + break; + } + } + + if ((decl->mode & CPARSE_MODE_FIELD) && cp_opt(cp, ':')) /* Field width. */ + decl->bits = cp_expr_ksize(cp); + + /* Process postfix attributes. */ + cp_decl_attributes(cp, decl); + cp_push_attributes(decl); + + cp->depth--; +} + +/* Parse an abstract type declaration and return its C type ID. 
*/ +static CTypeID cp_decl_abstract(CPState *cp) +{ + CPDecl decl; + cp_decl_spec(cp, &decl, 0); + decl.mode = CPARSE_MODE_ABSTRACT; + cp_declarator(cp, &decl); + return cp_decl_intern(cp, &decl); +} + +/* Handle pragmas. */ +static void cp_pragma(CPState *cp, BCLine pragmaline) +{ + cp_next(cp); + if (cp->tok == CTOK_IDENT && + cp->str->hash == H_(e79b999f,42ca3e85)) { /* pack */ + cp_next(cp); + cp_check(cp, '('); + if (cp->tok == CTOK_IDENT) { + if (cp->str->hash == H_(738e923c,a1b65954)) { /* push */ + if (cp->curpack < CPARSE_MAX_PACKSTACK) { + cp->packstack[cp->curpack+1] = cp->packstack[cp->curpack]; + cp->curpack++; + } + } else if (cp->str->hash == H_(6c71cf27,6c71cf27)) { /* pop */ + if (cp->curpack > 0) cp->curpack--; + } else { + cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); + } + cp_next(cp); + if (!cp_opt(cp, ',')) goto end_pack; + } + if (cp->tok == CTOK_INTEGER) { + cp->packstack[cp->curpack] = cp->val.u32 ? lj_fls(cp->val.u32) : 0; + cp_next(cp); + } else { + cp->packstack[cp->curpack] = 255; + } + end_pack: + cp_check(cp, ')'); + } else { /* Ignore all other pragmas. */ + while (cp->tok != CTOK_EOF && cp->linenumber == pragmaline) + cp_next(cp); + } +} + +/* Parse multiple C declarations of types or extern identifiers. */ +static void cp_decl_multi(CPState *cp) +{ + int first = 1; + while (cp->tok != CTOK_EOF) { + CPDecl decl; + CPscl scl; + if (cp->tok == '#') { /* Workaround, since we have no preprocessor, yet. */ + BCLine pragmaline = cp->linenumber; + if (!(cp_next(cp) == CTOK_IDENT && + cp->str->hash == H_(f5e6b4f8,1d509107))) /* pragma */ + cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); + cp_pragma(cp, pragmaline); + continue; + } + scl = cp_decl_spec(cp, &decl, CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC); + if ((cp->tok == ';' || cp->tok == CTOK_EOF) && + ctype_istypedef(decl.stack[0].info)) { + CTInfo info = ctype_rawchild(cp->cts, &decl.stack[0])->info; + if (ctype_isstruct(info) || ctype_isenum(info)) + goto decl_end; /* Accept empty declaration of struct/union/enum. */ + } + for (;;) { + CTypeID typeid; + cp_declarator(cp, &decl); + typeid = cp_decl_intern(cp, &decl); + if (decl.name && !decl.nameid) { /* NYI: redeclarations are ignored. */ + CType *ct; + CTypeID id; + if ((scl & CDF_TYPEDEF)) { /* Create new typedef. */ + id = lj_ctype_new(cp->cts, &ct); + ct->info = CTINFO(CT_TYPEDEF, typeid); + goto noredir; + } else if (ctype_isfunc(ctype_get(cp->cts, typeid)->info)) { + /* Treat both static and extern function declarations as extern. */ + ct = ctype_get(cp->cts, typeid); + /* We always get new anonymous functions (typedefs are copied). */ + lua_assert(gcref(ct->name) == NULL); + id = typeid; /* Just name it. */ + } else if ((scl & CDF_STATIC)) { /* Accept static constants. */ + id = cp_decl_constinit(cp, &ct, typeid); + goto noredir; + } else { /* External references have extern or no storage class. */ + id = lj_ctype_new(cp->cts, &ct); + ct->info = CTINFO(CT_EXTERN, typeid); + } + if (decl.redir) { /* Add attribute for redirected symbol name. */ + CType *cta; + CTypeID aid = lj_ctype_new(cp->cts, &cta); + ct = ctype_get(cp->cts, id); /* Table may have been reallocated. */ + cta->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_REDIR)); + cta->sib = ct->sib; + ct->sib = aid; + ctype_setname(cta, decl.redir); + } + noredir: + ctype_setname(ct, decl.name); + lj_ctype_addname(cp->cts, ct, id); + } + if (!cp_opt(cp, ',')) break; + cp_decl_reset(&decl); + } + decl_end: + if (cp->tok == CTOK_EOF && first) break; /* May omit ';' for 1 decl. 
*/ + first = 0; + cp_check(cp, ';'); + } +} + +/* Parse a single C type declaration. */ +static void cp_decl_single(CPState *cp) +{ + CPDecl decl; + cp_decl_spec(cp, &decl, 0); + cp_declarator(cp, &decl); + cp->val.id = cp_decl_intern(cp, &decl); + if (cp->tok != CTOK_EOF) cp_err_token(cp, CTOK_EOF); +} + +#undef H_ + +/* ------------------------------------------------------------------------ */ + +/* Protected callback for C parser. */ +static TValue *cpcparser(lua_State *L, lua_CFunction dummy, void *ud) +{ + CPState *cp = (CPState *)ud; + UNUSED(dummy); + cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ + cp_init(cp); + if ((cp->mode & CPARSE_MODE_MULTI)) + cp_decl_multi(cp); + else + cp_decl_single(cp); + lua_assert(cp->depth == 0); + return NULL; +} + +/* C parser. */ +int lj_cparse(CPState *cp) +{ + LJ_CTYPE_SAVE(cp->cts); + int errcode = lj_vm_cpcall(cp->L, NULL, cp, cpcparser); + if (errcode) + LJ_CTYPE_RESTORE(cp->cts); + cp_cleanup(cp); + return errcode; +} + +#endif diff --git a/src/luajit2/src/lj_cparse.h b/src/luajit2/src/lj_cparse.h new file mode 100644 index 0000000000..535bc46198 --- /dev/null +++ b/src/luajit2/src/lj_cparse.h @@ -0,0 +1,63 @@ +/* +** C declaration parser. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CPARSE_H +#define _LJ_CPARSE_H + +#include "lj_obj.h" +#include "lj_ctype.h" + +#if LJ_HASFFI + +/* C parser limits. */ +#define CPARSE_MAX_BUF 32768 /* Max. token buffer size. */ +#define CPARSE_MAX_DECLSTACK 100 /* Max. declaration stack depth. */ +#define CPARSE_MAX_DECLDEPTH 20 /* Max. recursive declaration depth. */ +#define CPARSE_MAX_PACKSTACK 7 /* Max. pack pragma stack depth. */ + +/* Flags for C parser mode. */ +#define CPARSE_MODE_MULTI 1 /* Process multiple declarations. */ +#define CPARSE_MODE_ABSTRACT 2 /* Accept abstract declarators. */ +#define CPARSE_MODE_DIRECT 4 /* Accept direct declarators. */ +#define CPARSE_MODE_FIELD 8 /* Accept field width in bits, too. */ +#define CPARSE_MODE_NOIMPLICIT 16 /* Reject implicit declarations. */ + +typedef int CPChar; /* C parser character. Unsigned ext. from char. */ +typedef int CPToken; /* C parser token. */ + +/* C parser internal value representation. */ +typedef struct CPValue { + union { + int32_t i32; /* Value for CTID_INT32. */ + uint32_t u32; /* Value for CTID_UINT32. */ + }; + CTypeID id; /* C Type ID of the value. */ +} CPValue; + +/* C parser state. */ +typedef struct CPState { + CPChar c; /* Current character. */ + CPToken tok; /* Current token. */ + CPValue val; /* Token value. */ + GCstr *str; /* Interned string of identifier/keyword. */ + CType *ct; /* C type table entry. */ + const char *p; /* Current position in input buffer. */ + SBuf sb; /* String buffer for tokens. */ + lua_State *L; /* Lua state. */ + CTState *cts; /* C type state. */ + const char *srcname; /* Current source name. */ + BCLine linenumber; /* Input line counter. */ + int depth; /* Recursive declaration depth. */ + uint32_t tmask; /* Type mask for next identifier. */ + uint32_t mode; /* C parser mode. */ + uint8_t packstack[CPARSE_MAX_PACKSTACK]; /* Stack for pack pragmas. */ + uint8_t curpack; /* Current position in pack pragma stack. */ +} CPState; + +LJ_FUNC int lj_cparse(CPState *cp); + +#endif + +#endif diff --git a/src/luajit2/src/lj_crecord.c b/src/luajit2/src/lj_crecord.c new file mode 100644 index 0000000000..743137de1c --- /dev/null +++ b/src/luajit2/src/lj_crecord.c @@ -0,0 +1,1159 @@ +/* +** Trace recorder for C data operations. 
+** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_ffrecord_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT && LJ_HASFFI + +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_frame.h" +#include "lj_ctype.h" +#include "lj_cparse.h" +#include "lj_cconv.h" +#include "lj_clib.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_record.h" +#include "lj_ffrecord.h" +#include "lj_crecord.h" +#include "lj_dispatch.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +#define emitconv(a, dt, st, flags) \ + emitir(IRT(IR_CONV, (dt)), (a), (st)|((dt) << 5)|(flags)) + +/* -- C type checks ------------------------------------------------------- */ + +static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o) +{ + GCcdata *cd; + TRef trtypeid; + if (!tref_iscdata(tr)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + cd = cdataV(o); + /* Specialize to the CTypeID. */ + trtypeid = emitir(IRT(IR_FLOAD, IRT_U16), tr, IRFL_CDATA_TYPEID); + emitir(IRTG(IR_EQ, IRT_INT), trtypeid, lj_ir_kint(J, (int32_t)cd->typeid)); + return cd; +} + +/* Specialize to the CTypeID held by a cdata constructor. */ +static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr) +{ + CTypeID id; + lua_assert(tref_iscdata(tr) && cd->typeid == CTID_CTYPEID); + id = *(CTypeID *)cdataptr(cd); + tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata))); + tr = emitir(IRT(IR_XLOAD, IRT_INT), tr, 0); + emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id)); + return id; +} + +static CTypeID argv2ctype(jit_State *J, TRef tr, cTValue *o) +{ + if (tref_isstr(tr)) { + GCstr *s = strV(o); + CPState cp; + CTypeID oldtop; + /* Specialize to the string containing the C type declaration. */ + emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, s)); + cp.L = J->L; + cp.cts = ctype_ctsG(J2G(J)); + oldtop = cp.cts->top; + cp.srcname = strdata(s); + cp.p = strdata(s); + cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT; + if (lj_cparse(&cp) || cp.cts->top > oldtop) /* Avoid new struct defs. */ + lj_trace_err(J, LJ_TRERR_BADTYPE); + return cp.val.id; + } else { + GCcdata *cd = argv2cdata(J, tr, o); + return cd->typeid == CTID_CTYPEID ? crec_constructor(J, cd, tr) : + cd->typeid; + } +} + +/* -- Convert C type to C type -------------------------------------------- */ + +/* +** This code mirrors the code in lj_cconv.c. It performs the same steps +** for the trace recorder that lj_cconv.c does for the interpreter. +** +** One major difference is that we can get away with much fewer checks +** here. E.g. checks for casts, constness or correct types can often be +** omitted, even if they might fail. The interpreter subsequently throws +** an error, which aborts the trace. +** +** All operations are specialized to their C types, so the on-trace +** outcome must be the same as the outcome in the interpreter. If the +** interpreter doesn't throw an error, then the trace is correct, too. +** Care must be taken not to generate invalid (temporary) IR or to +** trigger asserts. +*/ + +/* Convert CType to IRType. 
*/ +static IRType crec_ct2irt(CType *ct) +{ + if (LJ_LIKELY(ctype_isnum(ct->info))) { + if ((ct->info & CTF_FP)) { + if (ct->size == sizeof(double)) + return IRT_NUM; + else if (ct->size == sizeof(float)) + return IRT_FLOAT; + } else { + uint32_t b = lj_fls(ct->size); + if (b <= 3) + return IRT_I8 + 2*b + ((ct->info & CTF_UNSIGNED) ? 1 : 0); + } + } else if (ctype_isptr(ct->info)) { + return (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32; + } else if (ctype_iscomplex(ct->info)) { + if (ct->size == 2*sizeof(double)) + return IRT_NUM; + else if (ct->size == 2*sizeof(float)) + return IRT_FLOAT; + } + return IRT_CDATA; +} + +/* Determine whether a passed number or cdata number is non-zero. */ +static int crec_isnonzero(CType *s, void *p) +{ + if (p == (void *)0) + return 0; + if (p == (void *)1) + return 1; + if ((s->info & CTF_FP)) { + if (s->size == sizeof(float)) + return (*(float *)p != 0); + else + return (*(double *)p != 0); + } else { + if (s->size == 1) + return (*(uint8_t *)p != 0); + else if (s->size == 2) + return (*(uint16_t *)p != 0); + else if (s->size == 4) + return (*(uint32_t *)p != 0); + else + return (*(uint64_t *)p != 0); + } +} + +static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp, + void *svisnz) +{ + CTSize dsize = d->size, ssize = s->size; + CTInfo dinfo = d->info, sinfo = s->info; + IRType dt = crec_ct2irt(d); + IRType st = crec_ct2irt(s); + + if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) + goto err_conv; + + /* + ** Note: Unlike lj_cconv_ct_ct(), sp holds the _value_ of pointers and + ** numbers up to 8 bytes. Otherwise sp holds a pointer. + */ + + switch (cconv_idx2(dinfo, sinfo)) { + /* Destination is a bool. */ + case CCX(B, B): + goto xstore; /* Source operand is already normalized. */ + case CCX(B, I): + case CCX(B, F): + if (st != IRT_CDATA) { + /* Specialize to the result of a comparison against 0. */ + TRef zero = (st == IRT_NUM || st == IRT_FLOAT) ? lj_ir_knum(J, 0) : + (st == IRT_I64 || st == IRT_U64) ? lj_ir_kint64(J, 0) : + lj_ir_kint(J, 0); + int isnz = crec_isnonzero(s, svisnz); + emitir(IRTG(isnz ? IR_NE : IR_EQ, st), sp, zero); + sp = lj_ir_kint(J, isnz); + goto xstore; + } + goto err_nyi; + + /* Destination is an integer. */ + case CCX(I, B): + case CCX(I, I): + conv_I_I: + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + /* Extend 32 to 64 bit integer. */ + if (dsize == 8 && ssize < 8 && !(LJ_64 && (sinfo & CTF_UNSIGNED))) + sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, + (sinfo & CTF_UNSIGNED) ? 0 : IRCONV_SEXT); + else if (dsize < 8 && ssize == 8) /* Truncate from 64 bit integer. */ + sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, 0); + else if (st == IRT_INT) + sp = lj_opt_narrow_toint(J, sp); + xstore: + if (dt == IRT_I64 || dt == IRT_U64) lj_needsplit(J); + if (dp == 0) return sp; + emitir(IRT(IR_XSTORE, dt), dp, sp); + break; + case CCX(I, C): + sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */ + /* fallthrough */ + case CCX(I, F): + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_TRUNC|IRCONV_ANY); + goto xstore; + case CCX(I, P): + case CCX(I, A): + sinfo = CTINFO(CT_NUM, CTF_UNSIGNED); + ssize = CTSIZE_PTR; + st = IRT_UINTP; + goto conv_I_I; + + /* Destination is a floating-point number. */ + case CCX(F, B): + case CCX(F, I): + conv_F_I: + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + sp = emitconv(sp, dt, ssize < 4 ? 
IRT_INT : st, 0); + goto xstore; + case CCX(F, C): + sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */ + /* fallthrough */ + case CCX(F, F): + conv_F_F: + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + if (dt != st) sp = emitconv(sp, dt, st, 0); + goto xstore; + + /* Destination is a complex number. */ + case CCX(C, I): + case CCX(C, F): + { /* Clear im. */ + TRef ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1))); + emitir(IRT(IR_XSTORE, dt), ptr, lj_ir_knum(J, 0)); + } + /* Convert to re. */ + if ((sinfo & CTF_FP)) goto conv_F_F; else goto conv_F_I; + + case CCX(C, C): + if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; + { + TRef re, im, ptr; + re = emitir(IRT(IR_XLOAD, st), sp, 0); + ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, (ssize >> 1))); + im = emitir(IRT(IR_XLOAD, st), ptr, 0); + if (dt != st) { + re = emitconv(re, dt, st, 0); + im = emitconv(im, dt, st, 0); + } + emitir(IRT(IR_XSTORE, dt), dp, re); + ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1))); + emitir(IRT(IR_XSTORE, dt), ptr, im); + } + break; + + /* Destination is a vector. */ + case CCX(V, I): + case CCX(V, F): + case CCX(V, C): + case CCX(V, V): + goto err_nyi; + + /* Destination is a pointer. */ + case CCX(P, P): + case CCX(P, A): + case CCX(P, S): + /* There are only 32 bit pointers/addresses on 32 bit machines. + ** Also ok on x64, since all 32 bit ops clear the upper part of the reg. + */ + goto xstore; + case CCX(P, I): + if (st == IRT_CDATA) goto err_nyi; + if (!LJ_64 && ssize == 8) /* Truncate from 64 bit integer. */ + sp = emitconv(sp, IRT_U32, st, 0); + goto xstore; + case CCX(P, F): + if (st == IRT_CDATA) goto err_nyi; + /* The signed conversion is cheaper. x64 really has 47 bit pointers. */ + sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32, + st, IRCONV_TRUNC|IRCONV_ANY); + goto xstore; + + /* Destination is an array. */ + case CCX(A, A): + goto err_nyi; + + /* Destination is a struct/union. */ + case CCX(S, S): + goto err_nyi; + + default: + err_conv: + err_nyi: + lj_trace_err(J, LJ_TRERR_NYICONV); + break; + } + return 0; +} + +/* -- Convert C type to TValue (load) ------------------------------------- */ + +static TRef crec_tv_ct(jit_State *J, CType *s, CTypeID sid, TRef sp) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTInfo sinfo = s->info; + lua_assert(!ctype_isenum(sinfo)); + if (ctype_isnum(sinfo)) { + IRType t = crec_ct2irt(s); + TRef tr; + if (t == IRT_CDATA) + goto err_nyi; /* NYI: copyval of >64 bit integers. */ + tr = emitir(IRT(IR_XLOAD, t), sp, 0); + if (t == IRT_FLOAT || t == IRT_U32) { /* Keep uint32_t/float as numbers. */ + return emitconv(tr, IRT_NUM, t, 0); + } else if (t == IRT_I64 || t == IRT_U64) { /* Box 64 bit integer. */ + sp = tr; + lj_needsplit(J); + } else if ((sinfo & CTF_BOOL)) { + /* Assume not equal to zero. Fixup and emit pending guard later. */ + lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0)); + J->postproc = LJ_POST_FIXGUARD; + return TREF_TRUE; + } else { + return tr; + } + } else if (ctype_isptr(sinfo)) { + IRType t = (LJ_64 && s->size == 8) ? IRT_P64 : IRT_P32; + sp = emitir(IRT(IR_XLOAD, t), sp, 0); + } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) { + cts->L = J->L; + sid = lj_ctype_intern(cts, CTINFO_REF(sid), CTSIZE_PTR); /* Create ref. */ + } else if (ctype_iscomplex(sinfo)) { /* Unbox/box complex. */ + IRType t = s->size == 2*sizeof(double) ? 
IRT_NUM : IRT_FLOAT; + ptrdiff_t esz = (ptrdiff_t)(s->size >> 1); + TRef ptr, tr1, tr2, dp; + dp = emitir(IRTG(IR_CNEW, IRT_CDATA), lj_ir_kint(J, sid), TREF_NIL); + tr1 = emitir(IRT(IR_XLOAD, t), sp, 0); + ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, esz)); + tr2 = emitir(IRT(IR_XLOAD, t), ptr, 0); + ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata))); + emitir(IRT(IR_XSTORE, t), ptr, tr1); + ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)+esz)); + emitir(IRT(IR_XSTORE, t), ptr, tr2); + return dp; + } else { + /* NYI: copyval of vectors. */ + err_nyi: + lj_trace_err(J, LJ_TRERR_NYICONV); + } + /* Box pointer, ref or 64 bit integer. */ + return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, sid), sp); +} + +/* -- Convert TValue to C type (store) ------------------------------------ */ + +static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, TValue *sval) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTypeID sid = CTID_P_VOID; + void *svisnz = 0; + CType *s; + if (LJ_LIKELY(tref_isinteger(sp))) { + sid = CTID_INT32; + svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval)); + } else if (tref_isnum(sp)) { + sid = CTID_DOUBLE; + svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval)); + } else if (tref_isbool(sp)) { + sp = lj_ir_kint(J, tref_istrue(sp) ? 1 : 0); + sid = CTID_BOOL; + } else if (tref_isnil(sp)) { + sp = lj_ir_kptr(J, NULL); + } else if (tref_isudata(sp)) { + sp = emitir(IRT(IR_ADD, IRT_P32), sp, lj_ir_kint(J, sizeof(GCudata))); + } else if (tref_isstr(sp)) { + if (ctype_isenum(d->info)) { /* Match string against enum constant. */ + GCstr *str = strV(sval); + CTSize ofs; + CType *cct = lj_ctype_getfield(cts, d, str, &ofs); + /* Specialize to the name of the enum constant. */ + emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str)); + if (cct && ctype_isconstval(cct->info)) { + lua_assert(ctype_child(cts, cct)->size == 4); + svisnz = (void *)(intptr_t)(cct->size != 0); + sp = lj_ir_kint(J, (int32_t)cct->size); + sid = ctype_cid(cct->info); + } /* else: interpreter will throw. */ + } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */ + lj_trace_err(J, LJ_TRERR_BADTYPE); /* NYI */ + } else { /* Otherwise pass the string data as a const char[]. */ + sp = emitir(IRT(IR_STRREF, IRT_P32), sp, lj_ir_kint(J, 0)); + sid = CTID_A_CCHAR; + } + } else { /* NYI: tref_istab(sp), tref_islightud(sp). */ + sid = argv2cdata(J, sp, sval)->typeid; + s = ctype_raw(cts, sid); + svisnz = cdataptr(cdataV(sval)); + if (ctype_isptr(s->info)) { + IRType t = (LJ_64 && s->size == 8) ? IRT_P64 : IRT_P32; + sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR); + if (ctype_isref(s->info)) { + svisnz = *(void **)svisnz; + s = ctype_rawchild(cts, s); + } else { + goto doconv; /* The pointer value was loaded, don't load number. */ + } + } else if (ctype_isinteger(s->info) && s->size == 8) { + IRType t = (s->info & CTF_UNSIGNED) ? IRT_U64 : IRT_I64; + sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT64); + lj_needsplit(J); + goto doconv; + } else { + sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCcdata))); + } + if (ctype_isenum(s->info)) s = ctype_child(cts, s); + if (ctype_isnum(s->info)) { /* Load number value. 
*/ + IRType t = crec_ct2irt(s); + if (t != IRT_CDATA) { + sp = emitir(IRT(IR_XLOAD, t), sp, 0); + if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J); + } + } + goto doconv; + } + s = ctype_get(cts, sid); +doconv: + if (ctype_isenum(d->info)) d = ctype_child(cts, d); + return crec_ct_ct(J, d, s, dp, sp, svisnz); +} + +/* -- C data metamethods -------------------------------------------------- */ + +/* This would be rather difficult in FOLD, so do it here: +** (base+k)+(idx*sz)+ofs ==> (base+idx*sz)+(ofs+k) +** (base+(idx+k)*sz)+ofs ==> (base+idx*sz)+(ofs+k*sz) +*/ +static TRef crec_reassoc_ofs(jit_State *J, TRef tr, ptrdiff_t *ofsp, MSize sz) +{ + IRIns *ir = IR(tref_ref(tr)); + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && irref_isk(ir->op2) && + (ir->o == IR_ADD || ir->o == IR_ADDOV || ir->o == IR_SUBOV)) { + IRIns *irk = IR(ir->op2); + ptrdiff_t k; + if (LJ_64 && irk->o == IR_KINT64) + k = (ptrdiff_t)ir_kint64(irk)->u64 * sz; + else + k = (ptrdiff_t)irk->i * sz; + if (ir->o == IR_SUBOV) *ofsp -= k; else *ofsp += k; + tr = ir->op1; /* Not a TRef, but the caller doesn't care. */ + } + return tr; +} + +/* Record ctype __index/__newindex metamethods. */ +static void crec_index_meta(jit_State *J, CTState *cts, CType *ct, + RecordFFData *rd) +{ + CTypeID id = ctype_typeid(cts, ct); + cTValue *tv = lj_ctype_meta(cts, id, rd->data ? MM_newindex : MM_index); + if (!tv) + lj_trace_err(J, LJ_TRERR_BADTYPE); + if (tvisfunc(tv)) { + J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; + rd->nres = -1; /* Pending tailcall. */ + } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) { + /* Specialize to result of __index lookup. */ + cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]); + IRType t = itype2irt(o); + if (tvisgcv(o)) + J->base[0] = lj_ir_kgc(J, gcV(o), t); + else if (tvisint(o)) + J->base[0] = lj_ir_kint(J, intV(o)); + else if (tvisnum(o)) + J->base[0] = lj_ir_knumint(J, numV(o)); + else if (tvisbool(o)) + J->base[0] = TREF_PRI(t); + else + lj_trace_err(J, LJ_TRERR_BADTYPE); + /* Always specialize to the key. */ + emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1]))); + } else { + /* NYI: resolving of non-function metamethods. */ + /* NYI: non-string keys for __index table. */ + /* NYI: stores to __newindex table. */ + lj_trace_err(J, LJ_TRERR_BADTYPE); + } +} + +void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd) +{ + TRef idx, ptr = J->base[0]; + ptrdiff_t ofs = sizeof(GCcdata); + GCcdata *cd = argv2cdata(J, ptr, &rd->argv[0]); + CTState *cts = ctype_ctsG(J2G(J)); + CType *ct = ctype_raw(cts, cd->typeid); + CTypeID sid = 0; + + /* Resolve pointer or reference for cdata object. */ + if (ctype_isptr(ct->info)) { + IRType t = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32; + if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct); + ptr = emitir(IRT(IR_FLOAD, t), ptr, IRFL_CDATA_PTR); + ofs = 0; + ptr = crec_reassoc_ofs(J, ptr, &ofs, 1); + } + +again: + idx = J->base[1]; + if (tref_isnumber(idx)) { + idx = lj_opt_narrow_cindex(J, idx); + if (ctype_ispointer(ct->info)) { + CTSize sz; + integer_key: + if ((ct->info & CTF_COMPLEX)) + idx = emitir(IRT(IR_BAND, IRT_INTP), idx, lj_ir_kintp(J, 1)); + sz = lj_ctype_size(cts, (sid = ctype_cid(ct->info))); + idx = crec_reassoc_ofs(J, idx, &ofs, sz); +#if LJ_TARGET_ARM + /* Hoist base add to allow fusion of shifts into operands. 
*/ + if (LJ_LIKELY(J->flags & JIT_F_OPT_LOOP) && ofs && (sz == 1 || sz == 4)) { + ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs)); + ofs = 0; + } +#endif + idx = emitir(IRT(IR_MUL, IRT_INTP), idx, lj_ir_kintp(J, sz)); + ptr = emitir(IRT(IR_ADD, IRT_PTR), idx, ptr); + } + } else if (tref_iscdata(idx)) { + GCcdata *cdk = cdataV(&rd->argv[1]); + CType *ctk = ctype_raw(cts, cdk->typeid); + IRType t; + if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk); + if (ctype_ispointer(ct->info) && + ctype_isinteger(ctk->info) && (t = crec_ct2irt(ctk)) != IRT_CDATA) { + if (ctk->size == 8) { + idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT64); + } else { + idx = emitir(IRT(IR_ADD, IRT_PTR), idx, + lj_ir_kintp(J, sizeof(GCcdata))); + idx = emitir(IRT(IR_XLOAD, t), idx, 0); + } + if (LJ_64 && ctk->size < sizeof(intptr_t) && !(ctk->info & CTF_UNSIGNED)) + idx = emitconv(idx, IRT_INTP, IRT_INT, IRCONV_SEXT); + if (!LJ_64 && ctk->size > sizeof(intptr_t)) { + idx = emitconv(idx, IRT_INTP, t, 0); + lj_needsplit(J); + } + goto integer_key; + } + } else if (tref_isstr(idx)) { + GCstr *name = strV(&rd->argv[1]); + if (cd->typeid == CTID_CTYPEID) + ct = ctype_raw(cts, crec_constructor(J, cd, ptr)); + if (ctype_isstruct(ct->info)) { + CTSize fofs; + CType *fct; + fct = lj_ctype_getfield(cts, ct, name, &fofs); + if (fct) { + /* Always specialize to the field name. */ + emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name)); + if (ctype_isconstval(fct->info)) { + if (fct->size >= 0x80000000u && + (ctype_child(cts, fct)->info & CTF_UNSIGNED)) { + J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)fct->size); + return; + } + J->base[0] = lj_ir_kint(J, (int32_t)fct->size); + return; /* Interpreter will throw for newindex. */ + } else if (ctype_isbitfield(fct->info)) { + lj_trace_err(J, LJ_TRERR_NYICONV); + } else { + lua_assert(ctype_isfield(fct->info)); + sid = ctype_cid(fct->info); + } + ofs += (ptrdiff_t)fofs; + } + } else if (ctype_iscomplex(ct->info)) { + if (name->len == 2 && + ((strdata(name)[0] == 'r' && strdata(name)[1] == 'e') || + (strdata(name)[0] == 'i' && strdata(name)[1] == 'm'))) { + /* Always specialize to the field name. */ + emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name)); + if (strdata(name)[0] == 'i') ofs += (ct->size >> 1); + sid = ctype_cid(ct->info); + } + } + } + if (!sid) { + if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */ + CType *cct = ctype_rawchild(cts, ct); + if (ctype_isstruct(cct->info)) { + ct = cct; + if (tref_isstr(idx)) goto again; + } + } + crec_index_meta(J, cts, ct, rd); + return; + } + + if (ofs) + ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs)); + + /* Resolve reference for field. */ + ct = ctype_get(cts, sid); + if (ctype_isref(ct->info)) + ptr = emitir(IRT(IR_XLOAD, IRT_PTR), ptr, 0); + + while (ctype_isattrib(ct->info)) + ct = ctype_child(cts, ct); /* Skip attributes. */ + + if (rd->data == 0) { /* __index metamethod. */ + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); /* Skip enums. */ + J->base[0] = crec_tv_ct(J, ct, sid, ptr); + } else { /* __newindex metamethod. */ + rd->nres = 0; + J->needsnap = 1; + crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]); + } +} + +/* Record cdata allocation. 
*/ +static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CTSize sz; + CTInfo info = lj_ctype_info(cts, id, &sz); + CType *d = ctype_raw(cts, id); + TRef trid; + if (sz == 0 || sz > 64 || (info & CTF_VLA) || ctype_align(info) > CT_MEMALIGN) + lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: large/special allocations. */ + trid = lj_ir_kint(J, id); + /* Use special instruction to box pointer or 64 bit integer. */ + if (ctype_isptr(info) || (ctype_isinteger(info) && sz == 8)) { + TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) : + ctype_isptr(info) ? lj_ir_kptr(J, NULL) : + (lj_needsplit(J), lj_ir_kint64(J, 0)); + J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp); + } else { + TRef trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, TREF_NIL); + cTValue *fin; + J->base[0] = trcd; + if (J->base[1] && !J->base[2] && !lj_cconv_multi_init(d, &rd->argv[1])) { + goto single_init; + } else if (ctype_isarray(d->info)) { + CType *dc = ctype_rawchild(cts, d); /* Array element type. */ + CTSize ofs, esize = dc->size; + TRef sp = 0; + TValue tv; + TValue *sval = &tv; + MSize i; + tv.u64 = 0; + if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info))) + lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init array of aggregates. */ + for (i = 1, ofs = 0; ofs < sz; ofs += esize) { + TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, + lj_ir_kintp(J, ofs + sizeof(GCcdata))); + if (J->base[i]) { + sp = J->base[i]; + sval = &rd->argv[i]; + i++; + } else if (i != 2) { + sp = ctype_isnum(dc->info) ? lj_ir_kint(J, 0) : TREF_NIL; + } + crec_ct_tv(J, dc, dp, sp, sval); + } + } else if (ctype_isstruct(d->info)) { + CTypeID fid = d->sib; + MSize i = 1; + while (fid) { + CType *df = ctype_get(cts, fid); + fid = df->sib; + if (ctype_isfield(df->info)) { + CType *dc; + TRef sp, dp; + TValue tv; + TValue *sval = &tv; + setintV(&tv, 0); + if (!gcref(df->name)) continue; /* Ignore unnamed fields. */ + dc = ctype_rawchild(cts, df); /* Field type. */ + if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info))) + lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init aggregates. */ + if (J->base[i]) { + sp = J->base[i]; + sval = &rd->argv[i]; + i++; + } else { + sp = ctype_isnum(dc->info) ? lj_ir_kint(J, 0) : TREF_NIL; + } + dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, + lj_ir_kintp(J, df->size + sizeof(GCcdata))); + crec_ct_tv(J, dc, dp, sp, sval); + } else if (!ctype_isconstval(df->info)) { + /* NYI: init bitfields and sub-structures. */ + lj_trace_err(J, LJ_TRERR_NYICONV); + } + } + } else { + TRef dp; + single_init: + dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata))); + if (J->base[1]) { + crec_ct_tv(J, d, dp, J->base[1], &rd->argv[1]); + } else { + TValue tv; + tv.u64 = 0; + crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv); + } + } + /* Handle __gc metamethod. */ + fin = lj_ctype_meta(cts, id, MM_gc); + if (fin) { + TRef trlo = lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd); + TRef trhi = emitir(IRT(IR_ADD, IRT_P32), trlo, lj_ir_kint(J, 4)); + if (LJ_BE) { TRef tmp = trlo; trlo = trhi; trhi = tmp; } + if (tvisfunc(fin)) { + emitir(IRT(IR_XSTORE, IRT_P32), trlo, lj_ir_kfunc(J, funcV(fin))); + emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TFUNC)); + } else if (tviscdata(fin)) { + emitir(IRT(IR_XSTORE, IRT_P32), trlo, + lj_ir_kgc(J, obj2gco(cdataV(fin)), IRT_CDATA)); + emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TCDATA)); + } else { + lj_trace_err(J, LJ_TRERR_BADTYPE); + } + J->needsnap = 1; + } + } +} + +/* Record argument conversions. 
*/ +static TRef crec_call_args(jit_State *J, RecordFFData *rd, + CTState *cts, CType *ct) +{ + TRef args[CCI_NARGS_MAX]; + MSize i, n; + TRef tr; + args[0] = TREF_NIL; + for (n = 0; J->base[n+1]; n++) { + CType *d; + do { + if (!ct->sib || n >= CCI_NARGS_MAX) + lj_trace_err(J, LJ_TRERR_NYICALL); + ct = ctype_get(cts, ct->sib); + } while (ctype_isattrib(ct->info)); + if (!ctype_isfield(ct->info)) + lj_trace_err(J, LJ_TRERR_NYICALL); + d = ctype_rawchild(cts, ct); + if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || + ctype_isenum(d->info))) + lj_trace_err(J, LJ_TRERR_NYICALL); + args[n] = crec_ct_tv(J, d, 0, J->base[n+1], &rd->argv[n+1]); + } + tr = args[0]; + for (i = 1; i < n; i++) + tr = emitir(IRT(IR_CARG, IRT_NIL), tr, args[i]); + return tr; +} + +/* Record function call. */ +static int crec_call(jit_State *J, RecordFFData *rd, GCcdata *cd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CType *ct = ctype_raw(cts, cd->typeid); + IRType tp = IRT_PTR; + if (ctype_isptr(ct->info)) { + tp = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32; + ct = ctype_rawchild(cts, ct); + } + if (ctype_isfunc(ct->info)) { + TRef func = emitir(IRT(IR_FLOAD, tp), J->base[0], IRFL_CDATA_PTR); + CType *ctr = ctype_rawchild(cts, ct); + IRType t = crec_ct2irt(ctr); + TRef tr; + if (ctype_isvoid(ctr->info)) { + t = IRT_NIL; + rd->nres = 0; + } else if (ctype_isenum(ctr->info)) { + ctr = ctype_child(cts, ctr); + } + if (!(ctype_isnum(ctr->info) || ctype_isptr(ctr->info) || + ctype_isvoid(ctr->info)) || + ctype_isbool(ctr->info) || (ct->info & CTF_VARARG) || +#if LJ_TARGET_X86 + ctype_cconv(ct->info) != CTCC_CDECL || +#endif + t == IRT_CDATA) + lj_trace_err(J, LJ_TRERR_NYICALL); + tr = emitir(IRT(IR_CALLXS, t), crec_call_args(J, rd, cts, ct), func); + if (t == IRT_FLOAT || t == IRT_U32) { + tr = emitconv(tr, IRT_NUM, t, 0); + } else if (t == IRT_PTR || (LJ_64 && t == IRT_P32) || + (t == IRT_I64 || t == IRT_U64)) { + TRef trid = lj_ir_kint(J, ctype_cid(ct->info)); + tr = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, tr); + if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J); + } + J->base[0] = tr; + J->needsnap = 1; + return 1; + } + return 0; +} + +void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd) +{ + GCcdata *cd = argv2cdata(J, J->base[0], &rd->argv[0]); + if (cd->typeid == CTID_CTYPEID) + crec_alloc(J, rd, crec_constructor(J, cd, J->base[0])); + else if (!crec_call(J, rd, cd)) + lj_trace_err(J, LJ_TRERR_BADTYPE); +} + +static TRef crec_arith_int64(jit_State *J, TRef *sp, CType **s, MMS mm) +{ + if (ctype_isnum(s[0]->info) && ctype_isnum(s[1]->info)) { + IRType dt; + CTypeID id; + TRef tr; + MSize i; + lj_needsplit(J); + if (((s[0]->info & CTF_UNSIGNED) && s[0]->size == 8) || + ((s[1]->info & CTF_UNSIGNED) && s[1]->size == 8)) { + dt = IRT_U64; id = CTID_UINT64; + } else { + dt = IRT_I64; id = CTID_INT64; + } + for (i = 0; i < 2; i++) { + IRType st = tref_type(sp[i]); + if (st == IRT_NUM || st == IRT_FLOAT) + sp[i] = emitconv(sp[i], dt, st, IRCONV_TRUNC|IRCONV_ANY); + else if (!(st == IRT_I64 || st == IRT_U64)) + sp[i] = emitconv(sp[i], dt, IRT_INT, + ((st - IRT_I8) & 1) ? 0 : IRCONV_SEXT); + } + if (mm < MM_add) { + /* Assume true comparison. Fixup and emit pending guard later. */ + IROp op; + if (mm == MM_eq) { + op = IR_EQ; + } else { + op = mm == MM_lt ? 
IR_LT : IR_LE; + if (dt == IRT_U64) + op += (IR_ULT-IR_LT); + } + lj_ir_set(J, IRTG(op, dt), sp[0], sp[1]); + J->postproc = LJ_POST_FIXGUARD; + return TREF_TRUE; + } else { + tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, dt), sp[0], sp[1]); + } + return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); + } + return 0; +} + +static TRef crec_arith_ptr(jit_State *J, TRef *sp, CType **s, MMS mm) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CType *ctp = s[0]; + if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) { + if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) && + (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) { + if (mm == MM_sub) { /* Pointer difference. */ + TRef tr; + CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info)); + if (sz == 0 || (sz & (sz-1)) != 0) + return 0; /* NYI: integer division. */ + tr = emitir(IRT(IR_SUB, IRT_PTR), sp[0], sp[1]); + tr = emitir(IRT(IR_BSAR, IRT_INTP), tr, lj_ir_kint(J, lj_fls(sz))); +#if LJ_64 + tr = emitconv(tr, IRT_NUM, IRT_INTP, 0); +#endif + return tr; + } else { /* Pointer comparison (unsigned). */ + /* Assume true comparison. Fixup and emit pending guard later. */ + IROp op = mm == MM_eq ? IR_EQ : mm == MM_lt ? IR_ULT : IR_ULE; + lj_ir_set(J, IRTG(op, IRT_PTR), sp[0], sp[1]); + J->postproc = LJ_POST_FIXGUARD; + return TREF_TRUE; + } + } + if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(s[1]->info))) + return 0; + } else if (mm == MM_add && ctype_isnum(ctp->info) && + (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) { + TRef tr = sp[0]; sp[0] = sp[1]; sp[1] = tr; /* Swap pointer and index. */ + ctp = s[1]; + } else { + return 0; + } + { + TRef tr = sp[1]; + IRType t = tref_type(tr); + CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info)); + CTypeID id; +#if LJ_64 + if (t == IRT_NUM || t == IRT_FLOAT) + tr = emitconv(tr, IRT_INTP, t, IRCONV_TRUNC|IRCONV_ANY); + else if (!(t == IRT_I64 || t == IRT_U64)) + tr = emitconv(tr, IRT_INTP, IRT_INT, + ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT); +#else + if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) { + tr = emitconv(tr, IRT_INTP, t, + (t == IRT_NUM || t == IRT_FLOAT) ? + IRCONV_TRUNC|IRCONV_ANY : 0); + } +#endif + tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz)); + tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, IRT_PTR), sp[0], tr); + id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)), + CTSIZE_PTR); + return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr); + } +} + +/* Record ctype arithmetic metamethods. */ +static void crec_arith_meta(jit_State *J, CTState *cts, RecordFFData *rd) +{ + cTValue *tv = NULL; + if (J->base[0]) { + if (tviscdata(&rd->argv[0])) + tv = lj_ctype_meta(cts, argv2cdata(J, J->base[0], &rd->argv[0])->typeid, + (MMS)rd->data); + if (!tv && J->base[1] && tviscdata(&rd->argv[1])) + tv = lj_ctype_meta(cts, argv2cdata(J, J->base[1], &rd->argv[1])->typeid, + (MMS)rd->data); + } + if (tv && tvisfunc(tv)) { + J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; + rd->nres = -1; /* Pending tailcall. */ + } else { + /* NYI: non-function metamethods. 
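+** Only a function-valued metamethod can be recorded, by installing it as a
+** pending tail call (nres = -1); table or other non-function handlers abort
+** the trace and are left to the interpreter.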
*/ + lj_trace_err(J, LJ_TRERR_BADTYPE); + } +} + +void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef sp[2]; + CType *s[2]; + MSize i; + for (i = 0; i < 2; i++) { + TRef tr = J->base[i]; + CType *ct = ctype_get(cts, CTID_DOUBLE); + if (!tr) { + goto trymeta; + } else if (tref_iscdata(tr)) { + CTypeID id = argv2cdata(J, tr, &rd->argv[i])->typeid; + ct = ctype_raw(cts, id); + if (ctype_isptr(ct->info)) { /* Resolve pointer or reference. */ + IRType t = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32; + if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct); + tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_PTR); + } else if (ctype_isinteger(ct->info) && ct->size == 8) { + IRType t = (ct->info & CTF_UNSIGNED) ? IRT_U64 : IRT_I64; + tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT64); + lj_needsplit(J); + goto ok; + } else { + tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata))); + } + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + if (ctype_isnum(ct->info)) { + IRType t = crec_ct2irt(ct); + if (t == IRT_CDATA) goto trymeta; + if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J); + tr = emitir(IRT(IR_XLOAD, t), tr, 0); + } else if (!(ctype_isptr(ct->info) || ctype_isrefarray(ct->info))) { + goto trymeta; + } + } else if (tref_isnil(tr)) { + tr = lj_ir_kptr(J, NULL); + ct = ctype_get(cts, CTID_P_VOID); + } else if (tref_isinteger(tr)) { + ct = ctype_get(cts, CTID_INT32); + } else if (!tref_isnum(tr)) { + goto trymeta; + } + ok: + s[i] = ct; + sp[i] = tr; + } + { + TRef tr; + if ((tr = crec_arith_int64(J, sp, s, (MMS)rd->data)) || + (tr = crec_arith_ptr(J, sp, s, (MMS)rd->data))) { + J->base[0] = tr; + /* Fixup cdata comparisons, too. Avoids some cdata escapes. */ + if (J->postproc == LJ_POST_FIXGUARD && frame_iscont(J->L->base-1)) { + const BCIns *pc = frame_contpc(J->L->base-1) - 1; + if (bc_op(*pc) <= BC_ISNEP) { + setframe_pc(&J2G(J)->tmptv, pc); + J2G(J)->tmptv.u32.lo = ((tref_istrue(tr) ^ bc_op(*pc)) & 1); + J->postproc = LJ_POST_FIXCOMP; + } + } + } else { + trymeta: + crec_arith_meta(J, cts, rd); + } + } +} + +/* -- C library namespace metamethods ------------------------------------- */ + +void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + if (tref_isudata(J->base[0]) && tref_isstr(J->base[1]) && + udataV(&rd->argv[0])->udtype == UDTYPE_FFI_CLIB) { + CLibrary *cl = (CLibrary *)uddata(udataV(&rd->argv[0])); + GCstr *name = strV(&rd->argv[1]); + CType *ct; + CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX); + cTValue *tv = lj_tab_getstr(cl->cache, name); + if (id && tv && tviscdata(tv)) { + /* Specialize to the symbol name and make the result a constant. */ + emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, name)); + if (ctype_isconstval(ct->info)) { + if (ct->size >= 0x80000000u && + (ctype_child(cts, ct)->info & CTF_UNSIGNED)) + J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)ct->size); + else + J->base[0] = lj_ir_kint(J, (int32_t)ct->size); + } else if (ctype_isextern(ct->info)) { + lj_trace_err(J, LJ_TRERR_BADTYPE); /* NYI: access extern variables. */ + } else { + J->base[0] = lj_ir_kgc(J, obj2gco(cdataV(tv)), IRT_CDATA); + } + } else { + lj_trace_err(J, LJ_TRERR_NOCACHE); + } + } /* else: interpreter will throw. 
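+** E.g. indexing with a non-string key or on an object that is not an FFI
+** C library namespace; nothing is recorded and the error surfaces in the
+** interpreter.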
*/ +} + +/* -- FFI library functions ----------------------------------------------- */ + +static TRef crec_toint(jit_State *J, CTState *cts, TRef sp, TValue *sval) +{ + return crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, sp, sval); +} + +void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd) +{ + crec_alloc(J, rd, argv2ctype(J, J->base[0], &rd->argv[0])); +} + +void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef tr = J->base[0]; + if (tr) { + TRef trlen = J->base[1]; + if (trlen) { + trlen = crec_toint(J, cts, trlen, &rd->argv[1]); + tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, tr, &rd->argv[0]); + } else { + tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CCHAR), 0, tr, &rd->argv[0]); + trlen = lj_ir_call(J, IRCALL_strlen, tr); + } + J->base[0] = emitir(IRT(IR_XSNEW, IRT_STR), tr, trlen); + } /* else: interpreter will throw. */ +} + +void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef trdst = J->base[0], trsrc = J->base[1], trlen = J->base[2]; + if (trdst && trsrc && (trlen || tref_isstr(trsrc))) { + trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]); + trsrc = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, trsrc, &rd->argv[1]); + if (trlen) { + trlen = crec_toint(J, cts, trlen, &rd->argv[2]); + } else { + trlen = emitir(IRTI(IR_FLOAD), J->base[1], IRFL_STR_LEN); + trlen = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1)); + } + lj_ir_call(J, IRCALL_memcpy, trdst, trsrc, trlen); + emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); + rd->nres = 0; + } /* else: interpreter will throw. */ +} + +void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + TRef tr = J->base[0], trlen = J->base[1], trfill = J->base[2]; + if (tr && trlen) { + tr = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, tr, &rd->argv[0]); + trlen = crec_toint(J, cts, trlen, &rd->argv[1]); + if (trfill) + trfill = crec_toint(J, cts, trfill, &rd->argv[2]); + else + trfill = lj_ir_kint(J, 0); + lj_ir_call(J, IRCALL_memset, tr, trfill, trlen); + emitir(IRT(IR_XBAR, IRT_NIL), 0, 0); + rd->nres = 0; + } /* else: interpreter will throw. */ +} + +void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd) +{ + argv2ctype(J, J->base[0], &rd->argv[0]); + if (tref_iscdata(J->base[1])) { + argv2ctype(J, J->base[1], &rd->argv[1]); + J->postproc = LJ_POST_FIXBOOL; + J->base[0] = TREF_TRUE; + } else { + J->base[0] = TREF_FALSE; + } +} + +void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd) +{ + if (tref_isstr(J->base[0])) { + /* Specialize to the ABI string to make the boolean result a constant. */ + emitir(IRTG(IR_EQ, IRT_STR), J->base[0], lj_ir_kstr(J, strV(&rd->argv[0]))); + J->postproc = LJ_POST_FIXBOOL; + J->base[0] = TREF_TRUE; + } /* else: interpreter will throw. 
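+** I.e. a non-string argument such as ffi.abi(42). In the string case above
+** the guard on the literal makes a call like ffi.abi("64bit") fold to a
+** constant true/false result on the trace.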
*/ +} + +/* -- Miscellaneous library functions ------------------------------------- */ + +void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd) +{ + CTState *cts = ctype_ctsG(J2G(J)); + CType *d, *ct = lj_ctype_rawref(cts, cdataV(&rd->argv[0])->typeid); + if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); + if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) { + if (ctype_isinteger_or_bool(ct->info) && ct->size <= 4 && + !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) + d = ctype_get(cts, CTID_INT32); + else + d = ctype_get(cts, CTID_DOUBLE); + J->base[0] = crec_ct_tv(J, d, 0, J->base[0], &rd->argv[0]); + } else { + J->base[0] = TREF_NIL; + } +} + +#undef IR +#undef emitir +#undef emitconv + +#endif diff --git a/src/luajit2/src/lj_crecord.h b/src/luajit2/src/lj_crecord.h new file mode 100644 index 0000000000..fce45afed4 --- /dev/null +++ b/src/luajit2/src/lj_crecord.h @@ -0,0 +1,38 @@ +/* +** Trace recorder for C data operations. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CRECORD_H +#define _LJ_CRECORD_H + +#include "lj_obj.h" +#include "lj_jit.h" +#include "lj_ffrecord.h" + +#if LJ_HASJIT && LJ_HASFFI +LJ_FUNC void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd); +LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd); +#else +#define recff_cdata_index recff_nyi +#define recff_cdata_call recff_nyi +#define recff_cdata_arith recff_nyi +#define recff_clib_index recff_nyi +#define recff_ffi_new recff_nyi +#define recff_ffi_string recff_nyi +#define recff_ffi_copy recff_nyi +#define recff_ffi_fill recff_nyi +#define recff_ffi_istype recff_nyi +#define recff_ffi_abi recff_nyi +#endif + +#endif diff --git a/src/luajit2/src/lj_ctype.c b/src/luajit2/src/lj_ctype.c new file mode 100644 index 0000000000..85e9a0ba33 --- /dev/null +++ b/src/luajit2/src/lj_ctype.c @@ -0,0 +1,600 @@ +/* +** C type management. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include "lj_obj.h" + +#if LJ_HASFFI + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_ctype.h" + +/* -- C type definitions -------------------------------------------------- */ + +/* Predefined typedefs. */ +#define CTTDDEF(_) \ + /* Vararg handling. */ \ + _("va_list", P_VOID) \ + _("__builtin_va_list", P_VOID) \ + _("__gnuc_va_list", P_VOID) \ + /* From stddef.h. */ \ + _("ptrdiff_t", INT_PSZ) \ + _("size_t", UINT_PSZ) \ + _("wchar_t", WCHAR) \ + /* Subset of stdint.h. */ \ + _("int8_t", INT8) \ + _("int16_t", INT16) \ + _("int32_t", INT32) \ + _("int64_t", INT64) \ + _("uint8_t", UINT8) \ + _("uint16_t", UINT16) \ + _("uint32_t", UINT32) \ + _("uint64_t", UINT64) \ + _("intptr_t", INT_PSZ) \ + _("uintptr_t", UINT_PSZ) \ + /* End of typedef list. */ + +/* Keywords (only the ones we actually care for). 
*/ +#define CTKWDEF(_) \ + /* Type specifiers. */ \ + _("void", -1, CTOK_VOID) \ + _("_Bool", 1, CTOK_BOOL) \ + _("bool", 1, CTOK_BOOL) \ + _("char", 1, CTOK_CHAR) \ + _("int", 4, CTOK_INT) \ + _("__int8", 1, CTOK_INT) \ + _("__int16", 2, CTOK_INT) \ + _("__int32", 4, CTOK_INT) \ + _("__int64", 8, CTOK_INT) \ + _("float", 4, CTOK_FP) \ + _("double", 8, CTOK_FP) \ + _("long", 0, CTOK_LONG) \ + _("short", 0, CTOK_SHORT) \ + _("_Complex", 0, CTOK_COMPLEX) \ + _("complex", 0, CTOK_COMPLEX) \ + _("__complex", 0, CTOK_COMPLEX) \ + _("__complex__", 0, CTOK_COMPLEX) \ + _("signed", 0, CTOK_SIGNED) \ + _("__signed", 0, CTOK_SIGNED) \ + _("__signed__", 0, CTOK_SIGNED) \ + _("unsigned", 0, CTOK_UNSIGNED) \ + /* Type qualifiers. */ \ + _("const", 0, CTOK_CONST) \ + _("__const", 0, CTOK_CONST) \ + _("__const__", 0, CTOK_CONST) \ + _("volatile", 0, CTOK_VOLATILE) \ + _("__volatile", 0, CTOK_VOLATILE) \ + _("__volatile__", 0, CTOK_VOLATILE) \ + _("restrict", 0, CTOK_RESTRICT) \ + _("__restrict", 0, CTOK_RESTRICT) \ + _("__restrict__", 0, CTOK_RESTRICT) \ + _("inline", 0, CTOK_INLINE) \ + _("__inline", 0, CTOK_INLINE) \ + _("__inline__", 0, CTOK_INLINE) \ + /* Storage class specifiers. */ \ + _("typedef", 0, CTOK_TYPEDEF) \ + _("extern", 0, CTOK_EXTERN) \ + _("static", 0, CTOK_STATIC) \ + _("auto", 0, CTOK_AUTO) \ + _("register", 0, CTOK_REGISTER) \ + /* GCC Attributes. */ \ + _("__extension__", 0, CTOK_EXTENSION) \ + _("__attribute", 0, CTOK_ATTRIBUTE) \ + _("__attribute__", 0, CTOK_ATTRIBUTE) \ + _("asm", 0, CTOK_ASM) \ + _("__asm", 0, CTOK_ASM) \ + _("__asm__", 0, CTOK_ASM) \ + /* MSVC Attributes. */ \ + _("__declspec", 0, CTOK_DECLSPEC) \ + _("__cdecl", CTCC_CDECL, CTOK_CCDECL) \ + _("__thiscall", CTCC_THISCALL, CTOK_CCDECL) \ + _("__fastcall", CTCC_FASTCALL, CTOK_CCDECL) \ + _("__stdcall", CTCC_STDCALL, CTOK_CCDECL) \ + _("__ptr32", 4, CTOK_PTRSZ) \ + _("__ptr64", 8, CTOK_PTRSZ) \ + /* Other type specifiers. */ \ + _("struct", 0, CTOK_STRUCT) \ + _("union", 0, CTOK_UNION) \ + _("enum", 0, CTOK_ENUM) \ + /* Operators. */ \ + _("sizeof", 0, CTOK_SIZEOF) \ + _("__alignof", 0, CTOK_ALIGNOF) \ + _("__alignof__", 0, CTOK_ALIGNOF) \ + /* End of keyword list. */ + +/* Type info for predefined types. Size merged in. */ +static CTInfo lj_ctype_typeinfo[] = { +#define CTTYINFODEF(id, sz, ct, info) CTINFO((ct),(((sz)&0x3fu)<<10)+(info)), +#define CTTDINFODEF(name, id) CTINFO(CT_TYPEDEF, CTID_##id), +#define CTKWINFODEF(name, sz, kw) CTINFO(CT_KW,(((sz)&0x3fu)<<10)+(kw)), +CTTYDEF(CTTYINFODEF) +CTTDDEF(CTTDINFODEF) +CTKWDEF(CTKWINFODEF) +#undef CTTYINFODEF +#undef CTTDINFODEF +#undef CTKWINFODEF + 0 +}; + +/* Predefined type names collected in a single string. */ +static const char * const lj_ctype_typenames = +#define CTTDNAMEDEF(name, id) name "\0" +#define CTKWNAMEDEF(name, sz, cds) name "\0" +CTTDDEF(CTTDNAMEDEF) +CTKWDEF(CTKWNAMEDEF) +#undef CTTDNAMEDEF +#undef CTKWNAMEDEF +; + +#define CTTYPEINFO_NUM (sizeof(lj_ctype_typeinfo)/sizeof(CTInfo)-1) +#define CTTYPETAB_MIN 128 + +/* -- C type interning ---------------------------------------------------- */ + +#define ct_hashtype(info, size) (hashrot(info, size) & CTHASH_MASK) +#define ct_hashname(name) \ + (hashrot(u32ptr(name), u32ptr(name) + HASH_BIAS) & CTHASH_MASK) + +/* Create new type element. 
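+** Unlike lj_ctype_intern() below this always appends a fresh, un-hashed
+** slot at cts->top; it is typically used for elements that need their own
+** identity (structs, fields, named types) rather than shared interned ones.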
*/ +CTypeID lj_ctype_new(CTState *cts, CType **ctp) +{ + CTypeID id = cts->top; + CType *ct; + lua_assert(cts->L); + if (LJ_UNLIKELY(id >= cts->sizetab)) { + if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); + lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType); + } + cts->top = id+1; + *ctp = ct = &cts->tab[id]; + ct->info = 0; + ct->size = 0; + ct->sib = 0; + ct->next = 0; + setgcrefnull(ct->name); + return id; +} + +/* Intern a type element. */ +CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size) +{ + uint32_t h = ct_hashtype(info, size); + CTypeID id = cts->hash[h]; + lua_assert(cts->L); + while (id) { + CType *ct = ctype_get(cts, id); + if (ct->info == info && ct->size == size) + return id; + id = ct->next; + } + id = cts->top; + if (LJ_UNLIKELY(id >= cts->sizetab)) { + if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); + lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType); + } + cts->top = id+1; + cts->tab[id].info = info; + cts->tab[id].size = size; + cts->tab[id].sib = 0; + cts->tab[id].next = cts->hash[h]; + setgcrefnull(cts->tab[id].name); + cts->hash[h] = (CTypeID1)id; + return id; +} + +/* Add type element to hash table. */ +static void ctype_addtype(CTState *cts, CType *ct, CTypeID id) +{ + uint32_t h = ct_hashtype(ct->info, ct->size); + ct->next = cts->hash[h]; + cts->hash[h] = (CTypeID1)id; +} + +/* Add named element to hash table. */ +void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id) +{ + uint32_t h = ct_hashname(gcref(ct->name)); + ct->next = cts->hash[h]; + cts->hash[h] = (CTypeID1)id; +} + +/* Get a C type by name, matching the type mask. */ +CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, uint32_t tmask) +{ + CTypeID id = cts->hash[ct_hashname(name)]; + while (id) { + CType *ct = ctype_get(cts, id); + if (gcref(ct->name) == obj2gco(name) && + ((tmask >> ctype_type(ct->info)) & 1)) { + *ctp = ct; + return id; + } + id = ct->next; + } + *ctp = &cts->tab[0]; /* Simplify caller logic. ctype_get() would assert. */ + return 0; +} + +/* Get a struct/union/enum/function field by name. */ +CType *lj_ctype_getfield(CTState *cts, CType *ct, GCstr *name, CTSize *ofs) +{ + while (ct->sib) { + ct = ctype_get(cts, ct->sib); + if (gcref(ct->name) == obj2gco(name)) { + *ofs = ct->size; + return ct; + } + if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) { + CType *fct = lj_ctype_getfield(cts, ctype_child(cts, ct), name, ofs); + if (fct) { + *ofs += ct->size; + return fct; + } + } + } + return NULL; /* Not found. */ +} + +/* -- C type information -------------------------------------------------- */ + +/* Follow references and get raw type for a C type ID. */ +CType *lj_ctype_rawref(CTState *cts, CTypeID id) +{ + CType *ct = ctype_get(cts, id); + while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) + ct = ctype_child(cts, ct); + return ct; +} + +/* Get size for a C type ID. Does NOT support VLA/VLS. */ +CTSize lj_ctype_size(CTState *cts, CTypeID id) +{ + CType *ct = ctype_raw(cts, id); + return ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID; +} + +/* Get size for a variable-length C type. Does NOT support other C types. */ +CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem) +{ + uint64_t xsz = 0; + if (ctype_isstruct(ct->info)) { + CTypeID arrid = 0, fid = ct->sib; + xsz = ct->size; /* Add the struct size. */ + while (fid) { + CType *ctf = ctype_get(cts, fid); + if (ctype_type(ctf->info) == CT_FIELD) + arrid = ctype_cid(ctf->info); /* Remember last field of VLS. 
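+** (The VLA of a variable-length struct is always its last field, so the
+** loop only needs to keep the child type id of the most recent field.)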
*/ + fid = ctf->sib; + } + ct = ctype_raw(cts, arrid); + } + lua_assert(ctype_isvlarray(ct->info)); /* Must be a VLA. */ + ct = ctype_rawchild(cts, ct); /* Get array element. */ + lua_assert(ctype_hassize(ct->info)); + /* Calculate actual size of VLA and check for overflow. */ + xsz += (uint64_t)ct->size * nelem; + return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID; +} + +/* Get type, qualifiers, size and alignment for a C type ID. */ +CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp) +{ + CTInfo qual = 0; + CType *ct = ctype_get(cts, id); + for (;;) { + CTInfo info = ct->info; + if (ctype_isenum(info)) { + /* Follow child. Need to look at its attributes, too. */ + } else if (ctype_isattrib(info)) { + if (ctype_isxattrib(info, CTA_QUAL)) + qual |= ct->size; + else if (ctype_isxattrib(info, CTA_ALIGN) && !(qual & CTFP_ALIGNED)) + qual |= CTFP_ALIGNED + CTALIGN(ct->size); + } else { + if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN); + qual |= (info & ~(CTF_ALIGN|CTMASK_CID)); + lua_assert(ctype_hassize(info) || ctype_isfunc(info)); + *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size; + break; + } + ct = ctype_get(cts, ctype_cid(info)); + } + return qual; +} + +/* Get ctype metamethod. */ +cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm) +{ + CType *ct = ctype_get(cts, id); + cTValue *tv; + while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) { + id = ctype_cid(ct->info); + ct = ctype_get(cts, id); + } + tv = lj_tab_getint(cts->metatype, (int32_t)id); + if (tv && tvistab(tv) && + (tv = lj_tab_getstr(tabV(tv), mmname_str(cts->g, mm))) && !tvisnil(tv)) + return tv; + return NULL; +} + +/* -- C type representation ----------------------------------------------- */ + +/* Fixed max. length of a C type representation. */ +#define CTREPR_MAX 512 + +typedef struct CTRepr { + char *pb, *pe; + CTState *cts; + lua_State *L; + int needsp; + int ok; + char buf[CTREPR_MAX]; +} CTRepr; + +/* Prepend string. */ +static void ctype_prepstr(CTRepr *ctr, const char *str, MSize len) +{ + char *p = ctr->pb; + if (ctr->buf + len+1 > p) { ctr->ok = 0; return; } + if (ctr->needsp) *--p = ' '; + ctr->needsp = 1; + p -= len; + while (len-- > 0) p[len] = str[len]; + ctr->pb = p; +} + +#define ctype_preplit(ctr, str) ctype_prepstr((ctr), "" str, sizeof(str)-1) + +/* Prepend char. */ +static void ctype_prepc(CTRepr *ctr, int c) +{ + if (ctr->buf >= ctr->pb) { ctr->ok = 0; return; } + *--ctr->pb = c; +} + +/* Prepend number. */ +static void ctype_prepnum(CTRepr *ctr, uint32_t n) +{ + char *p = ctr->pb; + if (ctr->buf + 10+1 > p) { ctr->ok = 0; return; } + do { *--p = (char)('0' + n % 10); } while (n /= 10); + ctr->pb = p; + ctr->needsp = 0; +} + +/* Append char. */ +static void ctype_appc(CTRepr *ctr, int c) +{ + if (ctr->pe >= ctr->buf + CTREPR_MAX) { ctr->ok = 0; return; } + *ctr->pe++ = c; +} + +/* Append number. */ +static void ctype_appnum(CTRepr *ctr, uint32_t n) +{ + char buf[10]; + char *p = buf+sizeof(buf); + char *q = ctr->pe; + if (q > ctr->buf + CTREPR_MAX - 10) { ctr->ok = 0; return; } + do { *--p = (char)('0' + n % 10); } while (n /= 10); + do { *q++ = *p++; } while (p < buf+sizeof(buf)); + ctr->pe = q; +} + +/* Prepend qualifiers. */ +static void ctype_prepqual(CTRepr *ctr, CTInfo info) +{ + if ((info & CTF_VOLATILE)) ctype_preplit(ctr, "volatile"); + if ((info & CTF_CONST)) ctype_preplit(ctr, "const"); +} + +/* Prepend named type. 
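+** Used for struct/union/enum: the tag name is prepended if present,
+** otherwise the numeric C type ID stands in, e.g. an anonymous struct
+** prints roughly as "struct 95".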
*/ +static void ctype_preptype(CTRepr *ctr, CType *ct, CTInfo qual, const char *t) +{ + if (gcref(ct->name)) { + GCstr *str = gco2str(gcref(ct->name)); + ctype_prepstr(ctr, strdata(str), str->len); + } else { + if (ctr->needsp) ctype_prepc(ctr, ' '); + ctype_prepnum(ctr, ctype_typeid(ctr->cts, ct)); + ctr->needsp = 1; + } + ctype_prepstr(ctr, t, (MSize)strlen(t)); + ctype_prepqual(ctr, qual); +} + +static void ctype_repr(CTRepr *ctr, CTypeID id) +{ + CType *ct = ctype_get(ctr->cts, id); + CTInfo qual = 0; + int ptrto = 0; + for (;;) { + CTInfo info = ct->info; + CTSize size = ct->size; + switch (ctype_type(info)) { + case CT_NUM: + if ((info & CTF_BOOL)) { + ctype_preplit(ctr, "bool"); + } else if ((info & CTF_FP)) { + if (size == sizeof(double)) ctype_preplit(ctr, "double"); + else if (size == sizeof(float)) ctype_preplit(ctr, "float"); + else ctype_preplit(ctr, "long double"); + } else if (size == 1) { + if (!((info ^ CTF_UCHAR) & CTF_UNSIGNED)) ctype_preplit(ctr, "char"); + else if (CTF_UCHAR) ctype_preplit(ctr, "signed char"); + else ctype_preplit(ctr, "unsigned char"); + } else if (size < 8) { + if (size == 4) ctype_preplit(ctr, "int"); + else ctype_preplit(ctr, "short"); + if ((info & CTF_UNSIGNED)) ctype_preplit(ctr, "unsigned"); + } else { + ctype_preplit(ctr, "_t"); + ctype_prepnum(ctr, size*8); + ctype_preplit(ctr, "int"); + if ((info & CTF_UNSIGNED)) ctype_prepc(ctr, 'u'); + } + ctype_prepqual(ctr, (qual|info)); + return; + case CT_VOID: + ctype_preplit(ctr, "void"); + ctype_prepqual(ctr, (qual|info)); + return; + case CT_STRUCT: + ctype_preptype(ctr, ct, qual, (info & CTF_UNION) ? "union" : "struct"); + return; + case CT_ENUM: + ctype_preptype(ctr, ct, qual, "enum"); + return; + case CT_ATTRIB: + if (ctype_attrib(info) == CTA_QUAL) qual |= size; + break; + case CT_PTR: + if ((info & CTF_REF)) { + ctype_prepc(ctr, '&'); + } else { + ctype_prepqual(ctr, (qual|info)); + if (LJ_64 && size == 4) ctype_preplit(ctr, "__ptr32"); + ctype_prepc(ctr, '*'); + } + qual = 0; + ptrto = 1; + ctr->needsp = 1; + break; + case CT_ARRAY: + if (ctype_isrefarray(info)) { + ctr->needsp = 1; + if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); } + ctype_appc(ctr, '['); + if (size != CTSIZE_INVALID) { + CTSize csize = ctype_child(ctr->cts, ct)->size; + ctype_appnum(ctr, csize ? size/csize : 0); + } else if ((info & CTF_VLA)) { + ctype_appc(ctr, '?'); + } + ctype_appc(ctr, ']'); + } else if ((info & CTF_COMPLEX)) { + if (size == 2*sizeof(float)) ctype_preplit(ctr, "float"); + ctype_preplit(ctr, "complex"); + return; + } else { + ctype_preplit(ctr, ")))"); + ctype_prepnum(ctr, size); + ctype_preplit(ctr, "__attribute__((vector_size("); + } + break; + case CT_FUNC: + ctr->needsp = 1; + if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); } + ctype_appc(ctr, '('); + ctype_appc(ctr, ')'); + break; + default: + lua_assert(0); + break; + } + ct = ctype_get(ctr->cts, ctype_cid(info)); + } +} + +/* Return a printable representation of a C type. */ +GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name) +{ + global_State *g = G(L); + CTRepr ctr; + ctr.pb = ctr.pe = &ctr.buf[CTREPR_MAX/2]; + ctr.cts = ctype_ctsG(g); + ctr.L = L; + ctr.ok = 1; + ctr.needsp = 0; + if (name) ctype_prepstr(&ctr, strdata(name), name->len); + ctype_repr(&ctr, id); + if (LJ_UNLIKELY(!ctr.ok)) return lj_str_newlit(L, "?"); + return lj_str_new(L, ctr.pb, ctr.pe - ctr.pb); +} + +/* Convert int64_t/uint64_t to string with 'LL' or 'ULL' suffix. 
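+** This matches tostring() on boxed 64 bit cdata numbers, e.g. "-5LL" or
+** "10ULL"; the buffer leaves room for a sign, up to 20 digits and the
+** three-character suffix.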
*/ +GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned) +{ + char buf[1+20+3]; + char *p = buf+sizeof(buf); + int sign = 0; + *--p = 'L'; *--p = 'L'; + if (isunsigned) { + *--p = 'U'; + } else if ((int64_t)n < 0) { + n = (uint64_t)-(int64_t)n; + sign = 1; + } + do { *--p = (char)('0' + n % 10); } while (n /= 10); + if (sign) *--p = '-'; + return lj_str_new(L, p, (size_t)(buf+sizeof(buf)-p)); +} + +/* Convert complex to string with 'i' or 'I' suffix. */ +GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size) +{ + char buf[2*LJ_STR_NUMBUF+2+1]; + TValue re, im; + size_t len; + if (size == 2*sizeof(double)) { + re.n = *(double *)sp; im.n = ((double *)sp)[1]; + } else { + re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1]; + } + len = lj_str_bufnum(buf, &re); + if (!(im.u32.hi & 0x80000000u) || im.n != im.n) buf[len++] = '+'; + len += lj_str_bufnum(buf+len, &im); + buf[len] = buf[len-1] >= 'a' ? 'I' : 'i'; + return lj_str_new(L, buf, len+1); +} + +/* -- C type state -------------------------------------------------------- */ + +/* Initialize C type table and state. */ +CTState *lj_ctype_init(lua_State *L) +{ + CTState *cts = lj_mem_newt(L, sizeof(CTState), CTState); + CType *ct = lj_mem_newvec(L, CTTYPETAB_MIN, CType); + const char *name = lj_ctype_typenames; + CTypeID id; + memset(cts, 0, sizeof(CTState)); + cts->tab = ct; + cts->sizetab = CTTYPETAB_MIN; + cts->top = CTTYPEINFO_NUM; + cts->L = NULL; + cts->g = G(L); + for (id = 0; id < CTTYPEINFO_NUM; id++, ct++) { + CTInfo info = lj_ctype_typeinfo[id]; + ct->size = (CTSize)((int32_t)(info << 16) >> 26); + ct->info = info & 0xffff03ffu; + if (ctype_type(info) == CT_KW || ctype_istypedef(info)) { + size_t len = strlen(name); + GCstr *str = lj_str_new(L, name, len); + ctype_setname(ct, str); + name += len+1; + lj_ctype_addname(cts, ct, id); + } else { + setgcrefnull(ct->name); + if (!ctype_isenum(info)) ctype_addtype(cts, ct, id); + } + } + setmref(G(L)->ctype_state, cts); + return cts; +} + +/* Free C type table and state. */ +void lj_ctype_freestate(global_State *g) +{ + CTState *cts = ctype_ctsG(g); + if (cts) { + lj_mem_freevec(g, cts->tab, cts->sizetab, CType); + lj_mem_freet(g, cts); + } +} + +#endif diff --git a/src/luajit2/src/lj_ctype.h b/src/luajit2/src/lj_ctype.h new file mode 100644 index 0000000000..f7a7121b6c --- /dev/null +++ b/src/luajit2/src/lj_ctype.h @@ -0,0 +1,439 @@ +/* +** C type management. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_CTYPE_H +#define _LJ_CTYPE_H + +#include "lj_obj.h" +#include "lj_gc.h" + +#if LJ_HASFFI + +/* -- C type definitions -------------------------------------------------- */ + +/* C type numbers. Highest 4 bits of C type info. ORDER CT. */ +enum { + /* Externally visible types. */ + CT_NUM, /* Integer or floating-point numbers. */ + CT_STRUCT, /* Struct or union. */ + CT_PTR, /* Pointer or reference. */ + CT_ARRAY, /* Array or complex type. */ + CT_MAYCONVERT = CT_ARRAY, + CT_VOID, /* Void type. */ + CT_ENUM, /* Enumeration. */ + CT_HASSIZE = CT_ENUM, /* Last type where ct->size holds the actual size. */ + CT_FUNC, /* Function. */ + CT_TYPEDEF, /* Typedef. */ + CT_ATTRIB, /* Miscellaneous attributes. */ + /* Internal element types. */ + CT_FIELD, /* Struct/union field or function parameter. */ + CT_BITFIELD, /* Struct/union bitfield. */ + CT_CONSTVAL, /* Constant value. */ + CT_EXTERN, /* External reference. */ + CT_KW /* Keyword. 
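+** Parser keywords live in the same table so the C declaration parser can
+** resolve an identifier with a single name-hash lookup; the token number is
+** packed into the info field and the auxiliary size into the size field.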
*/ +}; + +LJ_STATIC_ASSERT(((int)CT_PTR & (int)CT_ARRAY) == CT_PTR); +LJ_STATIC_ASSERT(((int)CT_STRUCT & (int)CT_ARRAY) == CT_STRUCT); + +/* +** ---------- info ------------ +** |type flags... A cid | size | sib | next | name | +** +----------------------------+--------+-------+-------+-------+-- +** |NUM BFvcUL.. A | size | | type | | +** |STRUCT ..vcU..V A | size | field | name? | name? | +** |PTR ..vcR... A cid | size | | type | | +** |ARRAY VCvc...V A cid | size | | type | | +** |VOID ..vc.... A | size | | type | | +** |ENUM A cid | size | const | name? | name? | +** |FUNC ....VS.. cc cid | nargs | field | name? | name? | +** |TYPEDEF cid | | | name | name | +** |ATTRIB attrnum cid | attr | sib? | type? | | +** |FIELD cid | offset | field | | name? | +** |BITFIELD B.vcU csz bsz pos | offset | field | | name? | +** |CONSTVAL c cid | value | const | name | name | +** |EXTERN cid | | sib? | name | name | +** |KW tok | size | | name | name | +** +----------------------------+--------+-------+-------+-------+-- +** ^^ ^^--- bits used for C type conversion dispatch +*/ + +/* C type info flags. TFFArrrr */ +#define CTF_BOOL 0x08000000u /* Boolean: NUM, BITFIELD. */ +#define CTF_FP 0x04000000u /* Floating-point: NUM. */ +#define CTF_CONST 0x02000000u /* Const qualifier. */ +#define CTF_VOLATILE 0x01000000u /* Volatile qualifier. */ +#define CTF_UNSIGNED 0x00800000u /* Unsigned: NUM, BITFIELD. */ +#define CTF_LONG 0x00400000u /* Long: NUM. */ +#define CTF_VLA 0x00100000u /* Variable-length: ARRAY, STRUCT. */ +#define CTF_REF 0x00800000u /* Reference: PTR. */ +#define CTF_VECTOR 0x08000000u /* Vector: ARRAY. */ +#define CTF_COMPLEX 0x04000000u /* Complex: ARRAY. */ +#define CTF_UNION 0x00800000u /* Union: STRUCT. */ +#define CTF_VARARG 0x00800000u /* Vararg: FUNC. */ +#define CTF_SSEREGPARM 0x00400000u /* SSE register parameters: FUNC. */ + +#define CTF_QUAL (CTF_CONST|CTF_VOLATILE) +#define CTF_ALIGN (CTMASK_ALIGN<<CTSHIFT_ALIGN) +#define CTF_UCHAR ((char)-1 > 0 ? CTF_UNSIGNED : 0) + +/* Flags used in parser. .F.Ammvf cp->attr */ +#define CTFP_ALIGNED 0x00000001u /* cp->attr + ALIGN */ +#define CTFP_PACKED 0x00000002u /* cp->attr */ +/* ...C...f cp->fattr */ +#define CTFP_CCONV 0x00000001u /* cp->fattr + CCONV/[SSE]REGPARM */ + +/* C type info bitfields. */ +#define CTMASK_CID 0x0000ffffu /* Max. 65536 type IDs. */ +#define CTMASK_NUM 0xf0000000u /* Max. 16 type numbers. */ +#define CTSHIFT_NUM 28 +#define CTMASK_ALIGN 15 /* Max. alignment is 2^15. */ +#define CTSHIFT_ALIGN 16 +#define CTMASK_ATTRIB 255 /* Max. 256 attributes. */ +#define CTSHIFT_ATTRIB 16 +#define CTMASK_CCONV 3 /* Max. 4 calling conventions. */ +#define CTSHIFT_CCONV 16 +#define CTMASK_REGPARM 3 /* Max. 0-3 regparms. */ +#define CTSHIFT_REGPARM 18 +/* Bitfields only used in parser. */ +#define CTMASK_VSIZEP 15 /* Max. vector size is 2^15. */ +#define CTSHIFT_VSIZEP 4 +#define CTMASK_MSIZEP 255 /* Max. type size (via mode) is 128. */ +#define CTSHIFT_MSIZEP 8 + +/* Info bits for BITFIELD. Max. size of bitfield is 64 bits. */ +#define CTBSZ_MAX 32 /* Max. size of bitfield is 32 bit. */ +#define CTBSZ_FIELD 127 /* Temp. marker for regular field. */ +#define CTMASK_BITPOS 127 +#define CTMASK_BITBSZ 127 +#define CTMASK_BITCSZ 127 +#define CTSHIFT_BITPOS 0 +#define CTSHIFT_BITBSZ 8 +#define CTSHIFT_BITCSZ 16 + +#define CTF_INSERT(info, field, val) \ + info = (info & ~(CTMASK_##field<<CTSHIFT_##field)) | \ + (((CTSize)(val) & CTMASK_##field) << CTSHIFT_##field) + +/* Calling conventions. 
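+** These match the __cdecl/__thiscall/__fastcall/__stdcall keywords above
+** and are stored in the 2-bit CCONV field of a function type's info
+** (see CTMASK_CCONV/CTSHIFT_CCONV).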
*/ +enum { CTCC_CDECL, CTCC_THISCALL, CTCC_FASTCALL, CTCC_STDCALL }; + +/* Attribute numbers. */ +enum { + CTA_NONE, /* Ignored attribute. Must be zero. */ + CTA_QUAL, /* Unmerged qualifiers. */ + CTA_ALIGN, /* Alignment override. */ + CTA_SUBTYPE, /* Transparent sub-type. */ + CTA_REDIR, /* Redirected symbol name. */ + CTA_BAD, /* To catch bad IDs. */ + CTA__MAX +}; + +/* Special sizes. */ +#define CTSIZE_INVALID 0xffffffffu + +typedef uint32_t CTInfo; /* Type info. */ +typedef uint32_t CTSize; /* Type size. */ +typedef uint32_t CTypeID; /* Type ID. */ +typedef uint16_t CTypeID1; /* Minimum-sized type ID. */ + +/* C type table element. */ +typedef struct CType { + CTInfo info; /* Type info. */ + CTSize size; /* Type size or other info. */ + CTypeID1 sib; /* Sibling element. */ + CTypeID1 next; /* Next element in hash chain. */ + GCRef name; /* Element name (GCstr). */ +} CType; + +#define CTHASH_SIZE 128 /* Number of hash anchors. */ +#define CTHASH_MASK (CTHASH_SIZE-1) + +/* C type state. */ +typedef struct CTState { + CType *tab; /* C type table. */ + CTypeID top; /* Current top of C type table. */ + MSize sizetab; /* Size of C type table. */ + lua_State *L; /* Lua state (needed for errors and allocations). */ + global_State *g; /* Global state. */ + GCtab *finalizer; /* Map of cdata to finalizer. */ + GCtab *metatype; /* Map of CTypeID to metatable. */ + CTypeID1 hash[CTHASH_SIZE]; /* Hash anchors for C type table. */ +} CTState; + +#define CTINFO(ct, flags) (((CTInfo)(ct) << CTSHIFT_NUM) + (flags)) +#define CTALIGN(al) ((CTSize)(al) << CTSHIFT_ALIGN) +#define CTATTRIB(at) ((CTInfo)(at) << CTSHIFT_ATTRIB) + +#define ctype_type(info) ((info) >> CTSHIFT_NUM) +#define ctype_cid(info) ((CTypeID)((info) & CTMASK_CID)) +#define ctype_align(info) (((info) >> CTSHIFT_ALIGN) & CTMASK_ALIGN) +#define ctype_attrib(info) (((info) >> CTSHIFT_ATTRIB) & CTMASK_ATTRIB) +#define ctype_bitpos(info) (((info) >> CTSHIFT_BITPOS) & CTMASK_BITPOS) +#define ctype_bitbsz(info) (((info) >> CTSHIFT_BITBSZ) & CTMASK_BITBSZ) +#define ctype_bitcsz(info) (((info) >> CTSHIFT_BITCSZ) & CTMASK_BITCSZ) +#define ctype_vsizeP(info) (((info) >> CTSHIFT_VSIZEP) & CTMASK_VSIZEP) +#define ctype_msizeP(info) (((info) >> CTSHIFT_MSIZEP) & CTMASK_MSIZEP) +#define ctype_cconv(info) (((info) >> CTSHIFT_CCONV) & CTMASK_CCONV) + +/* Simple type checks. */ +#define ctype_isnum(info) (ctype_type((info)) == CT_NUM) +#define ctype_isvoid(info) (ctype_type((info)) == CT_VOID) +#define ctype_isptr(info) (ctype_type((info)) == CT_PTR) +#define ctype_isarray(info) (ctype_type((info)) == CT_ARRAY) +#define ctype_isstruct(info) (ctype_type((info)) == CT_STRUCT) +#define ctype_isfunc(info) (ctype_type((info)) == CT_FUNC) +#define ctype_isenum(info) (ctype_type((info)) == CT_ENUM) +#define ctype_istypedef(info) (ctype_type((info)) == CT_TYPEDEF) +#define ctype_isattrib(info) (ctype_type((info)) == CT_ATTRIB) +#define ctype_isfield(info) (ctype_type((info)) == CT_FIELD) +#define ctype_isbitfield(info) (ctype_type((info)) == CT_BITFIELD) +#define ctype_isconstval(info) (ctype_type((info)) == CT_CONSTVAL) +#define ctype_isextern(info) (ctype_type((info)) == CT_EXTERN) +#define ctype_hassize(info) (ctype_type((info)) <= CT_HASSIZE) + +/* Combined type and flag checks. 
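+** Each of these masks the type number together with the relevant flag bits
+** so the test compiles to a single AND + compare, e.g. ctype_isinteger()
+** accepts only CT_NUM entries without CTF_BOOL or CTF_FP set.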
*/ +#define ctype_isinteger(info) \ + (((info) & (CTMASK_NUM|CTF_BOOL|CTF_FP)) == CTINFO(CT_NUM, 0)) +#define ctype_isinteger_or_bool(info) \ + (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, 0)) +#define ctype_isbool(info) \ + (((info) & (CTMASK_NUM|CTF_BOOL)) == CTINFO(CT_NUM, CTF_BOOL)) +#define ctype_isfp(info) \ + (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, CTF_FP)) + +#define ctype_ispointer(info) \ + ((ctype_type(info) >> 1) == (CT_PTR >> 1)) /* Pointer or array. */ +#define ctype_isref(info) \ + (((info) & (CTMASK_NUM|CTF_REF)) == CTINFO(CT_PTR, CTF_REF)) + +#define ctype_isrefarray(info) \ + (((info) & (CTMASK_NUM|CTF_VECTOR|CTF_COMPLEX)) == CTINFO(CT_ARRAY, 0)) +#define ctype_isvector(info) \ + (((info) & (CTMASK_NUM|CTF_VECTOR)) == CTINFO(CT_ARRAY, CTF_VECTOR)) +#define ctype_iscomplex(info) \ + (((info) & (CTMASK_NUM|CTF_COMPLEX)) == CTINFO(CT_ARRAY, CTF_COMPLEX)) + +#define ctype_isvltype(info) \ + (((info) & ((CTMASK_NUM|CTF_VLA) - (2u<<CTSHIFT_NUM))) == \ + CTINFO(CT_STRUCT, CTF_VLA)) /* VL array or VL struct. */ +#define ctype_isvlarray(info) \ + (((info) & (CTMASK_NUM|CTF_VLA)) == CTINFO(CT_ARRAY, CTF_VLA)) + +#define ctype_isxattrib(info, at) \ + (((info) & (CTMASK_NUM|CTATTRIB(CTMASK_ATTRIB))) == \ + CTINFO(CT_ATTRIB, CTATTRIB(at))) + +/* Target-dependent sizes and alignments. */ +#if LJ_64 +#define CTSIZE_PTR 8 +#define CTALIGN_PTR CTALIGN(3) +#else +#define CTSIZE_PTR 4 +#define CTALIGN_PTR CTALIGN(2) +#endif + +#define CTINFO_REF(ref) \ + CTINFO(CT_PTR, (CTF_CONST|CTF_REF|CTALIGN_PTR) + (ref)) + +#define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */ + +/* -- Predefined types ---------------------------------------------------- */ + +/* Target-dependent types. */ +#if LJ_TARGET_PPC +#define CTTYDEFP(_) \ + _(LINT32, 4, CT_NUM, CTF_LONG|CTALIGN(2)) +#else +#define CTTYDEFP(_) +#endif + +/* Common types. */ +#define CTTYDEF(_) \ + _(NONE, 0, CT_ATTRIB, CTATTRIB(CTA_BAD)) \ + _(VOID, -1, CT_VOID, CTALIGN(0)) \ + _(CVOID, -1, CT_VOID, CTF_CONST|CTALIGN(0)) \ + _(BOOL, 1, CT_NUM, CTF_BOOL|CTF_UNSIGNED|CTALIGN(0)) \ + _(CCHAR, 1, CT_NUM, CTF_CONST|CTF_UCHAR|CTALIGN(0)) \ + _(INT8, 1, CT_NUM, CTALIGN(0)) \ + _(UINT8, 1, CT_NUM, CTF_UNSIGNED|CTALIGN(0)) \ + _(INT16, 2, CT_NUM, CTALIGN(1)) \ + _(UINT16, 2, CT_NUM, CTF_UNSIGNED|CTALIGN(1)) \ + _(INT32, 4, CT_NUM, CTALIGN(2)) \ + _(UINT32, 4, CT_NUM, CTF_UNSIGNED|CTALIGN(2)) \ + _(INT64, 8, CT_NUM, CTF_LONG|CTALIGN(3)) \ + _(UINT64, 8, CT_NUM, CTF_UNSIGNED|CTF_LONG|CTALIGN(3)) \ + _(FLOAT, 4, CT_NUM, CTF_FP|CTALIGN(2)) \ + _(DOUBLE, 8, CT_NUM, CTF_FP|CTALIGN(3)) \ + _(COMPLEX_FLOAT, 8, CT_ARRAY, CTF_COMPLEX|CTALIGN(2)|CTID_FLOAT) \ + _(COMPLEX_DOUBLE, 16, CT_ARRAY, CTF_COMPLEX|CTALIGN(3)|CTID_DOUBLE) \ + _(P_VOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_VOID) \ + _(P_CVOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CVOID) \ + _(P_CCHAR, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CCHAR) \ + _(A_CCHAR, -1, CT_ARRAY, CTF_CONST|CTALIGN(0)|CTID_CCHAR) \ + _(CTYPEID, 4, CT_ENUM, CTALIGN(2)|CTID_INT32) \ + CTTYDEFP(_) \ + /* End of type list. */ + +/* Public predefined type IDs. */ +enum { +#define CTTYIDDEF(id, sz, ct, info) CTID_##id, +CTTYDEF(CTTYIDDEF) +#undef CTTYIDDEF + /* Predefined typedefs and keywords follow. */ + CTID_MAX = 65536 +}; + +/* Target-dependent type IDs. 
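+** These resolve the INT_PSZ/UINT_PSZ/WCHAR placeholders used by the
+** predefined typedefs above (ptrdiff_t, size_t, intptr_t, wchar_t).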
*/ +#if LJ_64 +#define CTID_INT_PSZ CTID_INT64 +#define CTID_UINT_PSZ CTID_UINT64 +#else +#define CTID_INT_PSZ CTID_INT32 +#define CTID_UINT_PSZ CTID_UINT32 +#endif + +#if LJ_ABI_WIN +#define CTID_WCHAR CTID_UINT16 +#elif LJ_TARGET_PPC +#define CTID_WCHAR CTID_LINT32 +#else +#define CTID_WCHAR CTID_INT32 +#endif + +/* -- C tokens and keywords ----------------------------------------------- */ + +/* C lexer keywords. */ +#define CTOKDEF(_) \ + _(IDENT, "<identifier>") _(STRING, "<string>") \ + _(INTEGER, "<integer>") _(EOF, "<eof>") \ + _(OROR, "||") _(ANDAND, "&&") _(EQ, "==") _(NE, "!=") \ + _(LE, "<=") _(GE, ">=") _(SHL, "<<") _(SHR, ">>") _(DEREF, "->") + +/* Simple declaration specifiers. */ +#define CDSDEF(_) \ + _(VOID) _(BOOL) _(CHAR) _(INT) _(FP) \ + _(LONG) _(LONGLONG) _(SHORT) _(COMPLEX) _(SIGNED) _(UNSIGNED) \ + _(CONST) _(VOLATILE) _(RESTRICT) _(INLINE) \ + _(TYPEDEF) _(EXTERN) _(STATIC) _(AUTO) _(REGISTER) + +/* C keywords. */ +#define CKWDEF(_) \ + CDSDEF(_) _(EXTENSION) _(ASM) _(ATTRIBUTE) \ + _(DECLSPEC) _(CCDECL) _(PTRSZ) \ + _(STRUCT) _(UNION) _(ENUM) \ + _(SIZEOF) _(ALIGNOF) + +/* C token numbers. */ +enum { + CTOK_OFS = 255, +#define CTOKNUM(name, sym) CTOK_##name, +#define CKWNUM(name) CTOK_##name, +CTOKDEF(CTOKNUM) +CKWDEF(CKWNUM) +#undef CTOKNUM +#undef CKWNUM + CTOK_FIRSTDECL = CTOK_VOID, + CTOK_FIRSTSCL = CTOK_TYPEDEF, + CTOK_LASTDECLFLAG = CTOK_REGISTER, + CTOK_LASTDECL = CTOK_ENUM +}; + +/* Declaration specifier flags. */ +enum { +#define CDSFLAG(name) CDF_##name = (1u << (CTOK_##name - CTOK_FIRSTDECL)), +CDSDEF(CDSFLAG) +#undef CDSFLAG + CDF__END +}; + +#define CDF_SCL (CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC|CDF_AUTO|CDF_REGISTER) + +/* -- C type management --------------------------------------------------- */ + +#define ctype_ctsG(g) (mref((g)->ctype_state, CTState)) + +/* Get C type state. */ +static LJ_AINLINE CTState *ctype_cts(lua_State *L) +{ + CTState *cts = ctype_ctsG(G(L)); + cts->L = L; /* Save L for errors and allocations. */ + return cts; +} + +/* Save and restore state of C type table. */ +#define LJ_CTYPE_SAVE(cts) CTState savects_ = *(cts) +#define LJ_CTYPE_RESTORE(cts) \ + ((cts)->top = savects_.top, \ + memcpy((cts)->hash, savects_.hash, sizeof(savects_.hash))) + +/* Check C type ID for validity when assertions are enabled. */ +static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id) +{ + lua_assert(id > 0 && id < cts->top); UNUSED(cts); + return id; +} + +/* Get C type for C type ID. */ +static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id) +{ + return &cts->tab[ctype_check(cts, id)]; +} + +/* Get C type ID for a C type. */ +#define ctype_typeid(cts, ct) ((CTypeID)((ct) - (cts)->tab)) + +/* Get child C type. */ +static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct) +{ + lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) || + ctype_isbitfield(ct->info))); /* These don't have children. */ + return ctype_get(cts, ctype_cid(ct->info)); +} + +/* Get raw type for a C type ID. */ +static LJ_AINLINE CType *ctype_raw(CTState *cts, CTypeID id) +{ + CType *ct = ctype_get(cts, id); + while (ctype_isattrib(ct->info)) ct = ctype_child(cts, ct); + return ct; +} + +/* Get raw type of the child of a C type. */ +static LJ_AINLINE CType *ctype_rawchild(CTState *cts, CType *ct) +{ + do { ct = ctype_child(cts, ct); } while (ctype_isattrib(ct->info)); + return ct; +} + +/* Set the name of a C type table element. 
*/ +static LJ_AINLINE void ctype_setname(CType *ct, GCstr *s) +{ + /* NOBARRIER: mark string as fixed -- the C type table is never collected. */ + fixstring(s); + setgcref(ct->name, obj2gco(s)); +} + +LJ_FUNC CTypeID lj_ctype_new(CTState *cts, CType **ctp); +LJ_FUNC CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size); +LJ_FUNC void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id); +LJ_FUNC CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, + uint32_t tmask); +LJ_FUNC CType *lj_ctype_getfield(CTState *cts, CType *ct, GCstr *name, + CTSize *ofs); +LJ_FUNC CType *lj_ctype_rawref(CTState *cts, CTypeID id); +LJ_FUNC CTSize lj_ctype_size(CTState *cts, CTypeID id); +LJ_FUNC CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem); +LJ_FUNC CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp); +LJ_FUNC cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm); +LJ_FUNC GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name); +LJ_FUNC GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned); +LJ_FUNC GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size); +LJ_FUNC CTState *lj_ctype_init(lua_State *L); +LJ_FUNC void lj_ctype_freestate(global_State *g); + +#endif + +#endif diff --git a/src/luajit2/src/lj_debug.c b/src/luajit2/src/lj_debug.c new file mode 100644 index 0000000000..1c4bac43b6 --- /dev/null +++ b/src/luajit2/src/lj_debug.c @@ -0,0 +1,487 @@ +/* +** Debugging and introspection. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_debug_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_bc.h" + +/* -- Frames -------------------------------------------------------------- */ + +/* Get frame corresponding to a level. */ +cTValue *lj_debug_frame(lua_State *L, int level, int *size) +{ + cTValue *frame, *nextframe, *bot = tvref(L->stack); + /* Traverse frames backwards. */ + for (nextframe = frame = L->base-1; frame > bot; ) { + if (frame_gc(frame) == obj2gco(L)) + level++; /* Skip dummy frames. See lj_meta_call(). */ + if (level-- == 0) { + *size = (int)(nextframe - frame); + return frame; /* Level found. */ + } + nextframe = frame; + if (frame_islua(frame)) { + frame = frame_prevl(frame); + } else { + if (frame_isvarg(frame)) + level++; /* Skip vararg pseudo-frame. */ + frame = frame_prevd(frame); + } + } + *size = level; + return NULL; /* Level not found. */ +} + +/* Invalid bytecode position. */ +#define NO_BCPOS (~(BCPos)0) + +/* Return bytecode position for function/frame or NO_BCPOS. */ +static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe) +{ + const BCIns *ins; + lua_assert(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD); + if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */ + return NO_BCPOS; + } else if (nextframe == NULL) { /* Lua function on top. */ + void *cf = cframe_raw(L->cframe); + if (cf == NULL || (char *)cframe_pc(cf) == (char *)cframe_L(cf)) + return NO_BCPOS; + ins = cframe_pc(cf); /* Only happens during error/hook handling. */ + } else { + if (frame_islua(nextframe)) { + ins = frame_pc(nextframe); + } else if (frame_iscont(nextframe)) { + ins = frame_contpc(nextframe); + } else { + /* Lua function below errfunc/gc/hook: find cframe to get the PC. 
*/ + void *cf = cframe_raw(L->cframe); + TValue *f = L->base-1; + if (cf == NULL) + return NO_BCPOS; + while (f > nextframe) { + if (frame_islua(f)) { + f = frame_prevl(f); + } else { + if (frame_isc(f)) + cf = cframe_raw(cframe_prev(cf)); + f = frame_prevd(f); + } + } + if (cframe_prev(cf)) + cf = cframe_raw(cframe_prev(cf)); + ins = cframe_pc(cf); + } + } + return proto_bcpos(funcproto(fn), ins) - 1; +} + +/* -- Line numbers -------------------------------------------------------- */ + +/* Get line number for a bytecode position. */ +BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc) +{ + const void *lineinfo = proto_lineinfo(pt); + if (pc < pt->sizebc && lineinfo) { + BCLine first = pt->firstline; + if (pc-- == 0) return first; + if (pt->numline < 256) + return first + (BCLine)((const uint8_t *)lineinfo)[pc]; + else if (pt->numline < 65536) + return first + (BCLine)((const uint16_t *)lineinfo)[pc]; + else + return first + (BCLine)((const uint32_t *)lineinfo)[pc]; + } + return 0; +} + +/* Get line number for function/frame. */ +static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe) +{ + BCPos pc = debug_framepc(L, fn, nextframe); + if (pc != NO_BCPOS) { + GCproto *pt = funcproto(fn); + lua_assert(pc < pt->sizebc); + return lj_debug_line(pt, pc); + } + return -1; +} + +/* -- Variable names ------------------------------------------------------ */ + +/* Read ULEB128 value. */ +static uint32_t debug_read_uleb128(const uint8_t **pp) +{ + const uint8_t *p = *pp; + uint32_t v = *p++; + if (LJ_UNLIKELY(v >= 0x80)) { + int sh = 0; + v &= 0x7f; + do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80); + } + *pp = p; + return v; +} + +/* Get name of a local variable from slot number and PC. */ +static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot) +{ + const uint8_t *p = proto_varinfo(pt); + if (p) { + BCPos lastpc = 0; + for (;;) { + const char *name = (const char *)p; + uint32_t vn = *p++; + BCPos startpc, endpc; + if (vn < VARNAME__MAX) { + if (vn == VARNAME_END) break; /* End of varinfo. */ + } else { + while (*p++) ; /* Skip over variable name string. */ + } + lastpc = startpc = lastpc + debug_read_uleb128(&p); + if (startpc > pc) break; + endpc = startpc + debug_read_uleb128(&p); + if (pc < endpc && slot-- == 0) { + if (vn < VARNAME__MAX) { +#define VARNAMESTR(name, str) str "\0" + name = VARNAMEDEF(VARNAMESTR); +#undef VARNAMESTR + if (--vn) while (*name++ || --vn) ; + } + return name; + } + } + } + return NULL; +} + +/* Get name of local variable from 1-based slot number and function/frame. */ +static TValue *debug_localname(lua_State *L, const lua_Debug *ar, + const char **name, BCReg slot1) +{ + uint32_t offset = (uint32_t)ar->i_ci & 0xffff; + uint32_t size = (uint32_t)ar->i_ci >> 16; + TValue *frame = tvref(L->stack) + offset; + TValue *nextframe = size ? frame + size : NULL; + GCfunc *fn = frame_func(frame); + BCPos pc = debug_framepc(L, fn, nextframe); + if (pc != NO_BCPOS && + (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL) + ; + else if (slot1 > 0 && frame + slot1 < (nextframe ? nextframe : L->top)) + *name = "(*temporary)"; + else + *name = NULL; + return frame+slot1; +} + +/* Get name of upvalue. */ +const char *lj_debug_uvname(GCproto *pt, uint32_t idx) +{ + const uint8_t *p = proto_uvinfo(pt); + lua_assert(idx < pt->sizeuv); + if (!p) return ""; + if (idx) while (*p++ || --idx) ; + return (const char *)p; +} + +/* Get name and value of upvalue. 
*/ +const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp) +{ + if (tvisfunc(o)) { + GCfunc *fn = funcV(o); + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + if (idx < pt->sizeuv) { + *tvp = uvval(&gcref(fn->l.uvptr[idx])->uv); + return lj_debug_uvname(pt, idx); + } + } else { + if (idx < fn->c.nupvalues) { + *tvp = &fn->c.upvalue[idx]; + return ""; + } + } + } + return NULL; +} + +/* Deduce name of an object from slot number and PC. */ +const char *lj_debug_slotname(GCproto *pt, const BCIns *ip, BCReg slot, + const char **name) +{ + const char *lname; +restart: + lname = debug_varname(pt, proto_bcpos(pt, ip), slot); + if (lname != NULL) { *name = lname; return "local"; } + while (--ip > proto_bc(pt)) { + BCIns ins = *ip; + BCOp op = bc_op(ins); + BCReg ra = bc_a(ins); + if (bcmode_a(op) == BCMbase) { + if (slot >= ra && (op != BC_KNIL || slot <= bc_d(ins))) + return NULL; + } else if (bcmode_a(op) == BCMdst && ra == slot) { + switch (bc_op(ins)) { + case BC_MOV: + if (ra == slot) { slot = bc_d(ins); goto restart; } + break; + case BC_GGET: + *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_d(ins)))); + return "global"; + case BC_TGETS: + *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins)))); + if (ip > proto_bc(pt)) { + BCIns insp = ip[-1]; + if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1 && + bc_d(insp) == bc_b(ins)) + return "method"; + } + return "field"; + case BC_UGET: + *name = lj_debug_uvname(pt, bc_d(ins)); + return "upvalue"; + default: + return NULL; + } + } + } + return NULL; +} + +/* Deduce function name from caller of a frame. */ +const char *lj_debug_funcname(lua_State *L, TValue *frame, const char **name) +{ + TValue *pframe; + GCfunc *fn; + BCPos pc; + if (frame_isvarg(frame)) + frame = frame_prevd(frame); + pframe = frame_prev(frame); + fn = frame_func(pframe); + pc = debug_framepc(L, fn, frame); + if (pc != NO_BCPOS) { + GCproto *pt = funcproto(fn); + const BCIns *ip = &proto_bc(pt)[check_exp(pc < pt->sizebc, pc)]; + MMS mm = bcmode_mm(bc_op(*ip)); + if (mm == MM_call) { + BCReg slot = bc_a(*ip); + if (bc_op(*ip) == BC_ITERC) slot -= 3; + return lj_debug_slotname(pt, ip, slot, name); + } else if (mm != MM__MAX) { + *name = strdata(mmname_str(G(L), mm)); + return "metamethod"; + } + } + return NULL; +} + +/* -- Source code locations ----------------------------------------------- */ + +/* Generate shortened source name. */ +void lj_debug_shortname(char *out, GCstr *str) +{ + const char *src = strdata(str); + if (*src == '=') { + strncpy(out, src+1, LUA_IDSIZE); /* Remove first char. */ + out[LUA_IDSIZE-1] = '\0'; /* Ensures null termination. */ + } else if (*src == '@') { /* Output "source", or "...source". */ + size_t len = str->len-1; + src++; /* Skip the `@' */ + if (len >= LUA_IDSIZE) { + src += len-(LUA_IDSIZE-4); /* Get last part of file name. */ + *out++ = '.'; *out++ = '.'; *out++ = '.'; + } + strcpy(out, src); + } else { /* Output [string "string"]. */ + size_t len; /* Length, up to first control char. */ + for (len = 0; len < LUA_IDSIZE-12; len++) + if (((const unsigned char *)src)[len] < ' ') break; + strcpy(out, "[string \""); out += 9; + if (src[len] != '\0') { /* Must truncate? */ + if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15; + strncpy(out, src, len); out += len; + strcpy(out, "..."); out += 3; + } else { + strcpy(out, src); out += len; + } + strcpy(out, "\"]"); + } +} + +/* Add current location of a frame to error message. 
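+** Produces the familiar "source:line: message" form, e.g.
+** "test.lua:42: attempt to index a nil value"; if the frame is not a Lua
+** function or no line can be derived, the message is pushed unchanged.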
*/ +void lj_debug_addloc(lua_State *L, const char *msg, + cTValue *frame, cTValue *nextframe) +{ + if (frame) { + GCfunc *fn = frame_func(frame); + if (isluafunc(fn)) { + BCLine line = debug_frameline(L, fn, nextframe); + if (line >= 0) { + char buf[LUA_IDSIZE]; + lj_debug_shortname(buf, proto_chunkname(funcproto(fn))); + lj_str_pushf(L, "%s:%d: %s", buf, line, msg); + return; + } + } + } + lj_str_pushf(L, "%s", msg); +} + +/* Push location string for a bytecode position to Lua stack. */ +void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc) +{ + GCstr *name = proto_chunkname(pt); + const char *s = strdata(name); + MSize i, len = name->len; + BCLine line = lj_debug_line(pt, pc); + if (*s == '@') { + s++; len--; + for (i = len; i > 0; i--) + if (s[i] == '/' || s[i] == '\\') { + s += i+1; + break; + } + lj_str_pushf(L, "%s:%d", s, line); + } else if (len > 40) { + lj_str_pushf(L, "%p:%d", pt, line); + } else if (*s == '=') { + lj_str_pushf(L, "%s:%d", s+1, line); + } else { + lj_str_pushf(L, "\"%s\":%d", s, line); + } +} + +/* -- Public debug API ---------------------------------------------------- */ + +/* lua_getupvalue() and lua_setupvalue() are in lj_api.c. */ + +LUA_API const char *lua_getlocal(lua_State *L, const lua_Debug *ar, int n) +{ + const char *name; + TValue *o = debug_localname(L, ar, &name, (BCReg)n); + if (name) { + copyTV(L, L->top, o); + incr_top(L); + } + return name; +} + +LUA_API const char *lua_setlocal(lua_State *L, const lua_Debug *ar, int n) +{ + const char *name; + TValue *o = debug_localname(L, ar, &name, (BCReg)n); + if (name) + copyTV(L, o, L->top-1); + L->top--; + return name; +} + +LUA_API int lua_getinfo(lua_State *L, const char *what, lua_Debug *ar) +{ + int status = 1; + TValue *frame = NULL; + TValue *nextframe = NULL; + GCfunc *fn; + if (*what == '>') { + TValue *func = L->top - 1; + api_check(L, tvisfunc(func)); + fn = funcV(func); + L->top--; + what++; + } else { + uint32_t offset = (uint32_t)ar->i_ci & 0xffff; + uint32_t size = (uint32_t)ar->i_ci >> 16; + lua_assert(offset != 0); + frame = tvref(L->stack) + offset; + if (size) nextframe = frame + size; + lua_assert(frame <= tvref(L->maxstack) && + (!nextframe || nextframe <= tvref(L->maxstack))); + fn = frame_func(frame); + lua_assert(fn->c.gct == ~LJ_TFUNC); + } + for (; *what; what++) { + if (*what == 'S') { + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + BCLine firstline = pt->firstline; + GCstr *name = proto_chunkname(pt); + ar->source = strdata(name); + lj_debug_shortname(ar->short_src, name); + ar->linedefined = (int)firstline; + ar->lastlinedefined = (int)(firstline + pt->numline); + ar->what = firstline ? "Lua" : "main"; + } else { + ar->source = "=[C]"; + ar->short_src[0] = '['; + ar->short_src[1] = 'C'; + ar->short_src[2] = ']'; + ar->short_src[3] = '\0'; + ar->linedefined = -1; + ar->lastlinedefined = -1; + ar->what = "C"; + } + } else if (*what == 'l') { + ar->currentline = frame ? debug_frameline(L, fn, nextframe) : -1; + } else if (*what == 'u') { + ar->nups = fn->c.nupvalues; + } else if (*what == 'n') { + ar->namewhat = frame ? lj_debug_funcname(L, frame, &ar->name) : NULL; + if (ar->namewhat == NULL) { + ar->namewhat = ""; + ar->name = NULL; + } + } else if (*what == 'f') { + setfuncV(L, L->top, fn); + incr_top(L); + } else if (*what == 'L') { + if (isluafunc(fn)) { + GCtab *t = lj_tab_new(L, 0, 0); + GCproto *pt = funcproto(fn); + const void *lineinfo = proto_lineinfo(pt); + if (lineinfo) { + BCLine first = pt->firstline; + int sz = pt->numline < 256 ? 
1 : pt->numline < 65536 ? 2 : 4; + MSize i, szl = pt->sizebc-1; + for (i = 0; i < szl; i++) { + BCLine line = first + + (sz == 1 ? (BCLine)((const uint8_t *)lineinfo)[i] : + sz == 2 ? (BCLine)((const uint16_t *)lineinfo)[i] : + (BCLine)((const uint32_t *)lineinfo)[i]); + setboolV(lj_tab_setint(L, t, line), 1); + } + } + settabV(L, L->top, t); + } else { + setnilV(L->top); + } + incr_top(L); + } else { + status = 0; /* Bad option. */ + } + } + return status; +} + +LUA_API int lua_getstack(lua_State *L, int level, lua_Debug *ar) +{ + int size; + cTValue *frame = lj_debug_frame(L, level, &size); + if (frame) { + ar->i_ci = (size << 16) + (int)(frame - tvref(L->stack)); + return 1; + } else { + ar->i_ci = level - size; + return 0; + } +} + diff --git a/src/luajit2/src/lj_debug.h b/src/luajit2/src/lj_debug.h new file mode 100644 index 0000000000..f82fdfedd6 --- /dev/null +++ b/src/luajit2/src/lj_debug.h @@ -0,0 +1,41 @@ +/* +** Debugging and introspection. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_DEBUG_H +#define _LJ_DEBUG_H + +#include "lj_obj.h" + +LJ_FUNC cTValue *lj_debug_frame(lua_State *L, int level, int *size); +LJ_FUNC BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc); +LJ_FUNC const char *lj_debug_uvname(GCproto *pt, uint32_t idx); +LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp); +LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc, + BCReg slot, const char **name); +LJ_FUNC const char *lj_debug_funcname(lua_State *L, TValue *frame, + const char **name); +LJ_FUNC void lj_debug_shortname(char *out, GCstr *str); +LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg, + cTValue *frame, cTValue *nextframe); +LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc); + +/* Fixed internal variable names. */ +#define VARNAMEDEF(_) \ + _(FOR_IDX, "(for index)") \ + _(FOR_STOP, "(for limit)") \ + _(FOR_STEP, "(for step)") \ + _(FOR_GEN, "(for generator)") \ + _(FOR_STATE, "(for state)") \ + _(FOR_CTL, "(for control)") + +enum { + VARNAME_END, +#define VARNAMEENUM(name, str) VARNAME_##name, + VARNAMEDEF(VARNAMEENUM) +#undef VARNAMEENUM + VARNAME__MAX +}; + +#endif diff --git a/src/luajit2/src/lj_def.h b/src/luajit2/src/lj_def.h new file mode 100644 index 0000000000..78d988ea27 --- /dev/null +++ b/src/luajit2/src/lj_def.h @@ -0,0 +1,305 @@ +/* +** LuaJIT common internal definitions. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_DEF_H +#define _LJ_DEF_H + +#include "lua.h" + +#if defined(_MSC_VER) +/* MSVC is stuck in the last century and doesn't have C99's stdint.h. */ +typedef __int8 int8_t; +typedef __int16 int16_t; +typedef __int32 int32_t; +typedef __int64 int64_t; +typedef unsigned __int8 uint8_t; +typedef unsigned __int16 uint16_t; +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +#ifdef _WIN64 +typedef __int64 intptr_t; +typedef unsigned __int64 uintptr_t; +#else +typedef __int32 intptr_t; +typedef unsigned __int32 uintptr_t; +#endif +#elif defined(__symbian__) +/* Cough. */ +typedef signed char int8_t; +typedef short int int16_t; +typedef int int32_t; +typedef long long int64_t; +typedef unsigned char uint8_t; +typedef unsigned short int uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; +typedef int intptr_t; +typedef unsigned int uintptr_t; +#else +#include <stdint.h> +#endif + +/* Needed everywhere. */ +#include <string.h> +#include <stdlib.h> + +/* Various VM limits. 
*/ +#define LJ_MAX_MEM 0x7fffff00 /* Max. total memory allocation. */ +#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */ +#define LJ_MAX_STR LJ_MAX_MEM /* Max. string length. */ +#define LJ_MAX_UDATA LJ_MAX_MEM /* Max. userdata length. */ + +#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */ +#define LJ_MAX_HBITS 26 /* Max. hash bits. */ +#define LJ_MAX_ABITS 28 /* Max. bits of array key. */ +#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. */ +#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */ + +#define LJ_MAX_LINE LJ_MAX_MEM /* Max. source code line number. */ +#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */ +#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */ +#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */ +#define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */ +#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */ + +#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */ +#define LJ_STACK_EXTRA 5 /* Extra stack space (metamethods). */ + +/* Minimum table/buffer sizes. */ +#define LJ_MIN_GLOBAL 6 /* Min. global table size (hbits). */ +#define LJ_MIN_REGISTRY 2 /* Min. registry size (hbits). */ +#define LJ_MIN_STRTAB 256 /* Min. string table size (pow2). */ +#define LJ_MIN_SBUF 32 /* Min. string buffer length. */ +#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */ +#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */ +#define LJ_MIN_K64SZ 16 /* Min. size for chained K64Array. */ + +/* JIT compiler limits. */ +#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */ +#define LJ_MAX_PHI 32 /* Max. # of PHIs for a loop. */ +#define LJ_MAX_EXITSTUBGR 8 /* Max. # of exit stub groups. */ + +/* Various macros. */ +#ifndef UNUSED +#define UNUSED(x) ((void)(x)) /* to avoid warnings */ +#endif + +#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo) +#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p)) +#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p)) + +#define checki8(x) ((x) == (int32_t)(int8_t)(x)) +#define checku8(x) ((x) == (int32_t)(uint8_t)(x)) +#define checki16(x) ((x) == (int32_t)(int16_t)(x)) +#define checku16(x) ((x) == (int32_t)(uint16_t)(x)) +#define checki32(x) ((x) == (int32_t)(x)) +#define checku32(x) ((x) == (uint32_t)(x)) +#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x)) + +/* Every half-decent C compiler transforms this into a rotate instruction. */ +#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(8*sizeof(x)-(n)))) +#define lj_ror(x, n) (((x)<<(8*sizeof(x)-(n))) | ((x)>>(n))) + +/* A really naive Bloom filter. But sufficient for our needs. */ +typedef uintptr_t BloomFilter; +#define BLOOM_MASK (8*sizeof(BloomFilter) - 1) +#define bloombit(x) ((uintptr_t)1 << ((x) & BLOOM_MASK)) +#define bloomset(b, x) ((b) |= bloombit((x))) +#define bloomtest(b, x) ((b) & bloombit((x))) + +#if defined(__GNUC__) + +#define LJ_NORET __attribute__((noreturn)) +#define LJ_ALIGN(n) __attribute__((aligned(n))) +#define LJ_INLINE inline +#define LJ_AINLINE inline __attribute__((always_inline)) +#define LJ_NOINLINE __attribute__((noinline)) + +#if defined(__ELF__) || defined(__MACH__) +#if !((defined(__sun__) && defined(__svr4__)) || defined(__solaris__)) +#define LJ_NOAPI extern __attribute__((visibility("hidden"))) +#endif +#endif + +/* Note: it's only beneficial to use fastcall on x86 and then only for up to +** two non-FP args. The amalgamated compile covers all LJ_FUNC cases. 
Only +** indirect calls and related tail-called C functions are marked as fastcall. +*/ +#if defined(__i386__) +#define LJ_FASTCALL __attribute__((fastcall)) +#endif + +#define LJ_LIKELY(x) __builtin_expect(!!(x), 1) +#define LJ_UNLIKELY(x) __builtin_expect(!!(x), 0) + +#define lj_ffs(x) ((uint32_t)__builtin_ctz(x)) +/* Don't ask ... */ +#if defined(__INTEL_COMPILER) && (defined(__i386__) || defined(__x86_64__)) +static LJ_AINLINE uint32_t lj_fls(uint32_t x) +{ + uint32_t r; __asm__("bsrl %1, %0" : "=r" (r) : "rm" (x) : "cc"); return r; +} +#else +#define lj_fls(x) ((uint32_t)(__builtin_clz(x)^31)) +#endif + +#if defined(__arm__) +static LJ_AINLINE uint32_t lj_bswap(uint32_t x) +{ + uint32_t r; +#if __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6T2__ || __ARM_ARCH_6Z__ ||\ + __ARM_ARCH_6ZK__ || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ + __asm__("rev %0, %1" : "=r" (r) : "r" (x)); + return r; +#else +#ifdef __thumb__ + r = x ^ lj_ror(x, 16); +#else + __asm__("eor %0, %1, %1, ror #16" : "=r" (r) : "r" (x)); +#endif + return ((r & 0xff00ffffu) >> 8) ^ lj_ror(x, 8); +#endif +} + +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32)); +} +#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) +static LJ_AINLINE uint32_t lj_bswap(uint32_t x) +{ + return (uint32_t)__builtin_bswap32((int32_t)x); +} + +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + return (uint64_t)__builtin_bswap64((int64_t)x); +} +#elif defined(__i386__) || defined(__x86_64__) +static LJ_AINLINE uint32_t lj_bswap(uint32_t x) +{ + uint32_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r; +} + +#if defined(__i386__) +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32)); +} +#else +static LJ_AINLINE uint64_t lj_bswap64(uint64_t x) +{ + uint64_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r; +} +#endif +#else +#error "missing define for lj_bswap()" +#endif + +typedef union __attribute__((packed)) Unaligned16 { + uint16_t u; + uint8_t b[2]; +} Unaligned16; + +typedef union __attribute__((packed)) Unaligned32 { + uint32_t u; + uint8_t b[4]; +} Unaligned32; + +/* Unaligned load of uint16_t. */ +static LJ_AINLINE uint16_t lj_getu16(const void *p) +{ + return ((const Unaligned16 *)p)->u; +} + +/* Unaligned load of uint32_t. */ +static LJ_AINLINE uint32_t lj_getu32(const void *p) +{ + return ((const Unaligned32 *)p)->u; +} + +#elif defined(_MSC_VER) + +#define LJ_NORET __declspec(noreturn) +#define LJ_ALIGN(n) __declspec(align(n)) +#define LJ_INLINE __inline +#define LJ_AINLINE __forceinline +#define LJ_NOINLINE __declspec(noinline) +#if defined(_M_IX86) +#define LJ_FASTCALL __fastcall +#endif + +static LJ_AINLINE uint32_t lj_ffs(uint32_t x) +{ + uint32_t r; _BitScanForward(&r, x); return r; +} + +static LJ_AINLINE uint32_t lj_fls(uint32_t x) +{ + uint32_t r; _BitScanReverse(&r, x); return r; +} + +#define lj_bswap(x) (_byteswap_ulong((x))) +#define lj_bswap64(x) (_byteswap_uint64((x))) + +/* MSVC is only supported on x86/x64, where unaligned loads are always ok. */ +#define lj_getu16(p) (*(uint16_t *)(p)) +#define lj_getu32(p) (*(uint32_t *)(p)) + +#else +#error "missing defines for your compiler" +#endif + +/* Optional defines. 
*/ +#ifndef LJ_FASTCALL +#define LJ_FASTCALL +#endif +#ifndef LJ_NORET +#define LJ_NORET +#endif +#ifndef LJ_NOAPI +#define LJ_NOAPI extern +#endif +#ifndef LJ_LIKELY +#define LJ_LIKELY(x) (x) +#define LJ_UNLIKELY(x) (x) +#endif + +/* Attributes for internal functions. */ +#define LJ_DATA LJ_NOAPI +#define LJ_DATADEF +#define LJ_ASMF LJ_NOAPI +#define LJ_FUNCA LJ_NOAPI +#if defined(ljamalg_c) +#define LJ_FUNC static +#else +#define LJ_FUNC LJ_NOAPI +#endif +#define LJ_FUNC_NORET LJ_FUNC LJ_NORET +#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET +#define LJ_ASMF_NORET LJ_ASMF LJ_NORET + +/* Runtime assertions. */ +#ifdef lua_assert +#define check_exp(c, e) (lua_assert(c), (e)) +#define api_check(l, e) lua_assert(e) +#else +#define lua_assert(c) ((void)0) +#define check_exp(c, e) (e) +#define api_check luai_apicheck +#endif + +/* Static assertions. */ +#define LJ_ASSERT_NAME2(name, line) name ## line +#define LJ_ASSERT_NAME(line) LJ_ASSERT_NAME2(lj_assert_, line) +#ifdef __COUNTER__ +#define LJ_STATIC_ASSERT(cond) \ + extern void LJ_ASSERT_NAME(__COUNTER__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1]) +#else +#define LJ_STATIC_ASSERT(cond) \ + extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1]) +#endif + +#endif diff --git a/src/luajit2/src/lj_dispatch.c b/src/luajit2/src/lj_dispatch.c new file mode 100644 index 0000000000..c29cad46f3 --- /dev/null +++ b/src/luajit2/src/lj_dispatch.c @@ -0,0 +1,459 @@ +/* +** Instruction dispatch handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_dispatch_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_ff.h" +#if LJ_HASJIT +#include "lj_jit.h" +#endif +#include "lj_trace.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "luajit.h" + +/* Bump GG_NUM_ASMFF in lj_dispatch.h as needed. Ugly. */ +LJ_STATIC_ASSERT(GG_NUM_ASMFF == FF_NUM_ASMFUNC); + +/* -- Dispatch table management ------------------------------------------- */ + +/* Initialize instruction dispatch table and hot counters. */ +void lj_dispatch_init(GG_State *GG) +{ + uint32_t i; + ASMFunction *disp = GG->dispatch; + for (i = 0; i < GG_LEN_SDISP; i++) + disp[GG_LEN_DDISP+i] = disp[i] = makeasmfunc(lj_bc_ofs[i]); + for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++) + disp[i] = makeasmfunc(lj_bc_ofs[i]); + /* The JIT engine is off by default. luaopen_jit() turns it on. */ + disp[BC_FORL] = disp[BC_IFORL]; + disp[BC_ITERL] = disp[BC_IITERL]; + disp[BC_LOOP] = disp[BC_ILOOP]; + disp[BC_FUNCF] = disp[BC_IFUNCF]; + disp[BC_FUNCV] = disp[BC_IFUNCV]; + GG->g.bc_cfunc_ext = GG->g.bc_cfunc_int = BCINS_AD(BC_FUNCC, LUA_MINSTACK, 0); + for (i = 0; i < GG_NUM_ASMFF; i++) + GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0); +} + +#if LJ_HASJIT +/* Initialize hotcount table. */ +void lj_dispatch_init_hotcount(global_State *g) +{ + HotCount start = (HotCount)G2J(g)->param[JIT_P_hotloop]; + HotCount *hotcount = G2GG(g)->hotcount; + uint32_t i; + for (i = 0; i < HOTCOUNT_SIZE; i++) + hotcount[i] = start; +} +#endif + +/* Internal dispatch mode bits. */ +#define DISPMODE_JIT 0x01 /* JIT compiler on. */ +#define DISPMODE_REC 0x02 /* Recording active. */ +#define DISPMODE_INS 0x04 /* Override instruction dispatch. */ +#define DISPMODE_CALL 0x08 /* Override call dispatch. */ +#define DISPMODE_RET 0x08 /* Override return dispatch. */ + +/* Update dispatch table depending on various flags. 
*/ +void lj_dispatch_update(global_State *g) +{ + uint8_t oldmode = g->dispatchmode; + uint8_t mode = 0; +#if LJ_HASJIT + mode |= (G2J(g)->flags & JIT_F_ON) ? DISPMODE_JIT : 0; + mode |= G2J(g)->state != LJ_TRACE_IDLE ? + (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0; +#endif + mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0; + mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0; + mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0; + if (oldmode != mode) { /* Mode changed? */ + ASMFunction *disp = G2GG(g)->dispatch; + ASMFunction f_forl, f_iterl, f_loop, f_funcf, f_funcv; + g->dispatchmode = mode; + + /* Hotcount if JIT is on, but not while recording. */ + if ((mode & (DISPMODE_JIT|DISPMODE_REC)) == DISPMODE_JIT) { + f_forl = makeasmfunc(lj_bc_ofs[BC_FORL]); + f_iterl = makeasmfunc(lj_bc_ofs[BC_ITERL]); + f_loop = makeasmfunc(lj_bc_ofs[BC_LOOP]); + f_funcf = makeasmfunc(lj_bc_ofs[BC_FUNCF]); + f_funcv = makeasmfunc(lj_bc_ofs[BC_FUNCV]); + } else { /* Otherwise use the non-hotcounting instructions. */ + f_forl = disp[GG_LEN_DDISP+BC_IFORL]; + f_iterl = disp[GG_LEN_DDISP+BC_IITERL]; + f_loop = disp[GG_LEN_DDISP+BC_ILOOP]; + f_funcf = makeasmfunc(lj_bc_ofs[BC_IFUNCF]); + f_funcv = makeasmfunc(lj_bc_ofs[BC_IFUNCV]); + } + /* Init static counting instruction dispatch first (may be copied below). */ + disp[GG_LEN_DDISP+BC_FORL] = f_forl; + disp[GG_LEN_DDISP+BC_ITERL] = f_iterl; + disp[GG_LEN_DDISP+BC_LOOP] = f_loop; + + /* Set dynamic instruction dispatch. */ + if ((oldmode ^ mode) & (DISPMODE_REC|DISPMODE_INS)) { + /* Need to update the whole table. */ + if (!(mode & (DISPMODE_REC|DISPMODE_INS))) { /* No ins dispatch? */ + /* Copy static dispatch table to dynamic dispatch table. */ + memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction)); + /* Overwrite with dynamic return dispatch. */ + if ((mode & DISPMODE_RET)) { + disp[BC_RETM] = lj_vm_rethook; + disp[BC_RET] = lj_vm_rethook; + disp[BC_RET0] = lj_vm_rethook; + disp[BC_RET1] = lj_vm_rethook; + } + } else { + /* The recording dispatch also checks for hooks. */ + ASMFunction f = (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook; + uint32_t i; + for (i = 0; i < GG_LEN_SDISP; i++) + disp[i] = f; + } + } else if (!(mode & (DISPMODE_REC|DISPMODE_INS))) { + /* Otherwise set dynamic counting ins. */ + disp[BC_FORL] = f_forl; + disp[BC_ITERL] = f_iterl; + disp[BC_LOOP] = f_loop; + /* Set dynamic return dispatch. */ + if ((mode & DISPMODE_RET)) { + disp[BC_RETM] = lj_vm_rethook; + disp[BC_RET] = lj_vm_rethook; + disp[BC_RET0] = lj_vm_rethook; + disp[BC_RET1] = lj_vm_rethook; + } else { + disp[BC_RETM] = disp[GG_LEN_DDISP+BC_RETM]; + disp[BC_RET] = disp[GG_LEN_DDISP+BC_RET]; + disp[BC_RET0] = disp[GG_LEN_DDISP+BC_RET0]; + disp[BC_RET1] = disp[GG_LEN_DDISP+BC_RET1]; + } + } + + /* Set dynamic call dispatch. */ + if ((oldmode ^ mode) & DISPMODE_CALL) { /* Update the whole table? */ + uint32_t i; + if ((mode & 8) == 0) { /* No call hooks? */ + for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++) + disp[i] = makeasmfunc(lj_bc_ofs[i]); + } else { + for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++) + disp[i] = lj_vm_callhook; + } + } + if (!(mode & DISPMODE_CALL)) { /* Overwrite dynamic counting ins. */ + disp[BC_FUNCF] = f_funcf; + disp[BC_FUNCV] = f_funcv; + } + +#if LJ_HASJIT + /* Reset hotcounts for JIT off to on transition. 
*/ + if ((mode & DISPMODE_JIT) && !(oldmode & DISPMODE_JIT)) + lj_dispatch_init_hotcount(g); +#endif + } +} + +/* -- JIT mode setting ---------------------------------------------------- */ + +#if LJ_HASJIT +/* Set JIT mode for a single prototype. */ +static void setptmode(global_State *g, GCproto *pt, int mode) +{ + if ((mode & LUAJIT_MODE_ON)) { /* (Re-)enable JIT compilation. */ + pt->flags &= ~PROTO_NOJIT; + lj_trace_reenableproto(pt); /* Unpatch all ILOOP etc. bytecodes. */ + } else { /* Flush and/or disable JIT compilation. */ + if (!(mode & LUAJIT_MODE_FLUSH)) + pt->flags |= PROTO_NOJIT; + lj_trace_flushproto(g, pt); /* Flush all traces of prototype. */ + } +} + +/* Recursively set the JIT mode for all children of a prototype. */ +static void setptmode_all(global_State *g, GCproto *pt, int mode) +{ + ptrdiff_t i; + if (!(pt->flags & PROTO_CHILD)) return; + for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) { + GCobj *o = proto_kgc(pt, i); + if (o->gch.gct == ~LJ_TPROTO) { + setptmode(g, gco2pt(o), mode); + setptmode_all(g, gco2pt(o), mode); + } + } +} +#endif + +/* Public API function: control the JIT engine. */ +int luaJIT_setmode(lua_State *L, int idx, int mode) +{ + global_State *g = G(L); + int mm = mode & LUAJIT_MODE_MASK; + lj_trace_abort(g); /* Abort recording on any state change. */ + /* Avoid pulling the rug from under our own feet. */ + if ((g->hookmask & HOOK_GC)) + lj_err_caller(L, LJ_ERR_NOGCMM); + switch (mm) { +#if LJ_HASJIT + case LUAJIT_MODE_ENGINE: + if ((mode & LUAJIT_MODE_FLUSH)) { + lj_trace_flushall(L); + } else { + if (!(mode & LUAJIT_MODE_ON)) + G2J(g)->flags &= ~(uint32_t)JIT_F_ON; +#if LJ_TARGET_X86ORX64 + else if ((G2J(g)->flags & JIT_F_SSE2)) + G2J(g)->flags |= (uint32_t)JIT_F_ON; + else + return 0; /* Don't turn on JIT compiler without SSE2 support. */ +#else + else + G2J(g)->flags |= (uint32_t)JIT_F_ON; +#endif + lj_dispatch_update(g); + } + break; + case LUAJIT_MODE_FUNC: + case LUAJIT_MODE_ALLFUNC: + case LUAJIT_MODE_ALLSUBFUNC: { + cTValue *tv = idx == 0 ? frame_prev(L->base-1) : + idx > 0 ? L->base + (idx-1) : L->top + idx; + GCproto *pt; + if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn)) + pt = funcproto(&gcval(tv)->fn); /* Cannot use funcV() for frame slot. */ + else if (tvisproto(tv)) + pt = protoV(tv); + else + return 0; /* Failed. */ + if (mm != LUAJIT_MODE_ALLSUBFUNC) + setptmode(g, pt, mode); + if (mm != LUAJIT_MODE_FUNC) + setptmode_all(g, pt, mode); + break; + } + case LUAJIT_MODE_TRACE: + if (!(mode & LUAJIT_MODE_FLUSH)) + return 0; /* Failed. */ + lj_trace_flush(G2J(g), idx); + break; +#else + case LUAJIT_MODE_ENGINE: + case LUAJIT_MODE_FUNC: + case LUAJIT_MODE_ALLFUNC: + case LUAJIT_MODE_ALLSUBFUNC: + UNUSED(idx); + if ((mode & LUAJIT_MODE_ON)) + return 0; /* Failed. */ + break; +#endif + case LUAJIT_MODE_WRAPCFUNC: + if ((mode & LUAJIT_MODE_ON)) { + if (idx != 0) { + cTValue *tv = idx > 0 ? L->base + (idx-1) : L->top + idx; + if (tvislightud(tv)) + g->wrapf = (lua_CFunction)lightudV(tv); + else + return 0; /* Failed. */ + } else { + return 0; /* Failed. */ + } + g->bc_cfunc_ext = BCINS_AD(BC_FUNCCW, 0, 0); + } else { + g->bc_cfunc_ext = BCINS_AD(BC_FUNCC, 0, 0); + } + break; + default: + return 0; /* Failed. */ + } + return 1; /* OK. */ +} + +/* Enforce (dynamic) linker error for version mismatches. See luajit.c. */ +LUA_API void LUAJIT_VERSION_SYM(void) +{ +} + +/* -- Hooks --------------------------------------------------------------- */ + +/* This function can be called asynchronously (e.g. during a signal). 
*/ +LUA_API int lua_sethook(lua_State *L, lua_Hook func, int mask, int count) +{ + global_State *g = G(L); + mask &= HOOK_EVENTMASK; + if (func == NULL || mask == 0) { mask = 0; func = NULL; } /* Consistency. */ + g->hookf = func; + g->hookcount = g->hookcstart = (int32_t)count; + g->hookmask = (uint8_t)((g->hookmask & ~HOOK_EVENTMASK) | mask); + lj_trace_abort(g); /* Abort recording on any hook change. */ + lj_dispatch_update(g); + return 1; +} + +LUA_API lua_Hook lua_gethook(lua_State *L) +{ + return G(L)->hookf; +} + +LUA_API int lua_gethookmask(lua_State *L) +{ + return G(L)->hookmask & HOOK_EVENTMASK; +} + +LUA_API int lua_gethookcount(lua_State *L) +{ + return (int)G(L)->hookcstart; +} + +/* Call a hook. */ +static void callhook(lua_State *L, int event, BCLine line) +{ + global_State *g = G(L); + lua_Hook hookf = g->hookf; + if (hookf && !hook_active(g)) { + lua_Debug ar; + lj_trace_abort(g); /* Abort recording on any hook call. */ + ar.event = event; + ar.currentline = line; + /* Top frame, nextframe = NULL. */ + ar.i_ci = (int)((L->base-1) - tvref(L->stack)); + lj_state_checkstack(L, 1+LUA_MINSTACK); + hook_enter(g); + hookf(L, &ar); + lua_assert(hook_active(g)); + hook_leave(g); + } +} + +/* -- Dispatch callbacks -------------------------------------------------- */ + +/* Calculate number of used stack slots in the current frame. */ +static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres) +{ + BCIns ins = pc[-1]; + if (bc_op(ins) == BC_UCLO) + ins = pc[bc_j(ins)]; + switch (bc_op(ins)) { + case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1; + case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1; + case BC_TSETM: return bc_a(ins) + nres-1; + default: return pt->framesize; + } +} + +/* Instruction dispatch. Used by instr/line/return hooks or when recording. */ +void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc) +{ + ERRNO_SAVE + GCfunc *fn = curr_func(L); + GCproto *pt = funcproto(fn); + void *cf = cframe_raw(L->cframe); + const BCIns *oldpc = cframe_pc(cf); + global_State *g = G(L); + BCReg slots; + setcframe_pc(cf, pc); + slots = cur_topslot(pt, pc, cframe_multres_n(cf)); + L->top = L->base + slots; /* Fix top. */ +#if LJ_HASJIT + { + jit_State *J = G2J(g); + if (J->state != LJ_TRACE_IDLE) { + J->L = L; + lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ + } + } +#endif + if ((g->hookmask & LUA_MASKCOUNT) && g->hookcount == 0) { + g->hookcount = g->hookcstart; + callhook(L, LUA_HOOKCOUNT, -1); + L->top = L->base + slots; /* Fix top again. */ + } + if ((g->hookmask & LUA_MASKLINE)) { + BCPos npc = proto_bcpos(pt, pc) - 1; + BCPos opc = proto_bcpos(pt, oldpc) - 1; + BCLine line = lj_debug_line(pt, npc); + if (pc <= oldpc || opc >= pt->sizebc || line != lj_debug_line(pt, opc)) { + callhook(L, LUA_HOOKLINE, line); + L->top = L->base + slots; /* Fix top again. */ + } + } + if ((g->hookmask & LUA_MASKRET) && bc_isret(bc_op(pc[-1]))) + callhook(L, LUA_HOOKRET, -1); + ERRNO_RESTORE +} + +/* Initialize call. Ensure stack space and return # of missing parameters. */ +static int call_init(lua_State *L, GCfunc *fn) +{ + if (isluafunc(fn)) { + GCproto *pt = funcproto(fn); + int numparams = pt->numparams; + int gotparams = (int)(L->top - L->base); + int need = pt->framesize; + if ((pt->flags & PROTO_VARARG)) need += 1+gotparams; + lj_state_checkstack(L, (MSize)need); + numparams -= gotparams; + return numparams >= 0 ? numparams : 0; + } else { + lj_state_checkstack(L, LUA_MINSTACK); + return 0; + } +} + +/* Call dispatch. 
Used by call hooks, hot calls or when recording. */ +ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc) +{ + ERRNO_SAVE + GCfunc *fn = curr_func(L); + BCOp op; + global_State *g = G(L); +#if LJ_HASJIT + jit_State *J = G2J(g); +#endif + int missing = call_init(L, fn); +#if LJ_HASJIT + J->L = L; + if ((uintptr_t)pc & 1) { /* Marker for hot call. */ + pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1); + lj_trace_hot(J, pc); + goto out; + } else if (J->state != LJ_TRACE_IDLE && + !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) { + /* Record the FUNC* bytecodes, too. */ + lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ + } +#endif + if ((g->hookmask & LUA_MASKCALL)) { + int i; + for (i = 0; i < missing; i++) /* Add missing parameters. */ + setnilV(L->top++); + callhook(L, LUA_HOOKCALL, -1); + /* Preserve modifications of missing parameters by lua_setlocal(). */ + while (missing-- > 0 && tvisnil(L->top - 1)) + L->top--; + } +#if LJ_HASJIT +out: +#endif + op = bc_op(pc[-1]); /* Get FUNC* op. */ +#if LJ_HASJIT + /* Use the non-hotcounting variants if JIT is off or while recording. */ + if ((!(J->flags & JIT_F_ON) || J->state != LJ_TRACE_IDLE) && + (op == BC_FUNCF || op == BC_FUNCV)) + op = (BCOp)((int)op+(int)BC_IFUNCF-(int)BC_FUNCF); +#endif + ERRNO_RESTORE + return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */ +} + diff --git a/src/luajit2/src/lj_dispatch.h b/src/luajit2/src/lj_dispatch.h new file mode 100644 index 0000000000..dd4f68fe26 --- /dev/null +++ b/src/luajit2/src/lj_dispatch.h @@ -0,0 +1,89 @@ +/* +** Instruction dispatch handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_DISPATCH_H +#define _LJ_DISPATCH_H + +#include "lj_obj.h" +#include "lj_bc.h" +#if LJ_HASJIT +#include "lj_jit.h" +#endif + +/* Type of hot counter. Must match the code in the assembler VM. */ +/* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */ +typedef uint16_t HotCount; + +/* Number of hot counter hash table entries (must be a power of two). */ +#define HOTCOUNT_SIZE 64 +#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount)) + +/* This solves a circular dependency problem -- bump as needed. Sigh. */ +#define GG_NUM_ASMFF 62 + +#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF) +#define GG_LEN_SDISP BC_FUNCF +#define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP) + +/* Global state, main thread and extra fields are allocated together. */ +typedef struct GG_State { + lua_State L; /* Main thread. */ + global_State g; /* Global state. */ +#if LJ_HASJIT + jit_State J; /* JIT state. */ + HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */ +#endif + ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */ + BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. 
*/ +} GG_State; + +#define GG_OFS(field) ((int)offsetof(GG_State, field)) +#define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g))) +#define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J))) +#define L2GG(L) (G2GG(G(L))) +#define J2G(J) (&J2GG(J)->g) +#define G2J(gl) (&G2GG(gl)->J) +#define L2J(L) (&L2GG(L)->J) +#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g)) +#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch)) +#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch)) +#define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch)) +#define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction)) + +#define hotcount_get(gg, pc) \ + (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)] +#define hotcount_set(gg, pc, val) \ + (hotcount_get((gg), (pc)) = (HotCount)(val)) + +/* Dispatch table management. */ +LJ_FUNC void lj_dispatch_init(GG_State *GG); +#if LJ_HASJIT +LJ_FUNC void lj_dispatch_init_hotcount(global_State *g); +#endif +LJ_FUNC void lj_dispatch_update(global_State *g); + +/* Instruction dispatch callback for hooks or when recording. */ +LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc); +LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc); +LJ_FUNCA void LJ_FASTCALL lj_dispatch_return(lua_State *L, const BCIns *pc); + +#if LJ_HASFFI && !defined(_BUILDVM_H) +/* Save/restore errno and GetLastError() around hooks, exits and recording. */ +#include <errno.h> +#if LJ_TARGET_WINDOWS +#define WIN32_LEAN_AND_MEAN +#include <windows.h> +#define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError(); +#define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr); +#else +#define ERRNO_SAVE int olderr = errno; +#define ERRNO_RESTORE errno = olderr; +#endif +#else +#define ERRNO_SAVE +#define ERRNO_RESTORE +#endif + +#endif diff --git a/src/luajit2/src/lj_emit_arm.h b/src/luajit2/src/lj_emit_arm.h new file mode 100644 index 0000000000..ea90852000 --- /dev/null +++ b/src/luajit2/src/lj_emit_arm.h @@ -0,0 +1,300 @@ +/* +** ARM instruction emitter. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Constant encoding --------------------------------------------------- */ + +static uint8_t emit_invai[16] = { + /* AND */ (ARMI_AND^ARMI_BIC) >> 21, + /* EOR */ 0, + /* SUB */ (ARMI_SUB^ARMI_ADD) >> 21, + /* RSB */ 0, + /* ADD */ (ARMI_ADD^ARMI_SUB) >> 21, + /* ADC */ (ARMI_ADC^ARMI_SBC) >> 21, + /* SBC */ (ARMI_SBC^ARMI_ADC) >> 21, + /* RSC */ 0, + /* TST */ 0, + /* TEQ */ 0, + /* CMP */ (ARMI_CMP^ARMI_CMN) >> 21, + /* CMN */ (ARMI_CMN^ARMI_CMP) >> 21, + /* ORR */ 0, + /* MOV */ (ARMI_MOV^ARMI_MVN) >> 21, + /* BIC */ (ARMI_BIC^ARMI_AND) >> 21, + /* MVN */ (ARMI_MVN^ARMI_MOV) >> 21 +}; + +/* Encode constant in K12 format for data processing instructions. */ +static uint32_t emit_isk12(ARMIns ai, int32_t n) +{ + uint32_t invai, i, m = (uint32_t)n; + /* K12: unsigned 8 bit value, rotated in steps of two bits. */ + for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2)) + if (m <= 255) return ARMI_K12|m|i; + /* Otherwise try negation/complement with the inverse instruction. */ + invai = emit_invai[((ai >> 21) & 15)]; + if (!invai) return 0; /* Failed. No inverse instruction. */ + m = ~(uint32_t)n; + if (invai == ((ARMI_SUB^ARMI_ADD) >> 21) || + invai == (ARMI_CMP^ARMI_CMN) >> 21) m++; + for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2)) + if (m <= 255) return ARMI_K12|(invai<<21)|m|i; + return 0; /* Failed. 
*/ +} + +/* -- Emit basic instructions --------------------------------------------- */ + +static void emit_dnm(ASMState *as, ARMIns ai, Reg rd, Reg rn, Reg rm) +{ + *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn) | ARMF_M(rm); +} + +static void emit_dm(ASMState *as, ARMIns ai, Reg rd, Reg rm) +{ + *--as->mcp = ai | ARMF_D(rd) | ARMF_M(rm); +} + +static void emit_dn(ASMState *as, ARMIns ai, Reg rd, Reg rn) +{ + *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn); +} + +static void emit_nm(ASMState *as, ARMIns ai, Reg rn, Reg rm) +{ + *--as->mcp = ai | ARMF_N(rn) | ARMF_M(rm); +} + +static void emit_d(ASMState *as, ARMIns ai, Reg rd) +{ + *--as->mcp = ai | ARMF_D(rd); +} + +static void emit_n(ASMState *as, ARMIns ai, Reg rn) +{ + *--as->mcp = ai | ARMF_N(rn); +} + +static void emit_m(ASMState *as, ARMIns ai, Reg rm) +{ + *--as->mcp = ai | ARMF_M(rm); +} + +static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) +{ + lua_assert(ofs >= -255 && ofs <= 255); + if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; + *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) | + ((ofs & 0xf0) << 4) | (ofs & 0x0f); +} + +static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) +{ + lua_assert(ofs >= -4095 && ofs <= 4095); + /* Combine LDR/STR pairs to LDRD/STRD. */ + if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) && + (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn && + (uint32_t)ofs <= 252 && !(ofs & 3) && !((rd ^ (ofs >>2)) & 1) && + as->mcp != as->mcloop) { + as->mcp++; + emit_lsox(as, ai == ARMI_LDR ? ARMI_LDRD : ARMI_STRD, rd&~1, rn, ofs&~4); + return; + } + if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; + *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd) | ARMF_N(rn) | ofs; +} + +/* -- Emit loads/stores --------------------------------------------------- */ + +/* Prefer spills of BASE/L. */ +#define emit_canremat(ref) ((ref) < ASMREF_L) + +/* Try to find a one step delta relative to another constant. */ +static int emit_kdelta1(ASMState *as, Reg d, int32_t i) +{ + RegSet work = ~as->freeset & RSET_GPR; + while (work) { + Reg r = rset_picktop(work); + IRRef ref = regcost_ref(as->cost[r]); + lua_assert(r != d); + if (emit_canremat(ref)) { + int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); + uint32_t k = emit_isk12(ARMI_ADD, delta); + if (k) { + if (k == ARMI_K12) + emit_dm(as, ARMI_MOV, d, r); + else + emit_dn(as, ARMI_ADD^k, d, r); + return 1; + } + } + rset_clear(work, r); + } + return 0; /* Failed. */ +} + +/* Try to find a two step delta relative to another constant. */ +static int emit_kdelta2(ASMState *as, Reg d, int32_t i) +{ + RegSet work = ~as->freeset & RSET_GPR; + while (work) { + Reg r = rset_picktop(work); + IRRef ref = regcost_ref(as->cost[r]); + lua_assert(r != d); + if (emit_canremat(ref)) { + int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); + uint32_t sh, inv = 0, k2, k; + if (delta < 0) { delta = -delta; inv = ARMI_ADD^ARMI_SUB; } + sh = lj_ffs(delta) & ~1; + k2 = emit_isk12(0, delta & (255 << sh)); + k = emit_isk12(0, delta & ~(255 << sh)); + if (k) { + emit_dn(as, ARMI_ADD^k2^inv, d, d); + emit_dn(as, ARMI_ADD^k^inv, d, r); + return 1; + } + } + rset_clear(work, r); + } + return 0; /* Failed. */ +} + +/* Load a 32 bit constant into a GPR. */ +static void emit_loadi(ASMState *as, Reg r, int32_t i) +{ + uint32_t k = emit_isk12(ARMI_MOV, i); + lua_assert(rset_test(as->freeset, r) || r == RID_TMP); + if (k) { + /* Standard K12 constant. 
*/ + emit_d(as, ARMI_MOV^k, r); + } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) { + /* 16 bit loword constant for ARMv6T2. */ + emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); + } else if (emit_kdelta1(as, r, i)) { + /* One step delta relative to another constant. */ + } else if ((as->flags & JIT_F_ARMV6T2)) { + /* 32 bit hiword/loword constant for ARMv6T2. */ + emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r); + emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); + } else if (emit_kdelta2(as, r, i)) { + /* Two step delta relative to another constant. */ + } else { + /* Otherwise construct the constant with up to 4 instructions. */ + /* NYI: use mvn+bic, use pc-relative loads. */ + for (;;) { + uint32_t sh = lj_ffs(i) & ~1; + int32_t m = i & (255 << sh); + i &= ~(255 << sh); + if (i == 0) { + emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r); + break; + } + emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r); + } + } +} + +#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) + +static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); + +/* Get/set from constant pointer. */ +static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p) +{ + int32_t i = i32ptr(p); + emit_lso(as, ai, r, ra_allock(as, (i & ~4095), rset_exclude(RSET_GPR, r)), + (i & 4095)); +} + +/* Get/set global_State fields. */ +#define emit_getgl(as, r, field) \ + emit_lsptr(as, ARMI_LDR, (r), (void *)&J2G(as->J)->field) +#define emit_setgl(as, r, field) \ + emit_lsptr(as, ARMI_STR, (r), (void *)&J2G(as->J)->field) + +/* Trace number is determined from pc of exit instruction. */ +#define emit_setvmstate(as, i) UNUSED(i) + +/* -- Emit control-flow instructions -------------------------------------- */ + +/* Label for internal jumps. */ +typedef MCode *MCLabel; + +/* Return label pointing to current PC. */ +#define emit_label(as) ((as)->mcp) + +static void emit_branch(ASMState *as, ARMIns ai, MCode *target) +{ + MCode *p = as->mcp; + ptrdiff_t delta = (target - p) - 1; + lua_assert(((delta + 0x00800000) >> 24) == 0); + *--p = ai | ((uint32_t)delta & 0x00ffffffu); + as->mcp = p; +} + +static void emit_call(ASMState *as, void *target) +{ + MCode *p = --as->mcp; + ptrdiff_t delta = ((char *)target - (char *)p) - 8; + if ((((delta>>2) + 0x00800000) >> 24) == 0) { + if ((delta & 1)) + *p = ARMI_BLX | ((uint32_t)(delta>>2) & 0x00ffffffu) | ((delta&2) << 27); + else + *p = ARMI_BL | ((uint32_t)(delta>>2) & 0x00ffffffu); + } else { /* Target out of range: need indirect call. But don't use R0-R3. */ + Reg r = ra_allock(as, i32ptr(target), RSET_RANGE(RID_R4, RID_R12+1)); + *p = ARMI_BLXr | ARMF_M(r); + } +} + +/* -- Emit generic operations --------------------------------------------- */ + +/* Generic move between two regs. */ +static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) +{ + lua_assert(!irt_isnum(ir->t)); UNUSED(ir); + if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */ + MCode ins = *as->mcp, swp = (src^dst); + if ((ins & 0x0c000000) == 0x04000000 && (ins & 0x02000010) != 0x02000010) { + if (!((ins ^ (dst << 16)) & 0x000f0000)) + *as->mcp = ins ^ (swp << 16); /* Swap N in load/store. */ + if (!(ins & 0x00100000) && !((ins ^ (dst << 12)) & 0x0000f000)) + *as->mcp = ins ^ (swp << 12); /* Swap D in store. */ + } + } + emit_dm(as, ARMI_MOV, dst, src); +} + +/* Generic load of register from stack slot. 
*/ +static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs) +{ + lua_assert(!irt_isnum(ir->t)); UNUSED(ir); + emit_lso(as, ARMI_LDR, r, RID_SP, ofs); +} + +/* Generic store of register to stack slot. */ +static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs) +{ + lua_assert(!irt_isnum(ir->t)); UNUSED(ir); + emit_lso(as, ARMI_STR, r, RID_SP, ofs); +} + +/* Emit an arithmetic/logic operation with a constant operand. */ +static void emit_opk(ASMState *as, ARMIns ai, Reg dest, Reg src, + int32_t i, RegSet allow) +{ + uint32_t k = emit_isk12(ai, i); + if (k) + emit_dn(as, ai^k, dest, src); + else + emit_dnm(as, ai, dest, src, ra_allock(as, i, allow)); +} + +/* Add offset to pointer. */ +static void emit_addptr(ASMState *as, Reg r, int32_t ofs) +{ + if (ofs) + emit_opk(as, ARMI_ADD, r, r, ofs, rset_exclude(RSET_GPR, r)); +} + +#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs)) + diff --git a/src/luajit2/src/lj_emit_x86.h b/src/luajit2/src/lj_emit_x86.h new file mode 100644 index 0000000000..6c06184dd2 --- /dev/null +++ b/src/luajit2/src/lj_emit_x86.h @@ -0,0 +1,457 @@ +/* +** x86/x64 instruction emitter. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* -- Emit basic instructions --------------------------------------------- */ + +#define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7))) + +#if LJ_64 +#define REXRB(p, rr, rb) \ + { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \ + if (rex != 0x40) *--(p) = rex; } +#define FORCE_REX 0x200 +#define REX_64 (FORCE_REX|0x080000) +#else +#define REXRB(p, rr, rb) ((void)0) +#define FORCE_REX 0 +#define REX_64 0 +#endif + +#define emit_i8(as, i) (*--as->mcp = (MCode)(i)) +#define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4) +#define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4) + +#define emit_x87op(as, xo) \ + (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2) + +/* op */ +static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx, + MCode *p, int delta) +{ + int n = (int8_t)xo; +#if defined(__GNUC__) + if (__builtin_constant_p(xo) && n == -2) + p[delta-2] = (MCode)(xo >> 24); + else if (__builtin_constant_p(xo) && n == -3) + *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16); + else +#endif + *(uint32_t *)(p+delta-5) = (uint32_t)xo; + p += n + delta; +#if LJ_64 + { + uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1); + if (rex != 0x40) { + rex |= (rr >> 16); + if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); } + else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; } + *--p = (MCode)rex; + } + } +#else + UNUSED(rr); UNUSED(rb); UNUSED(rx); +#endif + return p; +} + +/* op + modrm */ +#define emit_opm(xo, mode, rr, rb, p, delta) \ + (p[(delta)-1] = MODRM((mode), (rr), (rb)), \ + emit_op((xo), (rr), (rb), 0, (p), (delta))) + +/* op + modrm + sib */ +#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \ + (p[-1] = MODRM((scale), (rx), (rb)), \ + p[-2] = MODRM((mode), (rr), RID_ESP), \ + emit_op((xo), (rr), (rb), (rx), (p), -1)) + +/* op r1, r2 */ +static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2) +{ + MCode *p = as->mcp; + as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0); +} + +#if LJ_64 && defined(LUA_USE_ASSERT) +/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). 
*/ +static int32_t ptr2addr(const void *p) +{ + lua_assert((uintptr_t)p < (uintptr_t)0x80000000); + return i32ptr(p); +} +#else +#define ptr2addr(p) (i32ptr((p))) +#endif + +/* op r, [addr] */ +static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr) +{ + MCode *p = as->mcp; + *(int32_t *)(p-4) = ptr2addr(addr); +#if LJ_64 + p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP); + as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5); +#else + as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4); +#endif +} + +/* op r, [base+ofs] */ +static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs) +{ + MCode *p = as->mcp; + x86Mode mode; + if (ra_hasreg(rb)) { + if (ofs == 0 && (rb&7) != RID_EBP) { + mode = XM_OFS0; + } else if (checki8(ofs)) { + *--p = (MCode)ofs; + mode = XM_OFS8; + } else { + p -= 4; + *(int32_t *)p = ofs; + mode = XM_OFS32; + } + if ((rb&7) == RID_ESP) + *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP); + } else { + *(int32_t *)(p-4) = ofs; +#if LJ_64 + p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP); + p -= 5; + rb = RID_ESP; +#else + p -= 4; + rb = RID_EBP; +#endif + mode = XM_OFS0; + } + as->mcp = emit_opm(xo, mode, rr, rb, p, 0); +} + +/* op r, [base+idx*scale+ofs] */ +static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx, + x86Mode scale, int32_t ofs) +{ + MCode *p = as->mcp; + x86Mode mode; + if (ofs == 0 && (rb&7) != RID_EBP) { + mode = XM_OFS0; + } else if (checki8(ofs)) { + mode = XM_OFS8; + *--p = (MCode)ofs; + } else { + mode = XM_OFS32; + p -= 4; + *(int32_t *)p = ofs; + } + as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p); +} + +/* op r, i */ +static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i) +{ + MCode *p = as->mcp; + x86Op xo; + if (checki8(i)) { + *--p = (MCode)i; + xo = XG_TOXOi8(xg); + } else { + p -= 4; + *(int32_t *)p = i; + xo = XG_TOXOi(xg); + } + as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0); +} + +/* op [base+ofs], i */ +static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs, + int32_t i) +{ + x86Op xo; + if (checki8(i)) { + emit_i8(as, i); + xo = XG_TOXOi8(xg); + } else { + emit_i32(as, i); + xo = XG_TOXOi(xg); + } + emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs); +} + +#define emit_shifti(as, xg, r, i) \ + (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r))) + +/* op r, rm/mrm */ +static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb) +{ + MCode *p = as->mcp; + x86Mode mode = XM_REG; + if (rb == RID_MRM) { + rb = as->mrm.base; + if (rb == RID_NONE) { + rb = RID_EBP; + mode = XM_OFS0; + p -= 4; + *(int32_t *)p = as->mrm.ofs; + if (as->mrm.idx != RID_NONE) + goto mrmidx; +#if LJ_64 + *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP); + rb = RID_ESP; +#endif + } else { + if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) { + mode = XM_OFS0; + } else if (checki8(as->mrm.ofs)) { + *--p = (MCode)as->mrm.ofs; + mode = XM_OFS8; + } else { + p -= 4; + *(int32_t *)p = as->mrm.ofs; + mode = XM_OFS32; + } + if (as->mrm.idx != RID_NONE) { + mrmidx: + as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p); + return; + } + if ((rb&7) == RID_ESP) + *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP); + } + } + as->mcp = emit_opm(xo, mode, rr, rb, p, 0); +} + +/* op rm/mrm, i */ +static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i) +{ + x86Op xo; + if (checki8(i)) { + emit_i8(as, i); + xo = XG_TOXOi8(xg); + } else { + emit_i32(as, i); + xo = XG_TOXOi(xg); + } + emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64)); +} + +/* -- Emit loads/stores 
--------------------------------------------------- */ + +/* Instruction selection for XMM moves. */ +#define XMM_MOVRR(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS) +#define XMM_MOVRM(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD) + +/* mov [base+ofs], i */ +static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i) +{ + emit_i32(as, i); + emit_rmro(as, XO_MOVmi, 0, base, ofs); +} + +/* mov [base+ofs], r */ +#define emit_movtomro(as, r, base, ofs) \ + emit_rmro(as, XO_MOVto, (r), (base), (ofs)) + +/* Get/set global_State fields. */ +#define emit_opgl(as, xo, r, field) \ + emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field) +#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field) +#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field) + +#define emit_setvmstate(as, i) \ + (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate)) + +/* mov r, i / xor r, r */ +static void emit_loadi(ASMState *as, Reg r, int32_t i) +{ + /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */ + if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP || + (as->curins+1 < as->T->nins && + IR(as->curins+1)->o == IR_HIOP)))) { + emit_rr(as, XO_ARITH(XOg_XOR), r, r); + } else { + MCode *p = as->mcp; + *(int32_t *)(p-4) = i; + p[-5] = (MCode)(XI_MOVri+(r&7)); + p -= 5; + REXRB(p, 0, r); + as->mcp = p; + } +} + +/* mov r, addr */ +#define emit_loada(as, r, addr) \ + emit_loadi(as, (r), ptr2addr((addr))) + +#if LJ_64 +/* mov r, imm64 or shorter 32 bit extended load. */ +static void emit_loadu64(ASMState *as, Reg r, uint64_t u64) +{ + if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */ + emit_loadi(as, r, (int32_t)u64); + } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */ + MCode *p = as->mcp; + *(int32_t *)(p-4) = (int32_t)u64; + as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4); + } else { /* Full-size 64 bit load. */ + MCode *p = as->mcp; + *(uint64_t *)(p-8) = u64; + p[-9] = (MCode)(XI_MOVri+(r&7)); + p[-10] = 0x48 + ((r>>3)&1); + p -= 10; + as->mcp = p; + } +} +#endif + +/* movsd r, [&tv->n] / xorps r, r */ +static void emit_loadn(ASMState *as, Reg r, cTValue *tv) +{ + if (tvispzero(tv)) /* Use xor only for +0. */ + emit_rr(as, XO_XORPS, r, r); + else + emit_rma(as, XMM_MOVRM(as), r, &tv->n); +} + +/* -- Emit control-flow instructions -------------------------------------- */ + +/* Label for short jumps. */ +typedef MCode *MCLabel; + +#if LJ_32 && LJ_HASFFI +/* jmp short target */ +static void emit_sjmp(ASMState *as, MCLabel target) +{ + MCode *p = as->mcp; + ptrdiff_t delta = target - p; + lua_assert(delta == (int8_t)delta); + p[-1] = (MCode)(int8_t)delta; + p[-2] = XI_JMPs; + as->mcp = p - 2; +} +#endif + +/* jcc short target */ +static void emit_sjcc(ASMState *as, int cc, MCLabel target) +{ + MCode *p = as->mcp; + ptrdiff_t delta = target - p; + lua_assert(delta == (int8_t)delta); + p[-1] = (MCode)(int8_t)delta; + p[-2] = (MCode)(XI_JCCs+(cc&15)); + as->mcp = p - 2; +} + +/* jcc short (pending target) */ +static MCLabel emit_sjcc_label(ASMState *as, int cc) +{ + MCode *p = as->mcp; + p[-1] = 0; + p[-2] = (MCode)(XI_JCCs+(cc&15)); + as->mcp = p - 2; + return p; +} + +/* Fixup jcc short target. */ +static void emit_sfixup(ASMState *as, MCLabel source) +{ + source[-1] = (MCode)(as->mcp-source); +} + +/* Return label pointing to current PC. */ +#define emit_label(as) ((as)->mcp) + +/* Compute relative 32 bit offset for jump and call instructions. 
*/ +static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target) +{ + ptrdiff_t delta = target - p; + lua_assert(delta == (int32_t)delta); + return (int32_t)delta; +} + +/* jcc target */ +static void emit_jcc(ASMState *as, int cc, MCode *target) +{ + MCode *p = as->mcp; + *(int32_t *)(p-4) = jmprel(p, target); + p[-5] = (MCode)(XI_JCCn+(cc&15)); + p[-6] = 0x0f; + as->mcp = p - 6; +} + +/* call target */ +static void emit_call_(ASMState *as, MCode *target) +{ + MCode *p = as->mcp; +#if LJ_64 + if (target-p != (int32_t)(target-p)) { + /* Assumes RID_RET is never an argument to calls and always clobbered. */ + emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET); + emit_loadu64(as, RID_RET, (uint64_t)target); + return; + } +#endif + *(int32_t *)(p-4) = jmprel(p, target); + p[-5] = XI_CALL; + as->mcp = p - 5; +} + +#define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f)) + +/* -- Emit generic operations --------------------------------------------- */ + +/* Use 64 bit operations to handle 64 bit IR types. */ +#if LJ_64 +#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0)) +#else +#define REX_64IR(ir, r) (r) +#endif + +/* Generic move between two regs. */ +static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) +{ + UNUSED(ir); + if (dst < RID_MAX_GPR) + emit_rr(as, XO_MOV, REX_64IR(ir, dst), src); + else + emit_rr(as, XMM_MOVRR(as), dst, src); +} + +/* Generic load of register from stack slot. */ +static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs) +{ + if (r < RID_MAX_GPR) + emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs); + else + emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs); +} + +/* Generic store of register to stack slot. */ +static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs) +{ + if (r < RID_MAX_GPR) + emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs); + else + emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs); +} + +/* Add offset to pointer. */ +static void emit_addptr(ASMState *as, Reg r, int32_t ofs) +{ + if (ofs) { + if ((as->flags & JIT_F_LEA_AGU)) + emit_rmro(as, XO_LEA, r, r, ofs); + else + emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs); + } +} + +#define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs)) + +/* Prefer rematerialization of BASE/L from global_State over spills. */ +#define emit_canremat(ref) ((ref) <= REF_BASE) + diff --git a/src/luajit2/src/lj_err.c b/src/luajit2/src/lj_err.c new file mode 100644 index 0000000000..42bb87fc66 --- /dev/null +++ b/src/luajit2/src/lj_err.c @@ -0,0 +1,711 @@ +/* +** Error handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_err_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_func.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_ff.h" +#include "lj_trace.h" +#include "lj_vm.h" + +/* +** LuaJIT can either use internal or external frame unwinding: +** +** - Internal frame unwinding (INT) is free-standing and doesn't require +** any OS or library support. +** +** - External frame unwinding (EXT) uses the system-provided unwind handler. +** +** Pros and Cons: +** +** - EXT requires unwind tables for *all* functions on the C stack between +** the pcall/catch and the error/throw. This is the default on x64, +** but needs to be manually enabled on x86/PPC for non-C++ code. +** +** - INT is faster when actually throwing errors (but this happens rarely). 
+** Setting up error handlers is zero-cost in any case. +** +** - EXT provides full interoperability with C++ exceptions. You can throw +** Lua errors or C++ exceptions through a mix of Lua frames and C++ frames. +** C++ destructors are called as needed. C++ exceptions caught by pcall +** are converted to the string "C++ exception". Lua errors can be caught +** with catch (...) in C++. +** +** - INT has only limited support for automatically catching C++ exceptions +** on POSIX systems using DWARF2 stack unwinding. Other systems may use +** the wrapper function feature. Lua errors thrown through C++ frames +** cannot be caught by C++ code and C++ destructors are not run. +** +** EXT is the default on x64 systems, INT is the default on all other systems. +** +** EXT can be manually enabled on POSIX systems using GCC and DWARF2 stack +** unwinding with -DLUAJIT_UNWIND_EXTERNAL. *All* C code must be compiled +** with -funwind-tables (or -fexceptions). This includes LuaJIT itself (set +** TARGET_CFLAGS), all of your C/Lua binding code, all loadable C modules +** and all C libraries that have callbacks which may be used to call back +** into Lua. C++ code must *not* be compiled with -fno-exceptions. +** +** EXT cannot be enabled on WIN32 since system exceptions use code-driven SEH. +** EXT is mandatory on WIN64 since the calling convention has an abundance +** of callee-saved registers (rbx, rbp, rsi, rdi, r12-r15, xmm6-xmm15). +** EXT is mandatory on POSIX/x64 since the interpreter doesn't save r12/r13. +*/ + +#if defined(__GNUC__) && (LJ_TARGET_X64 || defined(LUAJIT_UNWIND_EXTERNAL)) +#define LJ_UNWIND_EXT 1 +#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS +#define LJ_UNWIND_EXT 1 +#endif + +/* -- Error messages ------------------------------------------------------ */ + +/* Error message strings. */ +LJ_DATADEF const char *lj_err_allmsg = +#define ERRDEF(name, msg) msg "\0" +#include "lj_errmsg.h" +; + +/* -- Internal frame unwinding -------------------------------------------- */ + +/* Unwind Lua stack and move error message to new top. */ +LJ_NOINLINE static void unwindstack(lua_State *L, TValue *top) +{ + lj_func_closeuv(L, top); + if (top < L->top-1) { + copyTV(L, top, L->top-1); + L->top = top+1; + } + lj_state_relimitstack(L); +} + +/* Unwind until stop frame. Optionally cleanup frames. */ +static void *err_unwind(lua_State *L, void *stopcf, int errcode) +{ + TValue *frame = L->base-1; + void *cf = L->cframe; + while (cf) { + int32_t nres = cframe_nres(cframe_raw(cf)); + if (nres < 0) { /* C frame without Lua frame? */ + TValue *top = restorestack(L, -nres); + if (frame < top) { /* Frame reached? */ + if (errcode) { + L->cframe = cframe_prev(cf); + L->base = frame+1; + unwindstack(L, top); + } + return cf; + } + } + if (frame <= tvref(L->stack)) + break; + switch (frame_typep(frame)) { + case FRAME_LUA: /* Lua frame. */ + case FRAME_LUAP: + frame = frame_prevl(frame); + break; + case FRAME_C: /* C frame. */ +#if LJ_UNWIND_EXT + if (errcode) { + L->cframe = cframe_prev(cf); + L->base = frame_prevd(frame) + 1; + unwindstack(L, frame); + } else if (cf != stopcf) { + cf = cframe_prev(cf); + frame = frame_prevd(frame); + break; + } + return NULL; /* Continue unwinding. */ +#else + UNUSED(stopcf); + cf = cframe_prev(cf); + frame = frame_prevd(frame); + break; +#endif + case FRAME_CP: /* Protected C frame. */ + if (cframe_canyield(cf)) { /* Resume? 
*/ + if (errcode) { + L->cframe = NULL; + L->status = (uint8_t)errcode; + } + return cf; + } + if (errcode) { + L->cframe = cframe_prev(cf); + L->base = frame_prevd(frame) + 1; + unwindstack(L, frame); + } + return cf; + case FRAME_CONT: /* Continuation frame. */ + case FRAME_VARG: /* Vararg frame. */ + frame = frame_prevd(frame); + break; + case FRAME_PCALL: /* FF pcall() frame. */ + case FRAME_PCALLH: /* FF pcall() frame inside hook. */ + if (errcode) { + if (errcode == LUA_YIELD) { + frame = frame_prevd(frame); + break; + } + if (frame_typep(frame) == FRAME_PCALL) + hook_leave(G(L)); + L->cframe = cf; + L->base = frame_prevd(frame) + 1; + unwindstack(L, L->base); + } + return (void *)((intptr_t)cf | CFRAME_UNWIND_FF); + } + } + /* No C frame. */ + if (errcode) { + L->cframe = NULL; + L->base = tvref(L->stack)+1; + unwindstack(L, L->base); + if (G(L)->panic) + G(L)->panic(L); + exit(EXIT_FAILURE); + } + return L; /* Anything non-NULL will do. */ +} + +/* -- External frame unwinding -------------------------------------------- */ + +#if defined(__GNUC__) && !defined(__symbian__) + +#ifdef __clang__ +/* http://llvm.org/bugs/show_bug.cgi?id=8703 */ +#define __unwind_word__ word +#endif + +#include <unwind.h> + +#if !LJ_TARGET_ARM + +#define LJ_UEXCLASS 0x4c55414a49543200ULL /* LUAJIT2\0 */ +#define LJ_UEXCLASS_MAKE(c) (LJ_UEXCLASS | (_Unwind_Exception_Class)(c)) +#define LJ_UEXCLASS_CHECK(cl) (((cl) ^ LJ_UEXCLASS) <= 0xff) +#define LJ_UEXCLASS_ERRCODE(cl) ((int)((cl) & 0xff)) + +/* DWARF2 personality handler referenced from interpreter .eh_frame. */ +LJ_FUNCA int lj_err_unwind_dwarf(int version, _Unwind_Action actions, + _Unwind_Exception_Class uexclass, struct _Unwind_Exception *uex, + struct _Unwind_Context *ctx) +{ + void *cf; + lua_State *L; + if (version != 1) + return _URC_FATAL_PHASE1_ERROR; + UNUSED(uexclass); + cf = (void *)_Unwind_GetCFA(ctx); + L = cframe_L(cf); + if ((actions & _UA_SEARCH_PHASE)) { +#if LJ_UNWIND_EXT + if (err_unwind(L, cf, 0) == NULL) + return _URC_CONTINUE_UNWIND; +#endif + if (!LJ_UEXCLASS_CHECK(uexclass)) { + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); + } + return _URC_HANDLER_FOUND; + } + if ((actions & _UA_CLEANUP_PHASE)) { + int errcode; + if (LJ_UEXCLASS_CHECK(uexclass)) { + errcode = LJ_UEXCLASS_ERRCODE(uexclass); + } else { + if ((actions & _UA_HANDLER_FRAME)) + _Unwind_DeleteException(uex); + errcode = LUA_ERRRUN; + } +#if LJ_UNWIND_EXT + cf = err_unwind(L, cf, errcode); + if (cf) { + _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode); + _Unwind_SetIP(ctx, (_Unwind_Ptr)(cframe_unwind_ff(cf) ? + lj_vm_unwind_ff_eh : + lj_vm_unwind_c_eh)); + return _URC_INSTALL_CONTEXT; + } +#if LJ_TARGET_X86ORX64 + else if ((actions & _UA_HANDLER_FRAME)) { + /* Workaround for ancient libgcc bug. Still present in RHEL 5.5. :-/ + ** Real fix: http://gcc.gnu.org/viewcvs/trunk/gcc/unwind-dw2.c?r1=121165&r2=124837&pathrev=153877&diff_format=h + */ + _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode); + _Unwind_SetIP(ctx, (_Unwind_Ptr)lj_vm_unwind_rethrow); + return _URC_INSTALL_CONTEXT; + } +#endif +#else + /* This is not the proper way to escape from the unwinder. We get away with + ** it on x86/PPC because the interpreter restores all callee-saved regs. + */ + lj_err_throw(L, errcode); +#endif + } + return _URC_CONTINUE_UNWIND; +} + +#if LJ_UNWIND_EXT +/* NYI: this is not thread-safe. */ +static struct _Unwind_Exception static_uex; + +/* Raise DWARF2 exception. 
*/ +static void err_raise_ext(int errcode) +{ + static_uex.exception_class = LJ_UEXCLASS_MAKE(errcode); + static_uex.exception_cleanup = NULL; + _Unwind_RaiseException(&static_uex); +} +#endif + +#else + +/* ARM unwinder personality handler referenced from interpreter .ARM.extab. */ +LJ_FUNCA _Unwind_Reason_Code lj_err_unwind_arm(_Unwind_State state, + _Unwind_Control_Block *ucb, + _Unwind_Context *ctx) +{ + void *cf = (void *)_Unwind_GetGR(ctx, 13); + lua_State *L = cframe_L(cf); + if ((state & _US_ACTION_MASK) == _US_VIRTUAL_UNWIND_FRAME) { + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); + return _URC_HANDLER_FOUND; + } + if ((state & _US_ACTION_MASK) == _US_UNWIND_FRAME_STARTING) { + _Unwind_DeleteException(ucb); + _Unwind_SetGR(ctx, 15, (_Unwind_Word)(void *)lj_err_throw); + _Unwind_SetGR(ctx, 0, (_Unwind_Word)L); + _Unwind_SetGR(ctx, 1, (_Unwind_Word)LUA_ERRRUN); + return _URC_INSTALL_CONTEXT; + } + if (__gnu_unwind_frame(ucb, ctx) != _URC_OK) + return _URC_FAILURE; + return _URC_CONTINUE_UNWIND; +} + +#endif + +#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS + +/* +** Someone in Redmond owes me several days of my life. A lot of this is +** undocumented or just plain wrong on MSDN. Some of it can be gathered +** from 3rd party docs or must be found by trial-and-error. They really +** don't want you to write your own language-specific exception handler +** or to interact gracefully with MSVC. :-( +** +** Apparently MSVC doesn't call C++ destructors for foreign exceptions +** unless you compile your C++ code with /EHa. Unfortunately this means +** catch (...) also catches things like access violations. The use of +** _set_se_translator doesn't really help, because it requires /EHa, too. +*/ + +#define WIN32_LEAN_AND_MEAN +#include <windows.h> + +/* Taken from: http://www.nynaeve.net/?p=99 */ +typedef struct UndocumentedDispatcherContext { + ULONG64 ControlPc; + ULONG64 ImageBase; + PRUNTIME_FUNCTION FunctionEntry; + ULONG64 EstablisherFrame; + ULONG64 TargetIp; + PCONTEXT ContextRecord; + PEXCEPTION_ROUTINE LanguageHandler; + PVOID HandlerData; + PUNWIND_HISTORY_TABLE HistoryTable; + ULONG ScopeIndex; + ULONG Fill0; +} UndocumentedDispatcherContext; + +#ifdef _MSC_VER +/* Another wild guess. */ +extern __DestructExceptionObject(EXCEPTION_RECORD *rec, int nothrow); +#endif + +#define LJ_MSVC_EXCODE ((DWORD)0xe06d7363) + +#define LJ_EXCODE ((DWORD)0xe24c4a00) +#define LJ_EXCODE_MAKE(c) (LJ_EXCODE | (DWORD)(c)) +#define LJ_EXCODE_CHECK(cl) (((cl) ^ LJ_EXCODE) <= 0xff) +#define LJ_EXCODE_ERRCODE(cl) ((int)((cl) & 0xff)) + +/* Win64 exception handler for interpreter frame. */ +LJ_FUNCA EXCEPTION_DISPOSITION lj_err_unwind_win64(EXCEPTION_RECORD *rec, + void *cf, CONTEXT *ctx, UndocumentedDispatcherContext *dispatch) +{ + lua_State *L = cframe_L(cf); + int errcode = LJ_EXCODE_CHECK(rec->ExceptionCode) ? + LJ_EXCODE_ERRCODE(rec->ExceptionCode) : LUA_ERRRUN; + if ((rec->ExceptionFlags & 6)) { /* EH_UNWINDING|EH_EXIT_UNWIND */ + /* Unwind internal frames. */ + err_unwind(L, cf, errcode); + } else { + void *cf2 = err_unwind(L, cf, 0); + if (cf2) { /* We catch it, so start unwinding the upper frames. */ + if (rec->ExceptionCode == LJ_MSVC_EXCODE) { +#ifdef _MSC_VER + __DestructExceptionObject(rec, 1); +#endif + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); + } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) { + /* Don't catch access violations etc. 
*/ + return ExceptionContinueSearch; + } + /* Unwind the stack and call all handlers for all lower C frames + ** (including ourselves) again with EH_UNWINDING set. Then set + ** rsp = cf, rax = errcode and jump to the specified target. + */ + RtlUnwindEx(cf, (void *)((cframe_unwind_ff(cf2) && errcode != LUA_YIELD) ? + lj_vm_unwind_ff_eh : + lj_vm_unwind_c_eh), + rec, (void *)errcode, ctx, dispatch->HistoryTable); + /* RtlUnwindEx should never return. */ + } + } + return ExceptionContinueSearch; +} + +/* Raise Windows exception. */ +static void err_raise_ext(int errcode) +{ + RaiseException(LJ_EXCODE_MAKE(errcode), 1 /* EH_NONCONTINUABLE */, 0, NULL); +} + +#endif + +/* -- Error handling ------------------------------------------------------ */ + +/* Throw error. Find catch frame, unwind stack and continue. */ +LJ_NOINLINE void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode) +{ + global_State *g = G(L); + lj_trace_abort(g); + setgcrefnull(g->jit_L); + L->status = 0; +#if LJ_UNWIND_EXT + err_raise_ext(errcode); + /* + ** A return from this function signals a corrupt C stack that cannot be + ** unwound. We have no choice but to call the panic function and exit. + ** + ** Usually this is caused by a C function without unwind information. + ** This should never happen on x64, but may happen if you've manually + ** enabled LUAJIT_UNWIND_EXTERNAL and forgot to recompile *every* + ** non-C++ file with -funwind-tables. + */ + if (G(L)->panic) + G(L)->panic(L); +#else + { + void *cf = err_unwind(L, NULL, errcode); + if (cframe_unwind_ff(cf)) + lj_vm_unwind_ff(cframe_raw(cf)); + else + lj_vm_unwind_c(cframe_raw(cf), errcode); + } +#endif + exit(EXIT_FAILURE); +} + +/* Return string object for error message. */ +LJ_NOINLINE GCstr *lj_err_str(lua_State *L, ErrMsg em) +{ + return lj_str_newz(L, err2msg(em)); +} + +/* Out-of-memory error. */ +LJ_NOINLINE void lj_err_mem(lua_State *L) +{ + if (L->status == LUA_ERRERR+1) /* Don't touch the stack during lua_open. */ + lj_vm_unwind_c(L->cframe, LUA_ERRMEM); + L->top = L->base; + setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRMEM)); + lj_err_throw(L, LUA_ERRMEM); +} + +/* Find error function for runtime errors. Requires an extra stack traversal. */ +static ptrdiff_t finderrfunc(lua_State *L) +{ + cTValue *frame = L->base-1, *bot = tvref(L->stack); + void *cf = L->cframe; + while (frame > bot) { + lua_assert(cf != NULL); + while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */ + if (frame >= restorestack(L, -cframe_nres(cf))) + break; + if (cframe_errfunc(cf) >= 0) /* Error handler not inherited (-1)? */ + return cframe_errfunc(cf); + cf = cframe_prev(cf); /* Else unwind cframe and continue searching. */ + if (cf == NULL) + return 0; + } + switch (frame_typep(frame)) { + case FRAME_LUA: + case FRAME_LUAP: + frame = frame_prevl(frame); + break; + case FRAME_C: + cf = cframe_prev(cf); + /* fallthrough */ + case FRAME_CONT: + case FRAME_VARG: + frame = frame_prevd(frame); + break; + case FRAME_CP: + if (cframe_canyield(cf)) return 0; + if (cframe_errfunc(cf) >= 0) + return cframe_errfunc(cf); + frame = frame_prevd(frame); + break; + case FRAME_PCALL: + case FRAME_PCALLH: + if (frame_ftsz(frame) >= (ptrdiff_t)(2*sizeof(TValue))) /* xpcall? */ + return savestack(L, frame-1); /* Point to xpcall's errorfunc. */ + return 0; + default: + lua_assert(0); + return 0; + } + } + return 0; +} + +/* Runtime error. 
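[Editorial note, not part of the patch: when lj_err_throw() above finds no protected frame, or the C stack cannot be unwound externally, it falls back to the panic handler and then exit(EXIT_FAILURE). The usual embedding pattern is therefore to install a panic function and only enter Lua through protected calls. A hedged host-side sketch using only the public Lua/LuaJIT C API; link against LuaJIT (e.g. -lluajit-5.1).]

#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
#include <stdio.h>

static int panic(lua_State *L)
{
  /* Returning from here still ends in exit(EXIT_FAILURE) inside the VM. */
  fprintf(stderr, "unprotected error: %s\n", lua_tostring(L, -1));
  return 0;
}

int main(void)
{
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  lua_atpanic(L, panic);
  /* Protected entry: the error is caught and returned, the panic handler is not hit. */
  if (luaL_dostring(L, "error('boom')"))
    printf("caught: %s\n", lua_tostring(L, -1));
  lua_close(L);
  return 0;
}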
*/ +LJ_NOINLINE void lj_err_run(lua_State *L) +{ + ptrdiff_t ef = finderrfunc(L); + if (ef) { + TValue *errfunc = restorestack(L, ef); + TValue *top = L->top; + lj_trace_abort(G(L)); + if (!tvisfunc(errfunc) || L->status == LUA_ERRERR) { + setstrV(L, top-1, lj_err_str(L, LJ_ERR_ERRERR)); + lj_err_throw(L, LUA_ERRERR); + } + L->status = LUA_ERRERR; + copyTV(L, top, top-1); + copyTV(L, top-1, errfunc); + L->top = top+1; + lj_vm_call(L, top, 1+1); /* Stack: |errfunc|msg| -> |msg| */ + } + lj_err_throw(L, LUA_ERRRUN); +} + +/* Formatted runtime error message. */ +LJ_NORET LJ_NOINLINE static void err_msgv(lua_State *L, ErrMsg em, ...) +{ + const char *msg; + va_list argp; + va_start(argp, em); + if (curr_funcisL(L)) L->top = curr_topL(L); + msg = lj_str_pushvf(L, err2msg(em), argp); + va_end(argp); + lj_debug_addloc(L, msg, L->base-1, NULL); + lj_err_run(L); +} + +/* Non-vararg variant for better calling conventions. */ +LJ_NOINLINE void lj_err_msg(lua_State *L, ErrMsg em) +{ + err_msgv(L, em); +} + +/* Lexer error. */ +LJ_NOINLINE void lj_err_lex(lua_State *L, GCstr *src, const char *tok, + BCLine line, ErrMsg em, va_list argp) +{ + char buff[LUA_IDSIZE]; + const char *msg; + lj_debug_shortname(buff, src); + msg = lj_str_pushvf(L, err2msg(em), argp); + msg = lj_str_pushf(L, "%s:%d: %s", buff, line, msg); + if (tok) + lj_str_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tok); + lj_err_throw(L, LUA_ERRSYNTAX); +} + +/* Typecheck error for operands. */ +LJ_NOINLINE void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm) +{ + const char *tname = typename(o); + const char *opname = err2msg(opm); + if (curr_funcisL(L)) { + GCproto *pt = curr_proto(L); + const BCIns *pc = cframe_Lpc(L) - 1; + const char *oname = NULL; + const char *kind = lj_debug_slotname(pt, pc, (BCReg)(o-L->base), &oname); + if (kind) + err_msgv(L, LJ_ERR_BADOPRT, opname, kind, oname, tname); + } + err_msgv(L, LJ_ERR_BADOPRV, opname, tname); +} + +/* Typecheck error for ordered comparisons. */ +LJ_NOINLINE void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2) +{ + const char *t1 = typename(o1); + const char *t2 = typename(o2); + err_msgv(L, t1 == t2 ? LJ_ERR_BADCMPV : LJ_ERR_BADCMPT, t1, t2); + /* This assumes the two "boolean" entries are commoned by the C compiler. */ +} + +/* Typecheck error for __call. */ +LJ_NOINLINE void lj_err_optype_call(lua_State *L, TValue *o) +{ + /* Gross hack if lua_[p]call or pcall/xpcall fail for a non-callable object: + ** L->base still points to the caller. So add a dummy frame with L instead + ** of a function. See lua_getstack(). + */ + const BCIns *pc = cframe_Lpc(L); + if (((ptrdiff_t)pc & FRAME_TYPE) != FRAME_LUA) { + const char *tname = typename(o); + setframe_pc(o, pc); + setframe_gc(o, obj2gco(L)); + L->top = L->base = o+1; + err_msgv(L, LJ_ERR_BADCALL, tname); + } + lj_err_optype(L, o, LJ_ERR_OPCALL); +} + +/* Error in context of caller. */ +LJ_NOINLINE void lj_err_callermsg(lua_State *L, const char *msg) +{ + TValue *frame = L->base-1; + TValue *pframe = NULL; + if (frame_islua(frame)) { + pframe = frame_prevl(frame); + } else if (frame_iscont(frame)) { + pframe = frame_prevd(frame); +#if LJ_HASFFI + /* Remove frame for FFI metamethods. */ + if (frame_func(frame)->c.ffid >= FF_ffi_meta___index && + frame_func(frame)->c.ffid <= FF_ffi_meta___tostring) { + L->base = pframe+1; + L->top = frame; + } +#endif + } + lj_debug_addloc(L, msg, pframe, frame); + lj_err_run(L); +} + +/* Formatted error in context of caller. */ +LJ_NOINLINE void lj_err_callerv(lua_State *L, ErrMsg em, ...) 
+{ + const char *msg; + va_list argp; + va_start(argp, em); + msg = lj_str_pushvf(L, err2msg(em), argp); + va_end(argp); + lj_err_callermsg(L, msg); +} + +/* Error in context of caller. */ +LJ_NOINLINE void lj_err_caller(lua_State *L, ErrMsg em) +{ + lj_err_callermsg(L, err2msg(em)); +} + +/* Argument error message. */ +LJ_NORET LJ_NOINLINE static void err_argmsg(lua_State *L, int narg, + const char *msg) +{ + const char *fname = "?"; + const char *ftype = lj_debug_funcname(L, L->base - 1, &fname); + if (narg < 0 && narg > LUA_REGISTRYINDEX) + narg = (int)(L->top - L->base) + narg + 1; + if (ftype && ftype[3] == 'h' && --narg == 0) /* Check for "method". */ + msg = lj_str_pushf(L, err2msg(LJ_ERR_BADSELF), fname, msg); + else + msg = lj_str_pushf(L, err2msg(LJ_ERR_BADARG), narg, fname, msg); + lj_err_callermsg(L, msg); +} + +/* Formatted argument error. */ +LJ_NOINLINE void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...) +{ + const char *msg; + va_list argp; + va_start(argp, em); + msg = lj_str_pushvf(L, err2msg(em), argp); + va_end(argp); + err_argmsg(L, narg, msg); +} + +/* Argument error. */ +LJ_NOINLINE void lj_err_arg(lua_State *L, int narg, ErrMsg em) +{ + err_argmsg(L, narg, err2msg(em)); +} + +/* Typecheck error for arguments. */ +LJ_NOINLINE void lj_err_argtype(lua_State *L, int narg, const char *xname) +{ + TValue *o = L->base + narg-1; + const char *tname = o < L->top ? typename(o) : lj_obj_typename[0]; + const char *msg = lj_str_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname); + err_argmsg(L, narg, msg); +} + +/* Typecheck error for arguments. */ +LJ_NOINLINE void lj_err_argt(lua_State *L, int narg, int tt) +{ + lj_err_argtype(L, narg, lj_obj_typename[tt+1]); +} + +/* -- Public error handling API ------------------------------------------- */ + +LUA_API lua_CFunction lua_atpanic(lua_State *L, lua_CFunction panicf) +{ + lua_CFunction old = G(L)->panic; + G(L)->panic = panicf; + return old; +} + +/* Forwarders for the public API (C calling convention and no LJ_NORET). */ +LUA_API int lua_error(lua_State *L) +{ + lj_err_run(L); + return 0; /* unreachable */ +} + +LUALIB_API int luaL_argerror(lua_State *L, int narg, const char *msg) +{ + err_argmsg(L, narg, msg); + return 0; /* unreachable */ +} + +LUALIB_API int luaL_typerror(lua_State *L, int narg, const char *xname) +{ + lj_err_argtype(L, narg, xname); + return 0; /* unreachable */ +} + +LUALIB_API void luaL_where(lua_State *L, int level) +{ + int size; + cTValue *frame = lj_debug_frame(L, level, &size); + lj_debug_addloc(L, "", frame, size ? frame+size : NULL); +} + +LUALIB_API int luaL_error(lua_State *L, const char *fmt, ...) +{ + const char *msg; + va_list argp; + va_start(argp, fmt); + msg = lj_str_pushvf(L, fmt, argp); + va_end(argp); + lj_err_callermsg(L, msg); + return 0; /* unreachable */ +} + diff --git a/src/luajit2/src/lj_err.h b/src/luajit2/src/lj_err.h new file mode 100644 index 0000000000..c65f484efd --- /dev/null +++ b/src/luajit2/src/lj_err.h @@ -0,0 +1,41 @@ +/* +** Error handling. +** Copyright (C) 2005-2011 Mike Pall. 
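[Editorial note, not part of the patch: err_argmsg() and its luaL_argerror()/lj_err_arg() forwarders above format the familiar "bad argument #n to 'func'" message. A hedged host-side sketch from a C binding's point of view, using only the public Lua/LuaJIT C API; the clamp function and its argument check are made up for illustration.]

#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
#include <stdio.h>

static int l_clamp(lua_State *L)
{
  lua_Number lo = luaL_checknumber(L, 1);
  lua_Number hi = luaL_checknumber(L, 2);
  lua_Number v  = luaL_checknumber(L, 3);
  if (lo > hi)
    return luaL_argerror(L, 2, "hi must not be smaller than lo");
  lua_pushnumber(L, v < lo ? lo : (v > hi ? hi : v));
  return 1;
}

int main(void)
{
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  lua_pushcfunction(L, l_clamp);
  lua_setglobal(L, "clamp");
  /* The bad call is caught by pcall; the message text comes from err_argmsg(). */
  luaL_dostring(L, "print(pcall(clamp, 10, 1, 5))");
  lua_close(L);
  return 0;
}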
See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_ERR_H +#define _LJ_ERR_H + +#include <stdarg.h> + +#include "lj_obj.h" + +typedef enum { +#define ERRDEF(name, msg) \ + LJ_ERR_##name, LJ_ERR_##name##_ = LJ_ERR_##name + sizeof(msg)-1, +#include "lj_errmsg.h" + LJ_ERR__MAX +} ErrMsg; + +LJ_DATA const char *lj_err_allmsg; +#define err2msg(em) (lj_err_allmsg+(int)(em)) + +LJ_FUNC GCstr *lj_err_str(lua_State *L, ErrMsg em); +LJ_FUNCA_NORET void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode); +LJ_FUNC_NORET void lj_err_mem(lua_State *L); +LJ_FUNC_NORET void lj_err_run(lua_State *L); +LJ_FUNC_NORET void lj_err_msg(lua_State *L, ErrMsg em); +LJ_FUNC_NORET void lj_err_lex(lua_State *L, GCstr *src, const char *tok, + BCLine line, ErrMsg em, va_list argp); +LJ_FUNC_NORET void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm); +LJ_FUNC_NORET void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2); +LJ_FUNC_NORET void lj_err_optype_call(lua_State *L, TValue *o); +LJ_FUNC_NORET void lj_err_callermsg(lua_State *L, const char *msg); +LJ_FUNC_NORET void lj_err_callerv(lua_State *L, ErrMsg em, ...); +LJ_FUNC_NORET void lj_err_caller(lua_State *L, ErrMsg em); +LJ_FUNC_NORET void lj_err_arg(lua_State *L, int narg, ErrMsg em); +LJ_FUNC_NORET void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...); +LJ_FUNC_NORET void lj_err_argtype(lua_State *L, int narg, const char *xname); +LJ_FUNC_NORET void lj_err_argt(lua_State *L, int narg, int tt); + +#endif diff --git a/src/luajit2/src/lj_errmsg.h b/src/luajit2/src/lj_errmsg.h new file mode 100644 index 0000000000..dc015ef2c0 --- /dev/null +++ b/src/luajit2/src/lj_errmsg.h @@ -0,0 +1,171 @@ +/* +** VM error messages. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* This file may be included multiple times with different ERRDEF macros. */ + +/* Basic error handling. */ +ERRDEF(ERRMEM, "not enough memory") +ERRDEF(ERRERR, "error in error handling") +ERRDEF(ERRCPP, "C++ exception") + +/* Allocations. */ +ERRDEF(STROV, "string length overflow") +ERRDEF(UDATAOV, "userdata length overflow") +ERRDEF(STKOV, "stack overflow") +ERRDEF(STKOVM, "stack overflow (%s)") +ERRDEF(TABOV, "table overflow") + +/* Table indexing. */ +ERRDEF(NANIDX, "table index is NaN") +ERRDEF(NILIDX, "table index is nil") +ERRDEF(NEXTIDX, "invalid key to " LUA_QL("next")) + +/* Metamethod resolving. */ +ERRDEF(BADCALL, "attempt to call a %s value") +ERRDEF(BADOPRT, "attempt to %s %s " LUA_QS " (a %s value)") +ERRDEF(BADOPRV, "attempt to %s a %s value") +ERRDEF(BADCMPT, "attempt to compare %s with %s") +ERRDEF(BADCMPV, "attempt to compare two %s values") +ERRDEF(GETLOOP, "loop in gettable") +ERRDEF(SETLOOP, "loop in settable") +ERRDEF(OPCALL, "call") +ERRDEF(OPINDEX, "index") +ERRDEF(OPARITH, "perform arithmetic on") +ERRDEF(OPCAT, "concatenate") +ERRDEF(OPLEN, "get length of") + +/* Type checks. */ +ERRDEF(BADSELF, "calling " LUA_QS " on bad self (%s)") +ERRDEF(BADARG, "bad argument #%d to " LUA_QS " (%s)") +ERRDEF(BADTYPE, "%s expected, got %s") +ERRDEF(BADVAL, "invalid value") +ERRDEF(NOVAL, "value expected") +ERRDEF(NOCORO, "coroutine expected") +ERRDEF(NOTABN, "nil or table expected") +ERRDEF(NOFUNCL, "function or level expected") +ERRDEF(NOSFT, "string/function/table expected") +ERRDEF(NOPROXY, "boolean or proxy expected") +ERRDEF(FORINIT, LUA_QL("for") " initial value must be a number") +ERRDEF(FORLIM, LUA_QL("for") " limit must be a number") +ERRDEF(FORSTEP, LUA_QL("for") " step must be a number") + +/* C API checks. 
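[Editorial note, not part of the patch: lj_err_allmsg (defined in lj_err.c above) and the ErrMsg enum here run the same ERRDEF X-macro twice: once to concatenate every message into a single string with embedded NUL separators, and once to turn each name into the byte offset of its message, so err2msg() is a single pointer addition. A standalone sketch of the trick with two made-up messages follows.]

#include <stdio.h>

#define MSGLIST(X) \
  X(ERRMEM, "not enough memory") \
  X(ERRERR, "error in error handling")

/* Pass 1: one big string, messages separated by embedded '\0'. */
static const char allmsg[] =
#define ERRDEF(name, msg) msg "\0"
MSGLIST(ERRDEF)
#undef ERRDEF
;

/* Pass 2: ERR_<name> becomes the byte offset of its message in allmsg. */
enum {
#define ERRDEF(name, msg) ERR_##name, ERR_##name##_ = ERR_##name + sizeof(msg)-1,
MSGLIST(ERRDEF)
#undef ERRDEF
  ERR__MAX
};

#define err2msg(em)  (allmsg + (int)(em))

int main(void)
{
  printf("%s / %s\n", err2msg(ERR_ERRMEM), err2msg(ERR_ERRERR));
  return 0;
}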
*/ +ERRDEF(NOENV, "no calling environment") +ERRDEF(CYIELD, "attempt to yield across C-call boundary") +ERRDEF(BADLU, "bad light userdata pointer") +ERRDEF(NOGCMM, "bad action while in __gc metamethod") +#if LJ_TARGET_WINDOWS +ERRDEF(BADFPU, "bad FPU precision (use D3DCREATE_FPU_PRESERVE with DirectX)") +#endif + +/* Standard library function errors. */ +ERRDEF(ASSERT, "assertion failed!") +ERRDEF(PROTMT, "cannot change a protected metatable") +ERRDEF(UNPACK, "too many results to unpack") +ERRDEF(RDRSTR, "reader function must return a string") +ERRDEF(PRTOSTR, LUA_QL("tostring") " must return a string to " LUA_QL("print")) +ERRDEF(IDXRNG, "index out of range") +ERRDEF(BASERNG, "base out of range") +ERRDEF(LVLRNG, "level out of range") +ERRDEF(INVLVL, "invalid level") +ERRDEF(INVOPT, "invalid option") +ERRDEF(INVOPTM, "invalid option " LUA_QS) +ERRDEF(INVFMT, "invalid format") +ERRDEF(SETFENV, LUA_QL("setfenv") " cannot change environment of given object") +ERRDEF(CORUN, "cannot resume running coroutine") +ERRDEF(CODEAD, "cannot resume dead coroutine") +ERRDEF(COSUSP, "cannot resume non-suspended coroutine") +ERRDEF(TABINS, "wrong number of arguments to " LUA_QL("insert")) +ERRDEF(TABCAT, "invalid value (%s) at index %d in table for " LUA_QL("concat")) +ERRDEF(TABSORT, "invalid order function for sorting") +ERRDEF(IOCLFL, "attempt to use a closed file") +ERRDEF(IOSTDCL, "standard file is closed") +ERRDEF(OSUNIQF, "unable to generate a unique filename") +ERRDEF(OSDATEF, "field " LUA_QS " missing in date table") +ERRDEF(STRDUMP, "unable to dump given function") +ERRDEF(STRSLC, "string slice too long") +ERRDEF(STRPATB, "missing " LUA_QL("[") " after " LUA_QL("%f") " in pattern") +ERRDEF(STRPATC, "invalid pattern capture") +ERRDEF(STRPATE, "malformed pattern (ends with " LUA_QL("%") ")") +ERRDEF(STRPATM, "malformed pattern (missing " LUA_QL("]") ")") +ERRDEF(STRPATU, "unbalanced pattern") +ERRDEF(STRCAPI, "invalid capture index") +ERRDEF(STRCAPN, "too many captures") +ERRDEF(STRCAPU, "unfinished capture") +ERRDEF(STRFMTO, "invalid option " LUA_QL("%%%c") " to " LUA_QL("format")) +ERRDEF(STRFMTR, "invalid format (repeated flags)") +ERRDEF(STRFMTW, "invalid format (width or precision too long)") +ERRDEF(STRGSRV, "invalid replacement value (a %s)") +ERRDEF(BADMODN, "name conflict for module " LUA_QS) +#if LJ_HASJIT +ERRDEF(NOJIT, "JIT compiler disabled, CPU does not support SSE2") +#elif defined(LJ_ARCH_NOJIT) +ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)") +#else +ERRDEF(NOJIT, "JIT compiler permanently disabled by build option") +#endif +ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS) + +/* Lexer/parser errors. 
*/ +ERRDEF(XNEAR, "%s near " LUA_QS) +ERRDEF(XELEM, "lexical element too long") +ERRDEF(XLINES, "chunk has too many lines") +ERRDEF(XLEVELS, "chunk has too many syntax levels") +ERRDEF(XNUMBER, "malformed number") +ERRDEF(XLSTR, "unfinished long string") +ERRDEF(XLCOM, "unfinished long comment") +ERRDEF(XSTR, "unfinished string") +ERRDEF(XESC, "invalid escape sequence") +ERRDEF(XLDELIM, "invalid long string delimiter") +ERRDEF(XTOKEN, LUA_QS " expected") +ERRDEF(XJUMP, "control structure too long") +ERRDEF(XSLOTS, "function or expression too complex") +ERRDEF(XLIMC, "chunk has more than %d local variables") +ERRDEF(XLIMM, "main function has more than %d %s") +ERRDEF(XLIMF, "function at line %d has more than %d %s") +ERRDEF(XMATCH, LUA_QS " expected (to close " LUA_QS " at line %d)") +ERRDEF(XFIXUP, "function too long for return fixup") +ERRDEF(XPARAM, "<name> or " LUA_QL("...") " expected") +ERRDEF(XAMBIG, "ambiguous syntax (function call x new statement)") +ERRDEF(XFUNARG, "function arguments expected") +ERRDEF(XSYMBOL, "unexpected symbol") +ERRDEF(XDOTS, "cannot use " LUA_QL("...") " outside a vararg function") +ERRDEF(XSYNTAX, "syntax error") +ERRDEF(XBREAK, "no loop to break") +ERRDEF(XFOR, LUA_QL("=") " or " LUA_QL("in") " expected") + +/* Bytecode reader errors. */ +ERRDEF(BCFMT, "cannot load incompatible bytecode") +ERRDEF(BCBAD, "cannot load malformed bytecode") + +#if LJ_HASFFI +/* FFI errors. */ +ERRDEF(FFI_INVTYPE, "invalid C type") +ERRDEF(FFI_INVSIZE, "size of C type is unknown or too large") +ERRDEF(FFI_BADSCL, "bad storage class") +ERRDEF(FFI_DECLSPEC, "declaration specifier expected") +ERRDEF(FFI_BADTAG, "undeclared or implicit tag " LUA_QS) +ERRDEF(FFI_REDEF, "attempt to redefine " LUA_QS) +ERRDEF(FFI_INITOV, "too many initializers for " LUA_QS) +ERRDEF(FFI_BADCONV, "cannot convert " LUA_QS " to " LUA_QS) +ERRDEF(FFI_BADLEN, "attempt to get length of " LUA_QS) +ERRDEF(FFI_BADCONCAT, "attempt to concatenate " LUA_QS " and " LUA_QS) +ERRDEF(FFI_BADARITH, "attempt to perform arithmetic on " LUA_QS " and " LUA_QS) +ERRDEF(FFI_BADCOMP, "attempt to compare " LUA_QS " with " LUA_QS) +ERRDEF(FFI_BADCALL, LUA_QS " is not callable") +ERRDEF(FFI_NUMARG, "wrong number of arguments for function call") +ERRDEF(FFI_BADMEMBER, LUA_QS " has no member named " LUA_QS) +ERRDEF(FFI_BADIDX, LUA_QS " cannot be indexed") +ERRDEF(FFI_WRCONST, "attempt to write to constant location") +ERRDEF(FFI_NODECL, "missing declaration for symbol " LUA_QS) +ERRDEF(FFI_NYIPACKBIT, "NYI: packed bit fields") +ERRDEF(FFI_NYICALL, "NYI: cannot call this C function (yet)") +#endif + +#undef ERRDEF + +/* Detecting unused error messages: + awk -F, '/^ERRDEF/ { gsub(/ERRDEF./, ""); printf "grep -q LJ_ERR_%s *.[ch] || echo %s\n", $1, $1}' lj_errmsg.h | sh +*/ diff --git a/src/luajit2/src/lj_ff.h b/src/luajit2/src/lj_ff.h new file mode 100644 index 0000000000..0b01e16ab6 --- /dev/null +++ b/src/luajit2/src/lj_ff.h @@ -0,0 +1,18 @@ +/* +** Fast function IDs. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_FF_H +#define _LJ_FF_H + +/* Fast function ID. */ +typedef enum { + FF_LUA_ = FF_LUA, /* Lua function (must be 0). */ + FF_C_ = FF_C, /* Regular C function (must be 1). 
*/ +#define FFDEF(name) FF_##name, +#include "lj_ffdef.h" + FF__MAX +} FastFunc; + +#endif diff --git a/src/luajit2/src/lj_ffrecord.c b/src/luajit2/src/lj_ffrecord.c new file mode 100644 index 0000000000..067f66cff2 --- /dev/null +++ b/src/luajit2/src/lj_ffrecord.c @@ -0,0 +1,853 @@ +/* +** Fast function call recorder. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_ffrecord_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_ff.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_record.h" +#include "lj_ffrecord.h" +#include "lj_crecord.h" +#include "lj_dispatch.h" +#include "lj_vm.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* -- Fast function recording handlers ------------------------------------ */ + +/* Conventions for fast function call handlers: +** +** The argument slots start at J->base[0]. All of them are guaranteed to be +** valid and type-specialized references. J->base[J->maxslot] is set to 0 +** as a sentinel. The runtime argument values start at rd->argv[0]. +** +** In general fast functions should check for presence of all of their +** arguments and for the correct argument types. Some simplifications +** are allowed if the interpreter throws instead. But even if recording +** is aborted, the generated IR must be consistent (no zero-refs). +** +** The number of results in rd->nres is set to 1. Handlers that return +** a different number of results need to override it. A negative value +** prevents return processing (e.g. for pending calls). +** +** Results need to be stored starting at J->base[0]. Return processing +** moves them to the right slots later. +** +** The per-ffid auxiliary data is the value of the 2nd part of the +** LJLIB_REC() annotation. This allows handling similar functionality +** in a common handler. +*/ + +/* Type of handler to record a fast function. */ +typedef void (LJ_FASTCALL *RecordFunc)(jit_State *J, RecordFFData *rd); + +/* Get runtime value of int argument. */ +static int32_t argv2int(jit_State *J, TValue *o) +{ + if (!tvisnumber(o) && !(tvisstr(o) && lj_str_tonumber(strV(o), o))) + lj_trace_err(J, LJ_TRERR_BADTYPE); + return tvisint(o) ? intV(o) : lj_num2int(numV(o)); +} + +/* Get runtime value of string argument. */ +static GCstr *argv2str(jit_State *J, TValue *o) +{ + if (LJ_LIKELY(tvisstr(o))) { + return strV(o); + } else { + GCstr *s; + if (!tvisnumber(o)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + if (tvisint(o)) + s = lj_str_fromint(J->L, intV(o)); + else + s = lj_str_fromnum(J->L, &o->n); + setstrV(J->L, o, s); + return s; + } +} + +/* Return number of results wanted by caller. */ +static ptrdiff_t results_wanted(jit_State *J) +{ + TValue *frame = J->L->base-1; + if (frame_islua(frame)) + return (ptrdiff_t)bc_b(frame_pc(frame)[-1]) - 1; + else + return -1; +} + +/* Throw error for unsupported variant of fast function. */ +LJ_NORET static void recff_nyiu(jit_State *J) +{ + setfuncV(J->L, &J->errinfo, J->fn); + lj_trace_err_info(J, LJ_TRERR_NYIFFU); +} + +/* Fallback handler for all fast functions that are not recorded (yet). 
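[Editorial note, not part of the patch: the conventions above can be summarized with a deliberately trivial, hypothetical handler, assuming the includes and helpers of lj_ffrecord.c: check that the argument slot is present, write the result TRef to J->base[0], and leave rd->nres at its default of 1. Hooking a real handler up would also need an lj_recdef.h entry, which is out of scope here.]

/* Hypothetical recff_* handler: since arguments are already type-specialized,
** a constant TREF_TRUE/TREF_FALSE result is valid for this specialization. */
static void LJ_FASTCALL recff_example_isnil(jit_State *J, RecordFFData *rd)
{
  TRef tr = J->base[0];
  if (tr) {  /* Argument slot present. */
    J->base[0] = tref_isnil(tr) ? TREF_TRUE : TREF_FALSE;
  }  /* else: Interpreter will throw. */
  UNUSED(rd);
}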
*/ +static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd) +{ + setfuncV(J->L, &J->errinfo, J->fn); + lj_trace_err_info(J, LJ_TRERR_NYIFF); + UNUSED(rd); +} + +/* C functions can have arbitrary side-effects and are not recorded (yet). */ +static void LJ_FASTCALL recff_c(jit_State *J, RecordFFData *rd) +{ + setfuncV(J->L, &J->errinfo, J->fn); + lj_trace_err_info(J, LJ_TRERR_NYICF); + UNUSED(rd); +} + +/* -- Base library fast functions ----------------------------------------- */ + +static void LJ_FASTCALL recff_assert(jit_State *J, RecordFFData *rd) +{ + /* Arguments already specialized. The interpreter throws for nil/false. */ + rd->nres = J->maxslot; /* Pass through all arguments. */ +} + +static void LJ_FASTCALL recff_type(jit_State *J, RecordFFData *rd) +{ + /* Arguments already specialized. Result is a constant string. Neat, huh? */ + uint32_t t; + if (tvisnumber(&rd->argv[0])) + t = ~LJ_TNUMX; + else if (LJ_64 && tvislightud(&rd->argv[0])) + t = ~LJ_TLIGHTUD; + else + t = ~itype(&rd->argv[0]); + J->base[0] = lj_ir_kstr(J, strV(&J->fn->c.upvalue[t])); + UNUSED(rd); +} + +static void LJ_FASTCALL recff_getmetatable(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tr) { + RecordIndex ix; + ix.tab = tr; + copyTV(J->L, &ix.tabv, &rd->argv[0]); + if (lj_record_mm_lookup(J, &ix, MM_metatable)) + J->base[0] = ix.mobj; + else + J->base[0] = ix.mt; + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_setmetatable(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + TRef mt = J->base[1]; + if (tref_istab(tr) && (tref_istab(mt) || (mt && tref_isnil(mt)))) { + TRef fref, mtref; + RecordIndex ix; + ix.tab = tr; + copyTV(J->L, &ix.tabv, &rd->argv[0]); + lj_record_mm_lookup(J, &ix, MM_metatable); /* Guard for no __metatable. */ + fref = emitir(IRT(IR_FREF, IRT_P32), tr, IRFL_TAB_META); + mtref = tref_isnil(mt) ? lj_ir_knull(J, IRT_TAB) : mt; + emitir(IRT(IR_FSTORE, IRT_TAB), fref, mtref); + if (!tref_isnil(mt)) + emitir(IRT(IR_TBAR, IRT_TAB), tr, 0); + J->base[0] = tr; + J->needsnap = 1; + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_rawget(jit_State *J, RecordFFData *rd) +{ + RecordIndex ix; + ix.tab = J->base[0]; ix.key = J->base[1]; + if (tref_istab(ix.tab) && ix.key) { + ix.val = 0; ix.idxchain = 0; + settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); + copyTV(J->L, &ix.keyv, &rd->argv[1]); + J->base[0] = lj_record_idx(J, &ix); + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_rawset(jit_State *J, RecordFFData *rd) +{ + RecordIndex ix; + ix.tab = J->base[0]; ix.key = J->base[1]; ix.val = J->base[2]; + if (tref_istab(ix.tab) && ix.key && ix.val) { + ix.idxchain = 0; + settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); + copyTV(J->L, &ix.keyv, &rd->argv[1]); + copyTV(J->L, &ix.valv, &rd->argv[2]); + lj_record_idx(J, &ix); + /* Pass through table at J->base[0] as result. */ + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_rawequal(jit_State *J, RecordFFData *rd) +{ + TRef tra = J->base[0]; + TRef trb = J->base[1]; + if (tra && trb) { + int diff = lj_record_objcmp(J, tra, trb, &rd->argv[0], &rd->argv[1]); + J->base[0] = diff ? TREF_FALSE : TREF_TRUE; + } /* else: Interpreter will throw. */ +} + +/* Determine mode of select() call. */ +int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv) +{ + if (tref_isstr(tr) && *strVdata(tv) == '#') { /* select('#', ...) 
*/ + if (strV(tv)->len == 1) { + emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, strV(tv))); + } else { + TRef trptr = emitir(IRT(IR_STRREF, IRT_P32), tr, lj_ir_kint(J, 0)); + TRef trchar = emitir(IRT(IR_XLOAD, IRT_U8), trptr, IRXLOAD_READONLY); + emitir(IRTG(IR_EQ, IRT_INT), trchar, lj_ir_kint(J, '#')); + } + return 0; + } else { /* select(n, ...) */ + int32_t start = argv2int(J, tv); + if (start == 0) lj_trace_err(J, LJ_TRERR_BADTYPE); /* A bit misleading. */ + return start; + } +} + +static void LJ_FASTCALL recff_select(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tr) { + ptrdiff_t start = lj_ffrecord_select_mode(J, tr, &rd->argv[0]); + if (start == 0) { /* select('#', ...) */ + J->base[0] = lj_ir_kint(J, J->maxslot - 1); + } else if (tref_isk(tr)) { /* select(k, ...) */ + ptrdiff_t n = (ptrdiff_t)J->maxslot; + if (start < 0) start += n; + else if (start > n) start = n; + rd->nres = n - start; + if (start >= 1) { + ptrdiff_t i; + for (i = 0; i < n - start; i++) + J->base[i] = J->base[start+i]; + } /* else: Interpreter will throw. */ + } else { + recff_nyiu(J); + } + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_tonumber(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + TRef base = J->base[1]; + if (tr && base) { + base = lj_opt_narrow_toint(J, base); + if (!tref_isk(base) || IR(tref_ref(base))->i != 10) + recff_nyiu(J); + } + if (tref_isnumber_str(tr)) { + if (tref_isstr(tr)) { + TValue tmp; + if (!lj_str_tonum(strV(&rd->argv[0]), &tmp)) + recff_nyiu(J); /* Would need an inverted STRTO for this case. */ + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + } +#if LJ_HASFFI + } else if (tref_iscdata(tr)) { + lj_crecord_tonumber(J, rd); + return; +#endif + } else { + tr = TREF_NIL; + } + J->base[0] = tr; + UNUSED(rd); +} + +static TValue *recff_metacall_cp(lua_State *L, lua_CFunction dummy, void *ud) +{ + jit_State *J = (jit_State *)ud; + lj_record_tailcall(J, 0, 1); + UNUSED(L); UNUSED(dummy); + return NULL; +} + +static int recff_metacall(jit_State *J, RecordFFData *rd, MMS mm) +{ + RecordIndex ix; + ix.tab = J->base[0]; + copyTV(J->L, &ix.tabv, &rd->argv[0]); + if (lj_record_mm_lookup(J, &ix, mm)) { /* Has metamethod? */ + int errcode; + /* Temporarily insert metamethod below object. */ + J->base[1] = J->base[0]; + J->base[0] = ix.mobj; + copyTV(J->L, &rd->argv[1], &rd->argv[0]); + copyTV(J->L, &rd->argv[0], &ix.mobjv); + /* Need to protect lj_record_tailcall because it may throw. */ + errcode = lj_vm_cpcall(J->L, NULL, J, recff_metacall_cp); + /* Always undo Lua stack changes to avoid confusing the interpreter. */ + copyTV(J->L, &rd->argv[0], &rd->argv[1]); + if (errcode) + lj_err_throw(J->L, errcode); /* Propagate errors. */ + rd->nres = -1; /* Pending call. */ + return 1; /* Tailcalled to metamethod. */ + } + return 0; +} + +static void LJ_FASTCALL recff_tostring(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tref_isstr(tr)) { + /* Ignore __tostring in the string base metatable. */ + /* Pass on result in J->base[0]. */ + } else if (!recff_metacall(J, rd, MM_tostring)) { + if (tref_isnumber(tr)) { + J->base[0] = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0); + } else if (tref_ispri(tr)) { + J->base[0] = lj_ir_kstr(J, strV(&J->fn->c.upvalue[tref_type(tr)])); + } else { + recff_nyiu(J); + } + } +} + +static void LJ_FASTCALL recff_ipairs_aux(jit_State *J, RecordFFData *rd) +{ + RecordIndex ix; + ix.tab = J->base[0]; + if (tref_istab(ix.tab)) { + if (!tvisnumber(&rd->argv[1])) /* No support for string coercion. 
*/ + lj_trace_err(J, LJ_TRERR_BADTYPE); + setintV(&ix.keyv, numberVint(&rd->argv[1])+1); + settabV(J->L, &ix.tabv, tabV(&rd->argv[0])); + ix.val = 0; ix.idxchain = 0; + ix.key = lj_opt_narrow_toint(J, J->base[1]); + J->base[0] = ix.key = emitir(IRTI(IR_ADD), ix.key, lj_ir_kint(J, 1)); + J->base[1] = lj_record_idx(J, &ix); + rd->nres = tref_isnil(J->base[1]) ? 0 : 2; + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_ipairs(jit_State *J, RecordFFData *rd) +{ +#ifdef LUAJIT_ENABLE_LUA52COMPAT + if (!recff_metacall(J, rd, MM_ipairs)) +#endif + { + TRef tab = J->base[0]; + if (tref_istab(tab)) { + J->base[0] = lj_ir_kfunc(J, funcV(&J->fn->c.upvalue[0])); + J->base[1] = tab; + J->base[2] = lj_ir_kint(J, 0); + rd->nres = 3; + } /* else: Interpreter will throw. */ + } +} + +static void LJ_FASTCALL recff_pcall(jit_State *J, RecordFFData *rd) +{ + if (J->maxslot >= 1) { + lj_record_call(J, 0, J->maxslot - 1); + rd->nres = -1; /* Pending call. */ + } /* else: Interpreter will throw. */ +} + +static TValue *recff_xpcall_cp(lua_State *L, lua_CFunction dummy, void *ud) +{ + jit_State *J = (jit_State *)ud; + lj_record_call(J, 1, J->maxslot - 2); + UNUSED(L); UNUSED(dummy); + return NULL; +} + +static void LJ_FASTCALL recff_xpcall(jit_State *J, RecordFFData *rd) +{ + if (J->maxslot >= 2) { + TValue argv0, argv1; + TRef tmp; + int errcode; + /* Swap function and traceback. */ + tmp = J->base[0]; J->base[0] = J->base[1]; J->base[1] = tmp; + copyTV(J->L, &argv0, &rd->argv[0]); + copyTV(J->L, &argv1, &rd->argv[1]); + copyTV(J->L, &rd->argv[0], &argv1); + copyTV(J->L, &rd->argv[1], &argv0); + /* Need to protect lj_record_call because it may throw. */ + errcode = lj_vm_cpcall(J->L, NULL, J, recff_xpcall_cp); + /* Always undo Lua stack swap to avoid confusing the interpreter. */ + copyTV(J->L, &rd->argv[0], &argv0); + copyTV(J->L, &rd->argv[1], &argv1); + if (errcode) + lj_err_throw(J->L, errcode); /* Propagate errors. */ + rd->nres = -1; /* Pending call. */ + } /* else: Interpreter will throw. */ +} + +/* -- Math library fast functions ----------------------------------------- */ + +static void LJ_FASTCALL recff_math_abs(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + J->base[0] = emitir(IRTN(IR_ABS), tr, lj_ir_knum_abs(J)); + UNUSED(rd); +} + +/* Record rounding functions math.floor and math.ceil. */ +static void LJ_FASTCALL recff_math_round(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (!tref_isinteger(tr)) { /* Pass through integers unmodified. */ + tr = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, tr), rd->data); + /* Result is integral (or NaN/Inf), but may not fit an int32_t. */ + if (LJ_DUALNUM) { /* Try to narrow using a guarded conversion to int. */ + lua_Number n = lj_vm_foldfpm(numberVnum(&rd->argv[0]), rd->data); + if (n == (lua_Number)lj_num2int(n)) + tr = emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_CHECK); + } + J->base[0] = tr; + } +} + +/* Record unary math.* functions, mapped to IR_FPMATH opcode. */ +static void LJ_FASTCALL recff_math_unary(jit_State *J, RecordFFData *rd) +{ + J->base[0] = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, J->base[0]), rd->data); +} + +/* Record math.atan2. */ +static void LJ_FASTCALL recff_math_atan2(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + TRef tr2 = lj_ir_tonum(J, J->base[1]); + J->base[0] = emitir(IRTN(IR_ATAN2), tr, tr2); + UNUSED(rd); +} + +/* Record math.ldexp. 
*/ +static void LJ_FASTCALL recff_math_ldexp(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); +#if LJ_TARGET_X86ORX64 + TRef tr2 = lj_ir_tonum(J, J->base[1]); +#else + TRef tr2 = lj_opt_narrow_toint(J, J->base[1]); +#endif + J->base[0] = emitir(IRTN(IR_LDEXP), tr, tr2); + UNUSED(rd); +} + +/* Record math.asin, math.acos, math.atan. */ +static void LJ_FASTCALL recff_math_atrig(jit_State *J, RecordFFData *rd) +{ + TRef y = lj_ir_tonum(J, J->base[0]); + TRef x = lj_ir_knum_one(J); + uint32_t ffid = rd->data; + if (ffid != FF_math_atan) { + TRef tmp = emitir(IRTN(IR_MUL), y, y); + tmp = emitir(IRTN(IR_SUB), x, tmp); + tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_SQRT); + if (ffid == FF_math_asin) { x = tmp; } else { x = y; y = tmp; } + } + J->base[0] = emitir(IRTN(IR_ATAN2), y, x); +} + +static void LJ_FASTCALL recff_math_htrig(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + J->base[0] = lj_ir_call(J, rd->data, tr); +} + +static void LJ_FASTCALL recff_math_modf(jit_State *J, RecordFFData *rd) +{ + TRef tr = J->base[0]; + if (tref_isinteger(tr)) { + J->base[0] = tr; + J->base[1] = lj_ir_kint(J, 0); + } else { + TRef trt; + tr = lj_ir_tonum(J, tr); + trt = emitir(IRTN(IR_FPMATH), tr, IRFPM_TRUNC); + J->base[0] = trt; + J->base[1] = emitir(IRTN(IR_SUB), tr, trt); + } + rd->nres = 2; +} + +static void LJ_FASTCALL recff_math_degrad(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + TRef trm = lj_ir_knum(J, numV(&J->fn->c.upvalue[0])); + J->base[0] = emitir(IRTN(IR_MUL), tr, trm); + UNUSED(rd); +} + +static void LJ_FASTCALL recff_math_pow(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonum(J, J->base[0]); + if (!tref_isnumber_str(J->base[1])) + lj_trace_err(J, LJ_TRERR_BADTYPE); + J->base[0] = lj_opt_narrow_pow(J, tr, J->base[1], &rd->argv[1]); + UNUSED(rd); +} + +static void LJ_FASTCALL recff_math_minmax(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_ir_tonumber(J, J->base[0]); + uint32_t op = rd->data; + BCReg i; + for (i = 1; J->base[i] != 0; i++) { + TRef tr2 = lj_ir_tonumber(J, J->base[i]); + IRType t = IRT_INT; + if (!(tref_isinteger(tr) && tref_isinteger(tr2))) { + if (tref_isinteger(tr)) tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); + if (tref_isinteger(tr2)) tr2 = emitir(IRTN(IR_CONV), tr2, IRCONV_NUM_INT); + t = IRT_NUM; + } + tr = emitir(IRT(op, t), tr, tr2); + } + J->base[0] = tr; +} + +static void LJ_FASTCALL recff_math_random(jit_State *J, RecordFFData *rd) +{ + GCudata *ud = udataV(&J->fn->c.upvalue[0]); + TRef tr, one; + lj_ir_kgc(J, obj2gco(ud), IRT_UDATA); /* Prevent collection. */ + tr = lj_ir_call(J, IRCALL_lj_math_random_step, lj_ir_kptr(J, uddata(ud))); + one = lj_ir_knum_one(J); + tr = emitir(IRTN(IR_SUB), tr, one); + if (J->base[0]) { + TRef tr1 = lj_ir_tonum(J, J->base[0]); + if (J->base[1]) { /* d = floor(d*(r2-r1+1.0)) + r1 */ + TRef tr2 = lj_ir_tonum(J, J->base[1]); + tr2 = emitir(IRTN(IR_SUB), tr2, tr1); + tr2 = emitir(IRTN(IR_ADD), tr2, one); + tr = emitir(IRTN(IR_MUL), tr, tr2); + tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR); + tr = emitir(IRTN(IR_ADD), tr, tr1); + } else { /* d = floor(d*r1) + 1.0 */ + tr = emitir(IRTN(IR_MUL), tr, tr1); + tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR); + tr = emitir(IRTN(IR_ADD), tr, one); + } + } + J->base[0] = tr; + UNUSED(rd); +} + +/* -- Bit library fast functions ------------------------------------------ */ + +/* Record unary bit.tobit, bit.bnot, bit.bswap. 
*/ +static void LJ_FASTCALL recff_bit_unary(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_opt_narrow_tobit(J, J->base[0]); + J->base[0] = (rd->data == IR_TOBIT) ? tr : emitir(IRTI(rd->data), tr, 0); +} + +/* Record N-ary bit.band, bit.bor, bit.bxor. */ +static void LJ_FASTCALL recff_bit_nary(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_opt_narrow_tobit(J, J->base[0]); + uint32_t op = rd->data; + BCReg i; + for (i = 1; J->base[i] != 0; i++) + tr = emitir(IRTI(op), tr, lj_opt_narrow_tobit(J, J->base[i])); + J->base[0] = tr; +} + +/* Record bit shifts. */ +static void LJ_FASTCALL recff_bit_shift(jit_State *J, RecordFFData *rd) +{ + TRef tr = lj_opt_narrow_tobit(J, J->base[0]); + TRef tsh = lj_opt_narrow_tobit(J, J->base[1]); + IROp op = (IROp)rd->data; + if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && + !tref_isk(tsh)) + tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 31)); +#ifdef LJ_TARGET_UNIFYROT + if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) { + op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR; + tsh = emitir(IRTI(IR_NEG), tsh, tsh); + } +#endif + J->base[0] = emitir(IRTI(op), tr, tsh); +} + +/* -- String library fast functions --------------------------------------- */ + +static void LJ_FASTCALL recff_string_len(jit_State *J, RecordFFData *rd) +{ + J->base[0] = emitir(IRTI(IR_FLOAD), lj_ir_tostr(J, J->base[0]), IRFL_STR_LEN); + UNUSED(rd); +} + +/* Handle string.byte (rd->data = 0) and string.sub (rd->data = 1). */ +static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd) +{ + TRef trstr = lj_ir_tostr(J, J->base[0]); + TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN); + TRef tr0 = lj_ir_kint(J, 0); + TRef trstart, trend; + GCstr *str = argv2str(J, &rd->argv[0]); + int32_t start, end; + if (rd->data) { /* string.sub(str, start [,end]) */ + start = argv2int(J, &rd->argv[1]); + trstart = lj_opt_narrow_toint(J, J->base[1]); + trend = J->base[2]; + if (tref_isnil(trend)) { + trend = lj_ir_kint(J, -1); + end = -1; + } else { + trend = lj_opt_narrow_toint(J, trend); + end = argv2int(J, &rd->argv[2]); + } + } else { /* string.byte(str, [,start [,end]]) */ + if (J->base[1]) { + start = argv2int(J, &rd->argv[1]); + trstart = lj_opt_narrow_toint(J, J->base[1]); + trend = J->base[2]; + if (tref_isnil(trend)) { + trend = trstart; + end = start; + } else { + trend = lj_opt_narrow_toint(J, trend); + end = argv2int(J, &rd->argv[2]); + } + } else { + trend = trstart = lj_ir_kint(J, 1); + end = start = 1; + } + } + if (end < 0) { + emitir(IRTGI(IR_LT), trend, tr0); + trend = emitir(IRTI(IR_ADD), emitir(IRTI(IR_ADD), trlen, trend), + lj_ir_kint(J, 1)); + end = end+(int32_t)str->len+1; + } else if ((MSize)end <= str->len) { + emitir(IRTGI(IR_ULE), trend, trlen); + } else { + emitir(IRTGI(IR_GT), trend, trlen); + end = (int32_t)str->len; + trend = trlen; + } + if (start < 0) { + emitir(IRTGI(IR_LT), trstart, tr0); + trstart = emitir(IRTI(IR_ADD), trlen, trstart); + start = start+(int32_t)str->len; + emitir(start < 0 ? IRTGI(IR_LT) : IRTGI(IR_GE), trstart, tr0); + if (start < 0) { + trstart = tr0; + start = 0; + } + } else { + if (start == 0) { + emitir(IRTGI(IR_EQ), trstart, tr0); + trstart = tr0; + } else { + trstart = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, -1)); + emitir(IRTGI(IR_GE), trstart, tr0); + start--; + } + } + if (rd->data) { /* Return string.sub result. */ + if (end - start >= 0) { + /* Also handle empty range here, to avoid extra traces. 
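[Editorial note, not part of the patch: recff_bit_shift() above emits an explicit BAND with 31 when the target's shift instructions do not mask the count themselves, keeping JIT-compiled code consistent with the interpreter, where shift counts are taken modulo 32. A standalone sketch of that semantic follows.]

#include <stdio.h>
#include <stdint.h>

static uint32_t lshift_masked(uint32_t x, uint32_t n)
{
  /* Masking avoids C's undefined behavior for counts >= 32 and matches
  ** bit.lshift(), where bit.lshift(1, 33) == 2. */
  return x << (n & 31);
}

int main(void)
{
  printf("%u\n", lshift_masked(1u, 33));  /* Prints 2. */
  return 0;
}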
*/ + TRef trptr, trslen = emitir(IRTI(IR_SUB), trend, trstart); + emitir(IRTGI(IR_GE), trslen, tr0); + trptr = emitir(IRT(IR_STRREF, IRT_P32), trstr, trstart); + J->base[0] = emitir(IRT(IR_SNEW, IRT_STR), trptr, trslen); + } else { /* Range underflow: return empty string. */ + emitir(IRTGI(IR_LT), trend, trstart); + J->base[0] = lj_ir_kstr(J, lj_str_new(J->L, strdata(str), 0)); + } + } else { /* Return string.byte result(s). */ + ptrdiff_t i, len = end - start; + if (len > 0) { + TRef trslen = emitir(IRTI(IR_SUB), trend, trstart); + emitir(IRTGI(IR_EQ), trslen, lj_ir_kint(J, (int32_t)len)); + if (J->baseslot + len > LJ_MAX_JSLOTS) + lj_trace_err_info(J, LJ_TRERR_STACKOV); + rd->nres = len; + for (i = 0; i < len; i++) { + TRef tmp = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, (int32_t)i)); + tmp = emitir(IRT(IR_STRREF, IRT_P32), trstr, tmp); + J->base[i] = emitir(IRT(IR_XLOAD, IRT_U8), tmp, IRXLOAD_READONLY); + } + } else { /* Empty range or range underflow: return no results. */ + emitir(IRTGI(IR_LE), trend, trstart); + rd->nres = 0; + } + } +} + +/* -- Table library fast functions ---------------------------------------- */ + +static void LJ_FASTCALL recff_table_getn(jit_State *J, RecordFFData *rd) +{ + if (tref_istab(J->base[0])) + J->base[0] = lj_ir_call(J, IRCALL_lj_tab_len, J->base[0]); + /* else: Interpreter will throw. */ + UNUSED(rd); +} + +static void LJ_FASTCALL recff_table_remove(jit_State *J, RecordFFData *rd) +{ + TRef tab = J->base[0]; + rd->nres = 0; + if (tref_istab(tab)) { + if (!J->base[1] || tref_isnil(J->base[1])) { /* Simple pop: t[#t] = nil */ + TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, tab); + GCtab *t = tabV(&rd->argv[0]); + MSize len = lj_tab_len(t); + emitir(IRTGI(len ? IR_NE : IR_EQ), trlen, lj_ir_kint(J, 0)); + if (len) { + RecordIndex ix; + ix.tab = tab; + ix.key = trlen; + settabV(J->L, &ix.tabv, t); + setintV(&ix.keyv, len); + ix.idxchain = 0; + if (results_wanted(J) != 0) { /* Specialize load only if needed. */ + ix.val = 0; + J->base[0] = lj_record_idx(J, &ix); /* Load previous value. */ + rd->nres = 1; + /* Assumes ix.key/ix.tab is not modified for raw lj_record_idx(). */ + } + ix.val = TREF_NIL; + lj_record_idx(J, &ix); /* Remove value. */ + } + } else { /* Complex case: remove in the middle. */ + recff_nyiu(J); + } + } /* else: Interpreter will throw. */ +} + +static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd) +{ + RecordIndex ix; + ix.tab = J->base[0]; + ix.val = J->base[1]; + rd->nres = 0; + if (tref_istab(ix.tab) && ix.val) { + if (!J->base[2]) { /* Simple push: t[#t+1] = v */ + TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, ix.tab); + GCtab *t = tabV(&rd->argv[0]); + ix.key = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1)); + settabV(J->L, &ix.tabv, t); + setintV(&ix.keyv, lj_tab_len(t) + 1); + ix.idxchain = 0; + lj_record_idx(J, &ix); /* Set new value. */ + } else { /* Complex case: insert in the middle. */ + recff_nyiu(J); + } + } /* else: Interpreter will throw. */ +} + +/* -- I/O library fast functions ------------------------------------------ */ + +/* Get FILE* for I/O function. Any I/O error aborts recording, so there's +** no need to encode the alternate cases for any of the guards. 
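[Editorial note, not part of the patch: the string.sub/string.byte recorder above mirrors the interpreter's index normalization: negative indices count from the end of the string and out-of-range values are clamped. A standalone sketch of that normalization, rewritten as plain C for clarity, follows.]

#include <stdio.h>
#include <string.h>

/* Normalize 1-based, possibly negative (start, end) to a 0-based [s, e) range. */
static void norm_range(int len, int start, int end, int *s, int *e)
{
  if (end < 0) end += len + 1; else if (end > len) end = len;
  if (start < 0) { start += len; if (start < 0) start = 0; }
  else if (start > 0) start--;
  *s = start; *e = end;
}

int main(void)
{
  int s, e;
  norm_range((int)strlen("luajit"), -3, -1, &s, &e);
  printf("start=%d end=%d\n", s, e);   /* string.sub("luajit", -3, -1) -> "jit" */
  return 0;
}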
+*/ +static TRef recff_io_fp(jit_State *J, uint32_t id) +{ + TRef tr, ud, fp; + if (id) { /* io.func() */ + tr = lj_ir_kptr(J, &J2G(J)->gcroot[id]); + ud = emitir(IRT(IR_XLOAD, IRT_UDATA), tr, 0); + } else { /* fp:method() */ + ud = J->base[0]; + if (!tref_isudata(ud)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + tr = emitir(IRT(IR_FLOAD, IRT_U8), ud, IRFL_UDATA_UDTYPE); + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE)); + } + fp = emitir(IRT(IR_FLOAD, IRT_PTR), ud, IRFL_UDATA_FILE); + emitir(IRTG(IR_NE, IRT_PTR), fp, lj_ir_knull(J, IRT_PTR)); + return fp; +} + +static void LJ_FASTCALL recff_io_write(jit_State *J, RecordFFData *rd) +{ + TRef fp = recff_io_fp(J, rd->data); + TRef zero = lj_ir_kint(J, 0); + TRef one = lj_ir_kint(J, 1); + ptrdiff_t i = rd->data == 0 ? 1 : 0; + for (; J->base[i]; i++) { + TRef str = lj_ir_tostr(J, J->base[i]); + TRef buf = emitir(IRT(IR_STRREF, IRT_P32), str, zero); + TRef len = emitir(IRTI(IR_FLOAD), str, IRFL_STR_LEN); + if (tref_isk(len) && IR(tref_ref(len))->i == 1) { + TRef tr = emitir(IRT(IR_XLOAD, IRT_U8), buf, IRXLOAD_READONLY); + tr = lj_ir_call(J, IRCALL_fputc, tr, fp); + if (results_wanted(J) != 0) /* Check result only if not ignored. */ + emitir(IRTGI(IR_NE), tr, lj_ir_kint(J, -1)); + } else { + TRef tr = lj_ir_call(J, IRCALL_fwrite, buf, one, len, fp); + if (results_wanted(J) != 0) /* Check result only if not ignored. */ + emitir(IRTGI(IR_EQ), tr, len); + } + } + J->base[0] = TREF_TRUE; +} + +static void LJ_FASTCALL recff_io_flush(jit_State *J, RecordFFData *rd) +{ + TRef fp = recff_io_fp(J, rd->data); + TRef tr = lj_ir_call(J, IRCALL_fflush, fp); + if (results_wanted(J) != 0) /* Check result only if not ignored. */ + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0)); + J->base[0] = TREF_TRUE; +} + +/* -- Record calls to fast functions -------------------------------------- */ + +#include "lj_recdef.h" + +static uint32_t recdef_lookup(GCfunc *fn) +{ + if (fn->c.ffid < sizeof(recff_idmap)/sizeof(recff_idmap[0])) + return recff_idmap[fn->c.ffid]; + else + return 0; +} + +/* Record entry to a fast function or C function. */ +void lj_ffrecord_func(jit_State *J) +{ + RecordFFData rd; + uint32_t m = recdef_lookup(J->fn); + rd.data = m & 0xff; + rd.nres = 1; /* Default is one result. */ + rd.argv = J->L->base; + J->base[J->maxslot] = 0; /* Mark end of arguments. */ + (recff_func[m >> 8])(J, &rd); /* Call recff_* handler. */ + if (rd.nres >= 0) { + if (J->postproc == LJ_POST_NONE) J->postproc = LJ_POST_FFRETRY; + lj_record_ret(J, 0, rd.nres); + } +} + +#undef IR +#undef emitir + +#endif diff --git a/src/luajit2/src/lj_ffrecord.h b/src/luajit2/src/lj_ffrecord.h new file mode 100644 index 0000000000..7ff270a039 --- /dev/null +++ b/src/luajit2/src/lj_ffrecord.h @@ -0,0 +1,24 @@ +/* +** Fast function call recorder. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_FFRECORD_H +#define _LJ_FFRECORD_H + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT +/* Data used by handlers to record a fast function. */ +typedef struct RecordFFData { + TValue *argv; /* Runtime argument values. */ + ptrdiff_t nres; /* Number of returned results (defaults to 1). */ + uint32_t data; /* Per-ffid auxiliary data (opcode, literal etc.). 
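[Editorial note, not part of the patch: lj_ffrecord_func() above splits the value returned by recdef_lookup() into a handler index (high bits) and the per-ffid auxiliary byte (low 8 bits) that ends up in rd->data. A tiny standalone sketch of that packing, with made-up values, follows.]

#include <stdio.h>
#include <stdint.h>

#define RECDEF_PACK(idx, aux) (((uint32_t)(idx) << 8) | (uint32_t)(aux))

int main(void)
{
  uint32_t m = RECDEF_PACK(7, 0x2a);          /* Hypothetical table entry. */
  printf("handler=%u aux=%u\n", m >> 8, m & 0xff);
  return 0;
}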
*/ +} RecordFFData; + +LJ_FUNC int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv); +LJ_FUNC void lj_ffrecord_func(jit_State *J); +#endif + +#endif diff --git a/src/luajit2/src/lj_frame.h b/src/luajit2/src/lj_frame.h new file mode 100644 index 0000000000..4a2a767afa --- /dev/null +++ b/src/luajit2/src/lj_frame.h @@ -0,0 +1,138 @@ +/* +** Stack frames. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_FRAME_H +#define _LJ_FRAME_H + +#include "lj_obj.h" +#include "lj_bc.h" + +/* -- Lua stack frame ----------------------------------------------------- */ + +/* Frame type markers in callee function slot (callee base-1). */ +enum { + FRAME_LUA, FRAME_C, FRAME_CONT, FRAME_VARG, + FRAME_LUAP, FRAME_CP, FRAME_PCALL, FRAME_PCALLH +}; +#define FRAME_TYPE 3 +#define FRAME_P 4 +#define FRAME_TYPEP (FRAME_TYPE|FRAME_P) + +/* Macros to access and modify Lua frames. */ +#define frame_gc(f) (gcref((f)->fr.func)) +#define frame_func(f) (&frame_gc(f)->fn) +#define frame_ftsz(f) ((f)->fr.tp.ftsz) + +#define frame_type(f) (frame_ftsz(f) & FRAME_TYPE) +#define frame_typep(f) (frame_ftsz(f) & FRAME_TYPEP) +#define frame_islua(f) (frame_type(f) == FRAME_LUA) +#define frame_isc(f) (frame_type(f) == FRAME_C) +#define frame_iscont(f) (frame_typep(f) == FRAME_CONT) +#define frame_isvarg(f) (frame_typep(f) == FRAME_VARG) +#define frame_ispcall(f) ((frame_ftsz(f) & 6) == FRAME_PCALL) + +#define frame_pc(f) (mref((f)->fr.tp.pcr, const BCIns)) +#define frame_contpc(f) (frame_pc((f)-1)) +#if LJ_64 +#define frame_contf(f) \ + ((ASMFunction)(void *)((intptr_t)lj_vm_asm_begin + \ + (intptr_t)(int32_t)((f)-1)->u32.lo)) +#else +#define frame_contf(f) ((ASMFunction)gcrefp(((f)-1)->gcr, void)) +#endif +#define frame_delta(f) (frame_ftsz(f) >> 3) +#define frame_sized(f) (frame_ftsz(f) & ~FRAME_TYPEP) + +#define frame_prevl(f) ((f) - (1+bc_a(frame_pc(f)[-1]))) +#define frame_prevd(f) ((TValue *)((char *)(f) - frame_sized(f))) +#define frame_prev(f) (frame_islua(f)?frame_prevl(f):frame_prevd(f)) +/* Note: this macro does not skip over FRAME_VARG. */ + +#define setframe_pc(f, pc) (setmref((f)->fr.tp.pcr, (pc))) +#define setframe_ftsz(f, sz) ((f)->fr.tp.ftsz = (sz)) +#define setframe_gc(f, p) (setgcref((f)->fr.func, (p))) + +/* -- C stack frame ------------------------------------------------------- */ + +/* Macros to access and modify the C stack frame chain. */ + +/* These definitions must match with the arch-specific *.dasc files. 
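[Editorial note, not part of the patch: for non-Lua frames the link field read by frame_ftsz() holds the byte distance to the previous frame together with a FRAME_* tag in the low 3 bits (frame sizes are multiples of the 8-byte TValue, so those bits are free); frame_typep(), frame_sized() and frame_delta() above decompose it. A standalone sketch with a hypothetical link value follows.]

#include <stdio.h>
#include <stdint.h>

enum { F_LUA, F_C, F_CONT, F_VARG, F_LUAP, F_CP, F_PCALL, F_PCALLH };
#define F_TYPEP 7

int main(void)
{
  /* Hypothetical link value: previous frame 32 bytes below, protected C frame. */
  intptr_t ftsz = 32 | F_CP;
  printf("type=%d size=%d slots=%d\n",
         (int)(ftsz & F_TYPEP), (int)(ftsz & ~(intptr_t)F_TYPEP), (int)(ftsz >> 3));
  return 0;
}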
*/ +#if LJ_TARGET_X86 +#define CFRAME_OFS_ERRF (15*4) +#define CFRAME_OFS_NRES (14*4) +#define CFRAME_OFS_PREV (13*4) +#define CFRAME_OFS_L (12*4) +#define CFRAME_OFS_PC (6*4) +#define CFRAME_OFS_MULTRES (5*4) +#define CFRAME_SIZE (12*4) +#define CFRAME_SIZE_JIT CFRAME_SIZE +#define CFRAME_SHIFT_MULTRES 0 +#elif LJ_TARGET_X64 +#if LJ_ABI_WIN +#define CFRAME_OFS_PREV (13*8) +#define CFRAME_OFS_PC (25*4) +#define CFRAME_OFS_L (24*4) +#define CFRAME_OFS_ERRF (23*4) +#define CFRAME_OFS_NRES (22*4) +#define CFRAME_OFS_MULTRES (21*4) +#define CFRAME_SIZE (10*8) +#define CFRAME_SIZE_JIT (CFRAME_SIZE + 9*16 + 4*8) +#define CFRAME_SHIFT_MULTRES 0 +#else +#define CFRAME_OFS_PREV (4*8) +#define CFRAME_OFS_PC (7*4) +#define CFRAME_OFS_L (6*4) +#define CFRAME_OFS_ERRF (5*4) +#define CFRAME_OFS_NRES (4*4) +#define CFRAME_OFS_MULTRES (1*4) +#define CFRAME_SIZE (10*8) +#define CFRAME_SIZE_JIT (CFRAME_SIZE + 16) +#define CFRAME_SHIFT_MULTRES 0 +#endif +#elif LJ_TARGET_ARM +#define CFRAME_OFS_ERRF 24 +#define CFRAME_OFS_NRES 20 +#define CFRAME_OFS_PREV 16 +#define CFRAME_OFS_L 12 +#define CFRAME_OFS_PC 8 +#define CFRAME_OFS_MULTRES 4 +#define CFRAME_SIZE 64 +#define CFRAME_SIZE_JIT CFRAME_SIZE +#define CFRAME_SHIFT_MULTRES 3 +#elif LJ_TARGET_PPCSPE +#define CFRAME_OFS_ERRF 28 +#define CFRAME_OFS_NRES 24 +#define CFRAME_OFS_PREV 20 +#define CFRAME_OFS_L 16 +#define CFRAME_OFS_PC 12 +#define CFRAME_OFS_MULTRES 8 +#define CFRAME_SIZE 176 +#define CFRAME_SIZE_JIT CFRAME_SIZE +#define CFRAME_SHIFT_MULTRES 3 +#else +#error "Missing CFRAME_* definitions for this architecture" +#endif + +#define CFRAME_RESUME 1 +#define CFRAME_UNWIND_FF 2 /* Only used in unwinder. */ +#define CFRAME_RAWMASK (~(intptr_t)(CFRAME_RESUME|CFRAME_UNWIND_FF)) + +#define cframe_errfunc(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_ERRF)) +#define cframe_nres(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_NRES)) +#define cframe_prev(cf) (*(void **)(((char *)(cf))+CFRAME_OFS_PREV)) +#define cframe_multres(cf) (*(uint32_t *)(((char *)(cf))+CFRAME_OFS_MULTRES)) +#define cframe_multres_n(cf) (cframe_multres((cf)) >> CFRAME_SHIFT_MULTRES) +#define cframe_L(cf) \ + (&gcref(*(GCRef *)(((char *)(cf))+CFRAME_OFS_L))->th) +#define cframe_pc(cf) \ + (mref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), const BCIns)) +#define setcframe_pc(cf, pc) \ + (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), (pc))) +#define cframe_canyield(cf) ((intptr_t)(cf) & CFRAME_RESUME) +#define cframe_unwind_ff(cf) ((intptr_t)(cf) & CFRAME_UNWIND_FF) +#define cframe_raw(cf) ((void *)((intptr_t)(cf) & CFRAME_RAWMASK)) +#define cframe_Lpc(L) cframe_pc(cframe_raw(L->cframe)) + +#endif diff --git a/src/luajit2/src/lj_func.c b/src/luajit2/src/lj_func.c new file mode 100644 index 0000000000..334ba4c850 --- /dev/null +++ b/src/luajit2/src/lj_func.c @@ -0,0 +1,180 @@ +/* +** Function handling (prototypes, functions and upvalues). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h +*/ + +#define lj_func_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_func.h" +#include "lj_trace.h" +#include "lj_vm.h" + +/* -- Prototypes ---------------------------------------------------------- */ + +void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt) +{ + lj_mem_free(g, pt, pt->sizept); +} + +/* -- Upvalues ------------------------------------------------------------ */ + +static void unlinkuv(GCupval *uv) +{ + lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); + setgcrefr(uvnext(uv)->prev, uv->prev); + setgcrefr(uvprev(uv)->next, uv->next); +} + +/* Find existing open upvalue for a stack slot or create a new one. */ +static GCupval *func_finduv(lua_State *L, TValue *slot) +{ + global_State *g = G(L); + GCRef *pp = &L->openupval; + GCupval *p; + GCupval *uv; + /* Search the sorted list of open upvalues. */ + while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) { + lua_assert(!p->closed && uvval(p) != &p->tv); + if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */ + if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */ + flipwhite(obj2gco(p)); + return p; + } + pp = &p->nextgc; + } + /* No matching upvalue found. Create a new one. */ + uv = lj_mem_newt(L, sizeof(GCupval), GCupval); + newwhite(g, uv); + uv->gct = ~LJ_TUPVAL; + uv->closed = 0; /* Still open. */ + setmref(uv->v, slot); /* Pointing to the stack slot. */ + /* NOBARRIER: The GCupval is new (marked white) and open. */ + setgcrefr(uv->nextgc, *pp); /* Insert into sorted list of open upvalues. */ + setgcref(*pp, obj2gco(uv)); + setgcref(uv->prev, obj2gco(&g->uvhead)); /* Insert into GC list, too. */ + setgcrefr(uv->next, g->uvhead.next); + setgcref(uvnext(uv)->prev, obj2gco(uv)); + setgcref(g->uvhead.next, obj2gco(uv)); + lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); + return uv; +} + +/* Create an empty and closed upvalue. */ +static GCupval *func_emptyuv(lua_State *L) +{ + GCupval *uv = (GCupval *)lj_mem_newgco(L, sizeof(GCupval)); + uv->gct = ~LJ_TUPVAL; + uv->closed = 1; + setnilV(&uv->tv); + setmref(uv->v, &uv->tv); + return uv; +} + +/* Close all open upvalues pointing to some stack level or above. */ +void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level) +{ + GCupval *uv; + global_State *g = G(L); + while (gcref(L->openupval) != NULL && + uvval((uv = gco2uv(gcref(L->openupval)))) >= level) { + GCobj *o = obj2gco(uv); + lua_assert(!isblack(o) && !uv->closed && uvval(uv) != &uv->tv); + setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */ + if (isdead(g, o)) { + lj_func_freeuv(g, uv); + } else { + unlinkuv(uv); + lj_gc_closeuv(g, uv); + } + } +} + +void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv) +{ + if (!uv->closed) + unlinkuv(uv); + lj_mem_freet(g, uv); +} + +/* -- Functions (closures) ------------------------------------------------ */ + +GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env) +{ + GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeCfunc(nelems)); + fn->c.gct = ~LJ_TFUNC; + fn->c.ffid = FF_C; + fn->c.nupvalues = (uint8_t)nelems; + /* NOBARRIER: The GCfunc is new (marked white). 
*/ + setmref(fn->c.pc, &G(L)->bc_cfunc_ext); + setgcref(fn->c.env, obj2gco(env)); + return fn; +} + +static GCfunc *func_newL(lua_State *L, GCproto *pt, GCtab *env) +{ + GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeLfunc((MSize)pt->sizeuv)); + fn->l.gct = ~LJ_TFUNC; + fn->l.ffid = FF_LUA; + fn->l.nupvalues = 0; /* Set to zero until upvalues are initialized. */ + /* NOBARRIER: Really a setgcref. But the GCfunc is new (marked white). */ + setmref(fn->l.pc, proto_bc(pt)); + setgcref(fn->l.env, obj2gco(env)); + return fn; +} + +/* Create a new Lua function with empty upvalues. */ +GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env) +{ + GCfunc *fn = func_newL(L, pt, env); + MSize i, nuv = pt->sizeuv; + /* NOBARRIER: The GCfunc is new (marked white). */ + for (i = 0; i < nuv; i++) { + GCupval *uv = func_emptyuv(L); + uv->dhash = (uint32_t)(uintptr_t)pt ^ ((uint32_t)proto_uv(pt)[i] << 24); + setgcref(fn->l.uvptr[i], obj2gco(uv)); + } + fn->l.nupvalues = (uint8_t)nuv; + return fn; +} + +/* Do a GC check and create a new Lua function with inherited upvalues. */ +GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent) +{ + GCfunc *fn; + GCRef *puv; + MSize i, nuv; + TValue *base; + lj_gc_check_fixtop(L); + fn = func_newL(L, pt, tabref(parent->env)); + /* NOBARRIER: The GCfunc is new (marked white). */ + puv = parent->uvptr; + nuv = pt->sizeuv; + base = L->base; + for (i = 0; i < nuv; i++) { + uint32_t v = proto_uv(pt)[i]; + GCupval *uv; + if ((v & 0x8000)) { + uv = func_finduv(L, base + (v & 0xff)); + uv->dhash = (uint32_t)(uintptr_t)mref(parent->pc, char) ^ (v << 24); + } else { + uv = &gcref(puv[v])->uv; + } + setgcref(fn->l.uvptr[i], obj2gco(uv)); + } + fn->l.nupvalues = (uint8_t)nuv; + return fn; +} + +void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *fn) +{ + MSize size = isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) : + sizeCfunc((MSize)fn->c.nupvalues); + lj_mem_free(g, fn, size); +} + diff --git a/src/luajit2/src/lj_func.h b/src/luajit2/src/lj_func.h new file mode 100644 index 0000000000..6ec4ab95e9 --- /dev/null +++ b/src/luajit2/src/lj_func.h @@ -0,0 +1,24 @@ +/* +** Function handling (prototypes, functions and upvalues). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_FUNC_H +#define _LJ_FUNC_H + +#include "lj_obj.h" + +/* Prototypes. */ +LJ_FUNC void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt); + +/* Upvalues. */ +LJ_FUNCA void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level); +LJ_FUNC void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv); + +/* Functions (closures). */ +LJ_FUNC GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env); +LJ_FUNC GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env); +LJ_FUNCA GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent); +LJ_FUNC void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *c); + +#endif diff --git a/src/luajit2/src/lj_gc.c b/src/luajit2/src/lj_gc.c new file mode 100644 index 0000000000..1985abc78c --- /dev/null +++ b/src/luajit2/src/lj_gc.c @@ -0,0 +1,838 @@ +/* +** Garbage collector. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h +*/ + +#define lj_gc_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_udata.h" +#include "lj_meta.h" +#include "lj_state.h" +#include "lj_frame.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_cdata.h" +#endif +#include "lj_trace.h" +#include "lj_vm.h" + +#define GCSTEPSIZE 1024u +#define GCSWEEPMAX 40 +#define GCSWEEPCOST 10 +#define GCFINALIZECOST 100 + +/* Macros to set GCobj colors and flags. */ +#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES) +#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK) +#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED) +#define markfinalized(u) ((u)->marked |= LJ_GC_FINALIZED) + +/* -- Mark phase ---------------------------------------------------------- */ + +/* Mark a TValue (if needed). */ +#define gc_marktv(g, tv) \ + { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \ + if (tviswhite(tv)) gc_mark(g, gcV(tv)); } + +/* Mark a GCobj (if needed). */ +#define gc_markobj(g, o) \ + { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); } + +/* Mark a string object. */ +#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES) + +/* Mark a white GCobj. */ +static void gc_mark(global_State *g, GCobj *o) +{ + lua_assert(iswhite(o) && !isdead(g, o)); + white2gray(o); + if (LJ_UNLIKELY(o->gch.gct == ~LJ_TUDATA)) { + GCtab *mt = tabref(gco2ud(o)->metatable); + gray2black(o); /* Userdata are never gray. */ + if (mt) gc_markobj(g, mt); + gc_markobj(g, tabref(gco2ud(o)->env)); + } else if (LJ_UNLIKELY(o->gch.gct == ~LJ_TUPVAL)) { + GCupval *uv = gco2uv(o); + gc_marktv(g, uvval(uv)); + if (uv->closed) + gray2black(o); /* Closed upvalues are never gray. */ + } else if (o->gch.gct != ~LJ_TSTR && o->gch.gct != ~LJ_TCDATA) { + lua_assert(o->gch.gct == ~LJ_TFUNC || o->gch.gct == ~LJ_TTAB || + o->gch.gct == ~LJ_TTHREAD || o->gch.gct == ~LJ_TPROTO); + setgcrefr(o->gch.gclist, g->gc.gray); + setgcref(g->gc.gray, o); + } +} + +/* Mark GC roots. */ +static void gc_mark_gcroot(global_State *g) +{ + ptrdiff_t i; + for (i = 0; i < GCROOT_MAX; i++) + if (gcref(g->gcroot[i]) != NULL) + gc_markobj(g, gcref(g->gcroot[i])); +} + +/* Start a GC cycle and mark the root set. */ +static void gc_mark_start(global_State *g) +{ + setgcrefnull(g->gc.gray); + setgcrefnull(g->gc.grayagain); + setgcrefnull(g->gc.weak); + gc_markobj(g, mainthread(g)); + gc_markobj(g, tabref(mainthread(g)->env)); + gc_marktv(g, &g->registrytv); + gc_mark_gcroot(g); + g->gc.state = GCSpropagate; +} + +/* Mark open upvalues. */ +static void gc_mark_uv(global_State *g) +{ + GCupval *uv; + for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) { + lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); + if (isgray(obj2gco(uv))) + gc_marktv(g, uvval(uv)); + } +} + +/* Mark userdata in mmudata list. */ +static void gc_mark_mmudata(global_State *g) +{ + GCobj *root = gcref(g->gc.mmudata); + GCobj *u = root; + if (u) { + do { + u = gcnext(u); + makewhite(g, u); /* Could be from previous GC. */ + gc_mark(g, u); + } while (u != root); + } +} + +/* Separate userdata which which needs finalization to mmudata list. */ +size_t lj_gc_separateudata(global_State *g, int all) +{ + size_t m = 0; + GCRef *p = &mainthread(g)->nextgc; + GCobj *o; + while ((o = gcref(*p)) != NULL) { + if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) { + p = &o->gch.nextgc; /* Nothing to do. 
*/ + } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) { + markfinalized(gco2ud(o)); /* Done, as there's no __gc metamethod. */ + p = &o->gch.nextgc; + } else { /* Otherwise move userdata to be finalized to mmudata list. */ + m += sizeudata(gco2ud(o)); + markfinalized(gco2ud(o)); + *p = o->gch.nextgc; + if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */ + GCobj *root = gcref(g->gc.mmudata); + setgcrefr(o->gch.nextgc, root->gch.nextgc); + setgcref(root->gch.nextgc, o); + setgcref(g->gc.mmudata, o); + } else { /* Create circular list. */ + setgcref(o->gch.nextgc, o); + setgcref(g->gc.mmudata, o); + } + } + } + return m; +} + +/* -- Propagation phase --------------------------------------------------- */ + +/* Traverse a table. */ +static int gc_traverse_tab(global_State *g, GCtab *t) +{ + int weak = 0; + cTValue *mode; + GCtab *mt = tabref(t->metatable); + if (mt) + gc_markobj(g, mt); + mode = lj_meta_fastg(g, mt, MM_mode); + if (mode && tvisstr(mode)) { /* Valid __mode field? */ + const char *modestr = strVdata(mode); + int c; + while ((c = *modestr++)) { + if (c == 'k') weak |= LJ_GC_WEAKKEY; + else if (c == 'v') weak |= LJ_GC_WEAKVAL; + else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL); + } + if (weak > 0) { /* Weak tables are cleared in the atomic phase. */ + t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak); + setgcrefr(t->gclist, g->gc.weak); + setgcref(g->gc.weak, obj2gco(t)); + } + } + if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */ + return 1; + if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */ + MSize i, asize = t->asize; + for (i = 0; i < asize; i++) + gc_marktv(g, arrayslot(t, i)); + } + if (t->hmask > 0) { /* Mark hash part. */ + Node *node = noderef(t->node); + MSize i, hmask = t->hmask; + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + if (!tvisnil(&n->val)) { /* Mark non-empty slot. */ + lua_assert(!tvisnil(&n->key)); + if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key); + if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val); + } + } + } + return weak; +} + +/* Traverse a function. */ +static void gc_traverse_func(global_State *g, GCfunc *fn) +{ + gc_markobj(g, tabref(fn->c.env)); + if (isluafunc(fn)) { + uint32_t i; + lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv); + gc_markobj(g, funcproto(fn)); + for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */ + gc_markobj(g, &gcref(fn->l.uvptr[i])->uv); + } else { + uint32_t i; + for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */ + gc_marktv(g, &fn->c.upvalue[i]); + } +} + +#if LJ_HASJIT +/* Mark a trace. */ +static void gc_marktrace(global_State *g, TraceNo traceno) +{ + GCobj *o = obj2gco(traceref(G2J(g), traceno)); + lua_assert(traceno != G2J(g)->cur.traceno); + if (iswhite(o)) { + white2gray(o); + setgcrefr(o->gch.gclist, g->gc.gray); + setgcref(g->gc.gray, o); + } +} + +/* Traverse a trace. */ +static void gc_traverse_trace(global_State *g, GCtrace *T) +{ + IRRef ref; + if (T->traceno == 0) return; + for (ref = T->nk; ref < REF_TRUE; ref++) { + IRIns *ir = &T->ir[ref]; + if (ir->o == IR_KGC) + gc_markobj(g, ir_kgc(ir)); + } + if (T->link) gc_marktrace(g, T->link); + if (T->nextroot) gc_marktrace(g, T->nextroot); + if (T->nextside) gc_marktrace(g, T->nextside); + gc_markobj(g, gcref(T->startpt)); +} + +/* The current trace is a GC root while not anchored in the prototype (yet). 
*/ +#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur) +#else +#define gc_traverse_curtrace(g) UNUSED(g) +#endif + +/* Traverse a prototype. */ +static void gc_traverse_proto(global_State *g, GCproto *pt) +{ + ptrdiff_t i; + gc_mark_str(proto_chunkname(pt)); + for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */ + gc_markobj(g, proto_kgc(pt, i)); +#if LJ_HASJIT + if (pt->trace) gc_marktrace(g, pt->trace); +#endif +} + +/* Traverse the frame structure of a stack. */ +static MSize gc_traverse_frames(global_State *g, lua_State *th) +{ + TValue *frame, *top = th->top-1, *bot = tvref(th->stack); + /* Note: extra vararg frame not skipped, marks function twice (harmless). */ + for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) { + GCfunc *fn = frame_func(frame); + TValue *ftop = frame; + if (isluafunc(fn)) ftop += funcproto(fn)->framesize; + if (ftop > top) top = ftop; + gc_markobj(g, fn); /* Need to mark hidden function (or L). */ + } + top++; /* Correct bias of -1 (frame == base-1). */ + if (top > tvref(th->maxstack)) top = tvref(th->maxstack); + return (MSize)(top - bot); /* Return minimum needed stack size. */ +} + +/* Traverse a thread object. */ +static void gc_traverse_thread(global_State *g, lua_State *th) +{ + TValue *o, *top = th->top; + for (o = tvref(th->stack)+1; o < top; o++) + gc_marktv(g, o); + if (g->gc.state == GCSatomic) { + top = tvref(th->stack) + th->stacksize; + for (; o < top; o++) /* Clear unmarked slots. */ + setnilV(o); + } + gc_markobj(g, tabref(th->env)); + lj_state_shrinkstack(th, gc_traverse_frames(g, th)); +} + +/* Propagate one gray object. Traverse it and turn it black. */ +static size_t propagatemark(global_State *g) +{ + GCobj *o = gcref(g->gc.gray); + lua_assert(isgray(o)); + gray2black(o); + setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */ + if (LJ_LIKELY(o->gch.gct == ~LJ_TTAB)) { + GCtab *t = gco2tab(o); + if (gc_traverse_tab(g, t) > 0) + black2gray(o); /* Keep weak tables gray. */ + return sizeof(GCtab) + sizeof(TValue) * t->asize + + sizeof(Node) * (t->hmask + 1); + } else if (LJ_LIKELY(o->gch.gct == ~LJ_TFUNC)) { + GCfunc *fn = gco2func(o); + gc_traverse_func(g, fn); + return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) : + sizeCfunc((MSize)fn->c.nupvalues); + } else if (LJ_LIKELY(o->gch.gct == ~LJ_TPROTO)) { + GCproto *pt = gco2pt(o); + gc_traverse_proto(g, pt); + return pt->sizept; + } else if (LJ_LIKELY(o->gch.gct == ~LJ_TTHREAD)) { + lua_State *th = gco2th(o); + setgcrefr(th->gclist, g->gc.grayagain); + setgcref(g->gc.grayagain, o); + black2gray(o); /* Threads are never black. */ + gc_traverse_thread(g, th); + return sizeof(lua_State) + sizeof(TValue) * th->stacksize; + } else { +#if LJ_HASJIT + GCtrace *T = gco2trace(o); + gc_traverse_trace(g, T); + return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + + T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry); +#else + lua_assert(0); + return 0; +#endif + } +} + +/* Propagate all gray objects. */ +static size_t gc_propagate_gray(global_State *g) +{ + size_t m = 0; + while (gcref(g->gc.gray) != NULL) + m += propagatemark(g); + return m; +} + +/* -- Sweep phase --------------------------------------------------------- */ + +/* Try to shrink some common data structures. */ +static void gc_shrink(global_State *g, lua_State *L) +{ + if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1) + lj_str_resize(L, g->strmask >> 1); /* Shrink string table. 
*/ + if (g->tmpbuf.sz > LJ_MIN_SBUF*2) + lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1); /* Shrink temp buf. */ +} + +/* Type of GC free functions. */ +typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o); + +/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */ +static const GCFreeFunc gc_freefunc[] = { + (GCFreeFunc)lj_str_free, + (GCFreeFunc)lj_func_freeuv, + (GCFreeFunc)lj_state_free, + (GCFreeFunc)lj_func_freeproto, + (GCFreeFunc)lj_func_free, +#if LJ_HASJIT + (GCFreeFunc)lj_trace_free, +#else + (GCFreeFunc)0, +#endif +#if LJ_HASFFI + (GCFreeFunc)lj_cdata_free, +#else + (GCFreeFunc)0, +#endif + (GCFreeFunc)lj_tab_free, + (GCFreeFunc)lj_udata_free +}; + +/* Full sweep of a GC list. */ +#define gc_fullsweep(g, p) gc_sweep(g, (p), LJ_MAX_MEM) + +/* Partial sweep of a GC list. */ +static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim) +{ + /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */ + int ow = otherwhite(g); + GCobj *o; + while ((o = gcref(*p)) != NULL && lim-- > 0) { + if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */ + gc_fullsweep(g, &gco2th(o)->openupval); + if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */ + lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED)); + makewhite(g, o); /* Value is alive, change to the current white. */ + p = &o->gch.nextgc; + } else { /* Otherwise value is dead, free it. */ + lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED); + setgcrefr(*p, o->gch.nextgc); + if (o == gcref(g->gc.root)) + setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */ + gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o); + } + } + return p; +} + +/* Check whether we can clear a key or a value slot from a table. */ +static int gc_mayclear(cTValue *o, int val) +{ + if (tvisgcv(o)) { /* Only collectable objects can be weak references. */ + if (tvisstr(o)) { /* But strings cannot be used as weak references. */ + gc_mark_str(strV(o)); /* And need to be marked. */ + return 0; + } + if (iswhite(gcV(o))) + return 1; /* Object is about to be collected. */ + if (tvisudata(o) && val && isfinalized(udataV(o))) + return 1; /* Finalized userdata is dropped only from values. */ + } + return 0; /* Cannot clear. */ +} + +/* Clear collected entries from weak tables. */ +static void gc_clearweak(GCobj *o) +{ + while (o) { + GCtab *t = gco2tab(o); + lua_assert((t->marked & LJ_GC_WEAK)); + if ((t->marked & LJ_GC_WEAKVAL)) { + MSize i, asize = t->asize; + for (i = 0; i < asize; i++) { + /* Clear array slot when value is about to be collected. */ + TValue *tv = arrayslot(t, i); + if (gc_mayclear(tv, 1)) + setnilV(tv); + } + } + if (t->hmask > 0) { + Node *node = noderef(t->node); + MSize i, hmask = t->hmask; + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + /* Clear hash slot when key or value is about to be collected. */ + if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) || + gc_mayclear(&n->val, 1))) + setnilV(&n->val); + } + } + o = gcref(t->gclist); + } +} + +/* Call a userdata or cdata finalizer. */ +static void gc_call_finalizer(global_State *g, lua_State *L, + cTValue *mo, GCobj *o) +{ + /* Save and restore lots of state around the __gc callback. */ + uint8_t oldh = hook_save(g); + MSize oldt = g->gc.threshold; + int errcode; + TValue *top; + lj_trace_abort(g); + top = L->top; + L->top = top+2; + hook_entergc(g); /* Disable hooks and new traces during __gc. */ + g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. 
*/ + copyTV(L, top, mo); + setgcV(L, top+1, o, ~o->gch.gct); + errcode = lj_vm_pcall(L, top+1, 1+0, -1); /* Stack: |mo|o| -> | */ + hook_restore(g, oldh); + g->gc.threshold = oldt; /* Restore GC threshold. */ + if (errcode) + lj_err_throw(L, errcode); /* Propagate errors. */ +} + +/* Finalize one userdata or cdata object from the mmudata list. */ +static void gc_finalize(lua_State *L) +{ + global_State *g = G(L); + GCobj *o = gcnext(gcref(g->gc.mmudata)); + cTValue *mo; + lua_assert(gcref(g->jit_L) == NULL); /* Must not be called on trace. */ + /* Unchain from list of userdata to be finalized. */ + if (o == gcref(g->gc.mmudata)) + setgcrefnull(g->gc.mmudata); + else + setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc); +#if LJ_HASFFI + if (o->gch.gct == ~LJ_TCDATA) { + TValue tmp, *tv; + /* Add cdata back to the GC list and make it white. */ + setgcrefr(o->gch.nextgc, g->gc.root); + setgcref(g->gc.root, o); + makewhite(g, o); + o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN; + /* Resolve finalizer. */ + setcdataV(L, &tmp, gco2cd(o)); + tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp); + if (!tvisnil(tv)) { + copyTV(L, &tmp, tv); + setnilV(tv); /* Clear entry in finalizer table. */ + gc_call_finalizer(g, L, &tmp, o); + } + return; + } +#endif + /* Add userdata back to the main userdata list and make it white. */ + setgcrefr(o->gch.nextgc, mainthread(g)->nextgc); + setgcref(mainthread(g)->nextgc, o); + makewhite(g, o); + /* Resolve the __gc metamethod. */ + mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc); + if (mo) + gc_call_finalizer(g, L, mo, o); +} + +/* Finalize all userdata objects from mmudata list. */ +void lj_gc_finalize_udata(lua_State *L) +{ + while (gcref(G(L)->gc.mmudata) != NULL) + gc_finalize(L); +} + +#if LJ_HASFFI +/* Finalize all cdata objects from finalizer table. */ +void lj_gc_finalize_cdata(lua_State *L) +{ + global_State *g = G(L); + CTState *cts = ctype_ctsG(g); + if (cts) { + GCtab *t = cts->finalizer; + Node *node = noderef(t->node); + ptrdiff_t i; + setgcrefnull(t->metatable); /* Mark finalizer table as disabled. */ + for (i = (ptrdiff_t)t->hmask; i >= 0; i--) + if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) { + GCobj *o = gcV(&node[i].key); + TValue tmp; + makewhite(g, o); + o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN; + copyTV(L, &tmp, &node[i].val); + setnilV(&node[i].val); + gc_call_finalizer(g, L, &tmp, o); + } + } +} +#endif + +/* Free all remaining GC objects. */ +void lj_gc_freeall(global_State *g) +{ + MSize i, strmask; + /* Free everything, except super-fixed objects (the main thread). */ + g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED; + gc_fullsweep(g, &g->gc.root); + strmask = g->strmask; + for (i = 0; i <= strmask; i++) /* Free all string hash chains. */ + gc_fullsweep(g, &g->strhash[i]); +} + +/* -- Collector ----------------------------------------------------------- */ + +/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */ +static void atomic(global_State *g, lua_State *L) +{ + size_t udsize; + + gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */ + gc_propagate_gray(g); /* Propagate any left-overs. */ + + setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */ + setgcrefnull(g->gc.weak); + lua_assert(!iswhite(obj2gco(mainthread(g)))); + gc_markobj(g, L); /* Mark running thread. */ + gc_traverse_curtrace(g); /* Traverse current trace. */ + gc_mark_gcroot(g); /* Mark GC roots (again). */ + gc_propagate_gray(g); /* Propagate all of the above. 
*/ + + setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */ + setgcrefnull(g->gc.grayagain); + gc_propagate_gray(g); /* Propagate it. */ + + udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */ + gc_mark_mmudata(g); /* Mark them. */ + udsize += gc_propagate_gray(g); /* And propagate the marks. */ + + /* All marking done, clear weak tables. */ + gc_clearweak(gcref(g->gc.weak)); + + /* Prepare for sweep phase. */ + g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */ + g->strempty.marked = g->gc.currentwhite; + setmref(g->gc.sweep, &g->gc.root); + g->gc.estimate = g->gc.total - (MSize)udsize; /* Initial estimate. */ +} + +/* GC state machine. Returns a cost estimate for each step performed. */ +static size_t gc_onestep(lua_State *L) +{ + global_State *g = G(L); + switch (g->gc.state) { + case GCSpause: + gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */ + return 0; + case GCSpropagate: + if (gcref(g->gc.gray) != NULL) + return propagatemark(g); /* Propagate one gray object. */ + g->gc.state = GCSatomic; /* End of mark phase. */ + return 0; + case GCSatomic: + if (gcref(g->jit_L)) /* Don't run atomic phase on trace. */ + return LJ_MAX_MEM; + atomic(g, L); + g->gc.state = GCSsweepstring; /* Start of sweep phase. */ + g->gc.sweepstr = 0; + return 0; + case GCSsweepstring: { + MSize old = g->gc.total; + gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */ + if (g->gc.sweepstr > g->strmask) + g->gc.state = GCSsweep; /* All string hash chains sweeped. */ + lua_assert(old >= g->gc.total); + g->gc.estimate -= old - g->gc.total; + return GCSWEEPCOST; + } + case GCSsweep: { + MSize old = g->gc.total; + setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX)); + if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) { + gc_shrink(g, L); + if (gcref(g->gc.mmudata)) { /* Need any finalizations? */ + g->gc.state = GCSfinalize; + } else { /* Otherwise skip this phase to help the JIT. */ + g->gc.state = GCSpause; /* End of GC cycle. */ + g->gc.debt = 0; + } + } + lua_assert(old >= g->gc.total); + g->gc.estimate -= old - g->gc.total; + return GCSWEEPMAX*GCSWEEPCOST; + } + case GCSfinalize: + if (gcref(g->gc.mmudata) != NULL) { + if (gcref(g->jit_L)) /* Don't call finalizers on trace. */ + return LJ_MAX_MEM; + gc_finalize(L); /* Finalize one userdata object. */ + if (g->gc.estimate > GCFINALIZECOST) + g->gc.estimate -= GCFINALIZECOST; + return GCFINALIZECOST; + } + g->gc.state = GCSpause; /* End of GC cycle. */ + g->gc.debt = 0; + return 0; + default: + lua_assert(0); + return 0; + } +} + +/* Perform a limited amount of incremental GC steps. */ +int LJ_FASTCALL lj_gc_step(lua_State *L) +{ + global_State *g = G(L); + MSize lim; + int32_t ostate = g->vmstate; + setvmstate(g, GC); + lim = (GCSTEPSIZE/100) * g->gc.stepmul; + if (lim == 0) + lim = LJ_MAX_MEM; + g->gc.debt += g->gc.total - g->gc.threshold; + do { + lim -= (MSize)gc_onestep(L); + if (g->gc.state == GCSpause) { + g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; + g->vmstate = ostate; + return 1; /* Finished a GC cycle. */ + } + } while ((int32_t)lim > 0); + if (g->gc.debt < GCSTEPSIZE) { + g->gc.threshold = g->gc.total + GCSTEPSIZE; + } else { + g->gc.debt -= GCSTEPSIZE; + g->gc.threshold = g->gc.total; + } + g->vmstate = ostate; + return 0; +} + +/* Ditto, but fix the stack top first. 
*/ +void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L) +{ + if (curr_funcisL(L)) L->top = curr_topL(L); + lj_gc_step(L); +} + +#if LJ_HASJIT +/* Perform multiple GC steps. Called from JIT-compiled code. */ +int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps) +{ + lua_State *L = gco2th(gcref(g->jit_L)); + L->base = mref(G(L)->jit_base, TValue); + L->top = curr_topL(L); + while (steps-- > 0 && lj_gc_step(L) == 0) + ; + /* Return 1 to force a trace exit. */ + return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize); +} +#endif + +/* Perform a full GC cycle. */ +void lj_gc_fullgc(lua_State *L) +{ + global_State *g = G(L); + int32_t ostate = g->vmstate; + setvmstate(g, GC); + if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */ + setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */ + setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */ + setgcrefnull(g->gc.grayagain); + setgcrefnull(g->gc.weak); + g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */ + g->gc.sweepstr = 0; + } + while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep) + gc_onestep(L); /* Finish sweep. */ + lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause); + /* Now perform a full GC. */ + g->gc.state = GCSpause; + do { gc_onestep(L); } while (g->gc.state != GCSpause); + g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; + g->vmstate = ostate; +} + +/* -- Write barriers ------------------------------------------------------ */ + +/* Move the GC propagation frontier forward. */ +void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v) +{ + lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); + lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); + lua_assert(o->gch.gct != ~LJ_TTAB); + /* Preserve invariant during propagation. Otherwise it doesn't matter. */ + if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) + gc_mark(g, v); /* Move frontier forward. */ + else + makewhite(g, o); /* Make it white to avoid the following barrier. */ +} + +/* Specialized barrier for closed upvalue. Pass &uv->tv. */ +void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv) +{ +#define TV2MARKED(x) \ + (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked))) + if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) + gc_mark(g, gcV(tv)); + else + TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g); +#undef TV2MARKED +} + +/* Close upvalue. Also needs a write barrier. */ +void lj_gc_closeuv(global_State *g, GCupval *uv) +{ + GCobj *o = obj2gco(uv); + /* Copy stack slot to upvalue itself and point to the copy. */ + copyTV(mainthread(g), &uv->tv, uvval(uv)); + setmref(uv->v, &uv->tv); + uv->closed = 1; + setgcrefr(o->gch.nextgc, g->gc.root); + setgcref(g->gc.root, o); + if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */ + if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) { + gray2black(o); /* Make it black and preserve invariant. */ + if (tviswhite(&uv->tv)) + lj_gc_barrierf(g, o, gcV(&uv->tv)); + } else { + makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */ + lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); + } + } +} + +#if LJ_HASJIT +/* Mark a trace if it's saved during the propagation phase. 
*/ +void lj_gc_barriertrace(global_State *g, uint32_t traceno) +{ + if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) + gc_marktrace(g, traceno); +} +#endif + +/* -- Allocator ----------------------------------------------------------- */ + +/* Call pluggable memory allocator to allocate or resize a fragment. */ +void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz) +{ + global_State *g = G(L); + lua_assert((osz == 0) == (p == NULL)); + p = g->allocf(g->allocd, p, osz, nsz); + if (p == NULL && nsz > 0) + lj_err_mem(L); + lua_assert((nsz == 0) == (p == NULL)); + lua_assert(checkptr32(p)); + g->gc.total = (g->gc.total - osz) + nsz; + return p; +} + +/* Allocate new GC object and link it to the root set. */ +void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size) +{ + global_State *g = G(L); + GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size); + if (o == NULL) + lj_err_mem(L); + lua_assert(checkptr32(o)); + g->gc.total += size; + setgcrefr(o->gch.nextgc, g->gc.root); + setgcref(g->gc.root, o); + newwhite(g, o); + return o; +} + +/* Resize growable vector. */ +void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz) +{ + MSize sz = (*szp) << 1; + if (sz < LJ_MIN_VECSZ) + sz = LJ_MIN_VECSZ; + if (sz > lim) + sz = lim; + p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz); + *szp = sz; + return p; +} + diff --git a/src/luajit2/src/lj_gc.h b/src/luajit2/src/lj_gc.h new file mode 100644 index 0000000000..dd7f87ba21 --- /dev/null +++ b/src/luajit2/src/lj_gc.h @@ -0,0 +1,133 @@ +/* +** Garbage collector. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_GC_H +#define _LJ_GC_H + +#include "lj_obj.h" + +/* Garbage collector states. Order matters. */ +enum { + GCSpause, GCSpropagate, GCSatomic, GCSsweepstring, GCSsweep, GCSfinalize +}; + +/* Bitmasks for marked field of GCobj. */ +#define LJ_GC_WHITE0 0x01 +#define LJ_GC_WHITE1 0x02 +#define LJ_GC_BLACK 0x04 +#define LJ_GC_FINALIZED 0x08 +#define LJ_GC_WEAKKEY 0x08 +#define LJ_GC_WEAKVAL 0x10 +#define LJ_GC_CDATA_FIN 0x10 +#define LJ_GC_FIXED 0x20 +#define LJ_GC_SFIXED 0x40 + +#define LJ_GC_WHITES (LJ_GC_WHITE0 | LJ_GC_WHITE1) +#define LJ_GC_COLORS (LJ_GC_WHITES | LJ_GC_BLACK) +#define LJ_GC_WEAK (LJ_GC_WEAKKEY | LJ_GC_WEAKVAL) + +/* Macros to test and set GCobj colors. */ +#define iswhite(x) ((x)->gch.marked & LJ_GC_WHITES) +#define isblack(x) ((x)->gch.marked & LJ_GC_BLACK) +#define isgray(x) (!((x)->gch.marked & (LJ_GC_BLACK|LJ_GC_WHITES))) +#define tviswhite(x) (tvisgcv(x) && iswhite(gcV(x))) +#define otherwhite(g) (g->gc.currentwhite ^ LJ_GC_WHITES) +#define isdead(g, v) ((v)->gch.marked & otherwhite(g) & LJ_GC_WHITES) + +#define curwhite(g) ((g)->gc.currentwhite & LJ_GC_WHITES) +#define newwhite(g, x) (obj2gco(x)->gch.marked = (uint8_t)curwhite(g)) +#define makewhite(g, x) \ + ((x)->gch.marked = ((x)->gch.marked & (uint8_t)~LJ_GC_COLORS) | curwhite(g)) +#define flipwhite(x) ((x)->gch.marked ^= LJ_GC_WHITES) +#define black2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_BLACK) +#define fixstring(s) ((s)->marked |= LJ_GC_FIXED) + +/* Collector. 
*/ +LJ_FUNC size_t lj_gc_separateudata(global_State *g, int all); +LJ_FUNC void lj_gc_finalize_udata(lua_State *L); +#if LJ_HASFFI +LJ_FUNC void lj_gc_finalize_cdata(lua_State *L); +#else +#define lj_gc_finalize_cdata(L) UNUSED(L) +#endif +LJ_FUNC void lj_gc_freeall(global_State *g); +LJ_FUNCA int LJ_FASTCALL lj_gc_step(lua_State *L); +LJ_FUNCA void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L); +#if LJ_HASJIT +LJ_FUNC int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps); +#endif +LJ_FUNC void lj_gc_fullgc(lua_State *L); + +/* GC check: drive collector forward if the GC threshold has been reached. */ +#define lj_gc_check(L) \ + { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \ + lj_gc_step(L); } +#define lj_gc_check_fixtop(L) \ + { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \ + lj_gc_step_fixtop(L); } + +/* Write barriers. */ +LJ_FUNC void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v); +LJ_FUNCA void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv); +LJ_FUNC void lj_gc_closeuv(global_State *g, GCupval *uv); +#if LJ_HASJIT +LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno); +#endif + +/* Move the GC propagation frontier back for tables (make it gray again). */ +static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t) +{ + GCobj *o = obj2gco(t); + lua_assert(isblack(o) && !isdead(g, o)); + lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); + black2gray(o); + setgcrefr(t->gclist, g->gc.grayagain); + setgcref(g->gc.grayagain, o); +} + +/* Barrier for stores to table objects. TValue and GCobj variant. */ +#define lj_gc_anybarriert(L, t) \ + { if (LJ_UNLIKELY(isblack(obj2gco(t)))) lj_gc_barrierback(G(L), (t)); } +#define lj_gc_barriert(L, t, tv) \ + { if (tviswhite(tv) && isblack(obj2gco(t))) \ + lj_gc_barrierback(G(L), (t)); } +#define lj_gc_objbarriert(L, t, o) \ + { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) \ + lj_gc_barrierback(G(L), (t)); } + +/* Barrier for stores to any other object. TValue and GCobj variant. */ +#define lj_gc_barrier(L, p, tv) \ + { if (tviswhite(tv) && isblack(obj2gco(p))) \ + lj_gc_barrierf(G(L), obj2gco(p), gcV(tv)); } +#define lj_gc_objbarrier(L, p, o) \ + { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \ + lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); } + +/* Allocator. 
*/ +LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz); +LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size); +LJ_FUNC void *lj_mem_grow(lua_State *L, void *p, + MSize *szp, MSize lim, MSize esz); + +#define lj_mem_new(L, s) lj_mem_realloc(L, NULL, 0, (s)) + +static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize) +{ + g->gc.total -= (MSize)osize; + g->allocf(g->allocd, p, osize, 0); +} + +#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (MSize)((n)*sizeof(t)))) +#define lj_mem_reallocvec(L, p, on, n, t) \ + ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (MSize)((n)*sizeof(t)))) +#define lj_mem_growvec(L, p, n, m, t) \ + ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t))) +#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t)) + +#define lj_mem_newobj(L, t) ((t *)lj_mem_newgco(L, sizeof(t))) +#define lj_mem_newt(L, s, t) ((t *)lj_mem_new(L, (s))) +#define lj_mem_freet(g, p) lj_mem_free(g, (p), sizeof(*(p))) + +#endif diff --git a/src/luajit2/src/lj_gdbjit.c b/src/luajit2/src/lj_gdbjit.c new file mode 100644 index 0000000000..acbe429ab9 --- /dev/null +++ b/src/luajit2/src/lj_gdbjit.c @@ -0,0 +1,762 @@ +/* +** Client for the GDB JIT API. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_gdbjit_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_frame.h" +#include "lj_jit.h" +#include "lj_dispatch.h" + +/* This is not compiled in by default. +** Enable with -DLUAJIT_USE_GDBJIT in the Makefile and recompile everything. +*/ +#ifdef LUAJIT_USE_GDBJIT + +/* The GDB JIT API allows JIT compilers to pass debug information about +** JIT-compiled code back to GDB. You need at least GDB 7.0 or higher +** to see it in action. +** +** This is a passive API, so it works even when not running under GDB +** or when attaching to an already running process. Alas, this implies +** enabling it always has a non-negligible overhead -- do not use in +** release mode! +** +** The LuaJIT GDB JIT client is rather minimal at the moment. It gives +** each trace a symbol name and adds a source location and frame unwind +** information. Obviously LuaJIT itself and any embedding C application +** should be compiled with debug symbols, too (see the Makefile). +** +** Traces are named TRACE_1, TRACE_2, ... these correspond to the trace +** numbers from -jv or -jdump. Use "break TRACE_1" or "tbreak TRACE_1" etc. +** to set breakpoints on specific traces (even ahead of their creation). +** +** The source location for each trace allows listing the corresponding +** source lines with the GDB command "list" (but only if the Lua source +** has been loaded from a file). Currently this is always set to the +** location where the trace has been started. +** +** Frame unwind information can be inspected with the GDB command +** "info frame". This also allows proper backtraces across JIT-compiled +** code with the GDB command "bt". 
+** +** You probably want to add the following settings to a .gdbinit file +** (or add them to ~/.gdbinit): +** set disassembly-flavor intel +** set breakpoint pending on +** +** Here's a sample GDB session: +** ------------------------------------------------------------------------ + +$ cat >x.lua +for outer=1,100 do + for inner=1,100 do end +end +^D + +$ luajit -jv x.lua +[TRACE 1 x.lua:2] +[TRACE 2 (1/3) x.lua:1 -> 1] + +$ gdb --quiet --args luajit x.lua +(gdb) tbreak TRACE_1 +Function "TRACE_1" not defined. +Temporary breakpoint 1 (TRACE_1) pending. +(gdb) run +Starting program: luajit x.lua + +Temporary breakpoint 1, TRACE_1 () at x.lua:2 +2 for inner=1,100 do end +(gdb) list +1 for outer=1,100 do +2 for inner=1,100 do end +3 end +(gdb) bt +#0 TRACE_1 () at x.lua:2 +#1 0x08053690 in lua_pcall [...] +[...] +#7 0x0806ff90 in main [...] +(gdb) disass TRACE_1 +Dump of assembler code for function TRACE_1: +0xf7fd9fba <TRACE_1+0>: mov DWORD PTR ds:0xf7e0e2a0,0x1 +0xf7fd9fc4 <TRACE_1+10>: movsd xmm7,QWORD PTR [edx+0x20] +[...] +0xf7fd9ff8 <TRACE_1+62>: jmp 0xf7fd2014 +End of assembler dump. +(gdb) tbreak TRACE_2 +Function "TRACE_2" not defined. +Temporary breakpoint 2 (TRACE_2) pending. +(gdb) cont +Continuing. + +Temporary breakpoint 2, TRACE_2 () at x.lua:1 +1 for outer=1,100 do +(gdb) info frame +Stack level 0, frame at 0xffffd7c0: + eip = 0xf7fd9f60 in TRACE_2 (x.lua:1); saved eip 0x8053690 + called by frame at 0xffffd7e0 + source language unknown. + Arglist at 0xffffd78c, args: + Locals at 0xffffd78c, Previous frame's sp is 0xffffd7c0 + Saved registers: + ebx at 0xffffd7ac, ebp at 0xffffd7b8, esi at 0xffffd7b0, edi at 0xffffd7b4, + eip at 0xffffd7bc +(gdb) + +** ------------------------------------------------------------------------ +*/ + +/* -- GDB JIT API --------------------------------------------------------- */ + +/* GDB JIT actions. */ +enum { + GDBJIT_NOACTION = 0, + GDBJIT_REGISTER, + GDBJIT_UNREGISTER +}; + +/* GDB JIT entry. */ +typedef struct GDBJITentry { + struct GDBJITentry *next_entry; + struct GDBJITentry *prev_entry; + const char *symfile_addr; + uint64_t symfile_size; +} GDBJITentry; + +/* GDB JIT descriptor. */ +typedef struct GDBJITdesc { + uint32_t version; + uint32_t action_flag; + GDBJITentry *relevant_entry; + GDBJITentry *first_entry; +} GDBJITdesc; + +GDBJITdesc __jit_debug_descriptor = { + 1, GDBJIT_NOACTION, NULL, NULL +}; + +/* GDB sets a breakpoint at this function. */ +void LJ_NOINLINE __jit_debug_register_code() +{ + __asm__ __volatile__(""); +}; + +/* -- In-memory ELF object definitions ------------------------------------ */ + +/* ELF definitions. 
*/ +typedef struct ELFheader { + uint8_t emagic[4]; + uint8_t eclass; + uint8_t eendian; + uint8_t eversion; + uint8_t eosabi; + uint8_t eabiversion; + uint8_t epad[7]; + uint16_t type; + uint16_t machine; + uint32_t version; + uintptr_t entry; + uintptr_t phofs; + uintptr_t shofs; + uint32_t flags; + uint16_t ehsize; + uint16_t phentsize; + uint16_t phnum; + uint16_t shentsize; + uint16_t shnum; + uint16_t shstridx; +} ELFheader; + +typedef struct ELFsectheader { + uint32_t name; + uint32_t type; + uintptr_t flags; + uintptr_t addr; + uintptr_t ofs; + uintptr_t size; + uint32_t link; + uint32_t info; + uintptr_t align; + uintptr_t entsize; +} ELFsectheader; + +#define ELFSECT_IDX_ABS 0xfff1 + +enum { + ELFSECT_TYPE_PROGBITS = 1, + ELFSECT_TYPE_SYMTAB = 2, + ELFSECT_TYPE_STRTAB = 3, + ELFSECT_TYPE_NOBITS = 8 +}; + +#define ELFSECT_FLAGS_WRITE 1 +#define ELFSECT_FLAGS_ALLOC 2 +#define ELFSECT_FLAGS_EXEC 4 + +typedef struct ELFsymbol { +#if LJ_64 + uint32_t name; + uint8_t info; + uint8_t other; + uint16_t sectidx; + uintptr_t value; + uint64_t size; +#else + uint32_t name; + uintptr_t value; + uint32_t size; + uint8_t info; + uint8_t other; + uint16_t sectidx; +#endif +} ELFsymbol; + +enum { + ELFSYM_TYPE_FUNC = 2, + ELFSYM_TYPE_FILE = 4, + ELFSYM_BIND_LOCAL = 0 << 4, + ELFSYM_BIND_GLOBAL = 1 << 4, +}; + +/* DWARF definitions. */ +#define DW_CIE_VERSION 1 + +enum { + DW_CFA_nop = 0x0, + DW_CFA_def_cfa = 0xc, + DW_CFA_def_cfa_offset = 0xe, + DW_CFA_advance_loc = 0x40, + DW_CFA_offset = 0x80 +}; + +enum { + DW_EH_PE_udata4 = 3, + DW_EH_PE_textrel = 0x20 +}; + +enum { + DW_TAG_compile_unit = 0x11 +}; + +enum { + DW_children_no = 0, + DW_children_yes = 1 +}; + +enum { + DW_AT_name = 0x03, + DW_AT_stmt_list = 0x10, + DW_AT_low_pc = 0x11, + DW_AT_high_pc = 0x12 +}; + +enum { + DW_FORM_addr = 0x01, + DW_FORM_data4 = 0x06, + DW_FORM_string = 0x08 +}; + +enum { + DW_LNS_extended_op = 0, + DW_LNS_copy = 1, + DW_LNS_advance_pc = 2, + DW_LNS_advance_line = 3 +}; + +enum { + DW_LNE_end_sequence = 1, + DW_LNE_set_address = 2 +}; + +enum { +#if LJ_TARGET_X86 + DW_REG_AX, DW_REG_CX, DW_REG_DX, DW_REG_BX, + DW_REG_SP, DW_REG_BP, DW_REG_SI, DW_REG_DI, + DW_REG_RA, +#elif LJ_TARGET_X64 + /* Yes, the order is strange, but correct. */ + DW_REG_AX, DW_REG_DX, DW_REG_CX, DW_REG_BX, + DW_REG_SI, DW_REG_DI, DW_REG_BP, DW_REG_SP, + DW_REG_8, DW_REG_9, DW_REG_10, DW_REG_11, + DW_REG_12, DW_REG_13, DW_REG_14, DW_REG_15, + DW_REG_RA, +#elif LJ_TARGET_ARM + DW_REG_SP = 13, + DW_REG_RA = 14, +#else +#error "Unsupported target architecture" +#endif +}; + +/* Minimal list of sections for the in-memory ELF object. */ +enum { + GDBJIT_SECT_NULL, + GDBJIT_SECT_text, + GDBJIT_SECT_eh_frame, + GDBJIT_SECT_shstrtab, + GDBJIT_SECT_strtab, + GDBJIT_SECT_symtab, + GDBJIT_SECT_debug_info, + GDBJIT_SECT_debug_abbrev, + GDBJIT_SECT_debug_line, + GDBJIT_SECT__MAX +}; + +enum { + GDBJIT_SYM_UNDEF, + GDBJIT_SYM_FILE, + GDBJIT_SYM_FUNC, + GDBJIT_SYM__MAX +}; + +/* In-memory ELF object. */ +typedef struct GDBJITobj { + ELFheader hdr; /* ELF header. */ + ELFsectheader sect[GDBJIT_SECT__MAX]; /* ELF sections. */ + ELFsymbol sym[GDBJIT_SYM__MAX]; /* ELF symbol table. */ + uint8_t space[4096]; /* Space for various section data. */ +} GDBJITobj; + +/* Combined structure for GDB JIT entry and ELF object. */ +typedef struct GDBJITentryobj { + GDBJITentry entry; + size_t sz; + GDBJITobj obj; +} GDBJITentryobj; + +/* Template for in-memory ELF header. 
*/ +static const ELFheader elfhdr_template = { + .emagic = { 0x7f, 'E', 'L', 'F' }, + .eclass = LJ_64 ? 2 : 1, + .eendian = LJ_ENDIAN_SELECT(1, 2), + .eversion = 1, +#if LJ_TARGET_LINUX + .eosabi = 0, /* Nope, it's not 3. */ +#elif defined(__FreeBSD__) + .eosabi = 9, +#elif defined(__NetBSD__) + .eosabi = 2, +#elif defined(__OpenBSD__) + .eosabi = 12, +#elif (defined(__sun__) && defined(__svr4__)) || defined(__solaris__) + .eosabi = 6, +#else + .eosabi = 0, +#endif + .eabiversion = 0, + .epad = { 0, 0, 0, 0, 0, 0, 0 }, + .type = 1, +#if LJ_TARGET_X86 + .machine = 3, +#elif LJ_TARGET_X64 + .machine = 62, +#elif LJ_TARGET_ARM + .machine = 40, +#else +#error "Unsupported target architecture" +#endif + .version = 1, + .entry = 0, + .phofs = 0, + .shofs = offsetof(GDBJITobj, sect), + .flags = 0, + .ehsize = sizeof(ELFheader), + .phentsize = 0, + .phnum = 0, + .shentsize = sizeof(ELFsectheader), + .shnum = GDBJIT_SECT__MAX, + .shstridx = GDBJIT_SECT_shstrtab +}; + +/* -- In-memory ELF object generation ------------------------------------- */ + +/* Context for generating the ELF object for the GDB JIT API. */ +typedef struct GDBJITctx { + uint8_t *p; /* Pointer to next address in obj.space. */ + uint8_t *startp; /* Pointer to start address in obj.space. */ + GCtrace *T; /* Generate symbols for this trace. */ + uintptr_t mcaddr; /* Machine code address. */ + MSize szmcode; /* Size of machine code. */ + MSize spadjp; /* Stack adjustment for parent trace or interpreter. */ + MSize spadj; /* Stack adjustment for trace itself. */ + BCLine lineno; /* Starting line number. */ + const char *filename; /* Starting file name. */ + size_t objsize; /* Final size of ELF object. */ + GDBJITobj obj; /* In-memory ELF object. */ +} GDBJITctx; + +/* Add a zero-terminated string. */ +static uint32_t gdbjit_strz(GDBJITctx *ctx, const char *str) +{ + uint8_t *p = ctx->p; + uint32_t ofs = (uint32_t)(p - ctx->startp); + do { + *p++ = (uint8_t)*str; + } while (*str++); + ctx->p = p; + return ofs; +} + +/* Append a decimal number. */ +static void gdbjit_catnum(GDBJITctx *ctx, uint32_t n) +{ + if (n >= 10) { uint32_t m = n / 10; n = n % 10; gdbjit_catnum(ctx, m); } + *ctx->p++ = '0' + n; +} + +/* Add a ULEB128 value. */ +static void gdbjit_uleb128(GDBJITctx *ctx, uint32_t v) +{ + uint8_t *p = ctx->p; + for (; v >= 0x80; v >>= 7) + *p++ = (uint8_t)((v & 0x7f) | 0x80); + *p++ = (uint8_t)v; + ctx->p = p; +} + +/* Add a SLEB128 value. */ +static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v) +{ + uint8_t *p = ctx->p; + for (; (uint32_t)(v+0x40) >= 0x80; v >>= 7) + *p++ = (uint8_t)((v & 0x7f) | 0x80); + *p++ = (uint8_t)(v & 0x7f); + ctx->p = p; +} + +/* Shortcuts to generate DWARF structures. */ +#define DB(x) (*p++ = (x)) +#define DI8(x) (*(int8_t *)p = (x), p++) +#define DU16(x) (*(uint16_t *)p = (x), p += 2) +#define DU32(x) (*(uint32_t *)p = (x), p += 4) +#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t)) +#define DUV(x) (ctx->p = p, gdbjit_uleb128(ctx, (x)), p = ctx->p) +#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p) +#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p) +#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop +#define DSECT(name, stmt) \ + { uint32_t *szp_##name = (uint32_t *)p; p += 4; stmt \ + *szp_##name = (uint32_t)((p-(uint8_t *)szp_##name)-4); } \ + +/* Initialize ELF section headers. */ +static void LJ_FASTCALL gdbjit_secthdr(GDBJITctx *ctx) +{ + ELFsectheader *sect; + + *ctx->p++ = '\0'; /* Empty string at start of string table. 
*/ + +#define SECTDEF(id, tp, al) \ + sect = &ctx->obj.sect[GDBJIT_SECT_##id]; \ + sect->name = gdbjit_strz(ctx, "." #id); \ + sect->type = ELFSECT_TYPE_##tp; \ + sect->align = (al) + + SECTDEF(text, NOBITS, 16); + sect->flags = ELFSECT_FLAGS_ALLOC|ELFSECT_FLAGS_EXEC; + sect->addr = ctx->mcaddr; + sect->ofs = 0; + sect->size = ctx->szmcode; + + SECTDEF(eh_frame, PROGBITS, sizeof(uintptr_t)); + sect->flags = ELFSECT_FLAGS_ALLOC; + + SECTDEF(shstrtab, STRTAB, 1); + SECTDEF(strtab, STRTAB, 1); + + SECTDEF(symtab, SYMTAB, sizeof(uintptr_t)); + sect->ofs = offsetof(GDBJITobj, sym); + sect->size = sizeof(ctx->obj.sym); + sect->link = GDBJIT_SECT_strtab; + sect->entsize = sizeof(ELFsymbol); + sect->info = GDBJIT_SYM_FUNC; + + SECTDEF(debug_info, PROGBITS, 1); + SECTDEF(debug_abbrev, PROGBITS, 1); + SECTDEF(debug_line, PROGBITS, 1); + +#undef SECTDEF +} + +/* Initialize symbol table. */ +static void LJ_FASTCALL gdbjit_symtab(GDBJITctx *ctx) +{ + ELFsymbol *sym; + + *ctx->p++ = '\0'; /* Empty string at start of string table. */ + + sym = &ctx->obj.sym[GDBJIT_SYM_FILE]; + sym->name = gdbjit_strz(ctx, "JIT mcode"); + sym->sectidx = ELFSECT_IDX_ABS; + sym->info = ELFSYM_TYPE_FILE|ELFSYM_BIND_LOCAL; + + sym = &ctx->obj.sym[GDBJIT_SYM_FUNC]; + sym->name = gdbjit_strz(ctx, "TRACE_"); ctx->p--; + gdbjit_catnum(ctx, ctx->T->traceno); *ctx->p++ = '\0'; + sym->sectidx = GDBJIT_SECT_text; + sym->value = 0; + sym->size = ctx->szmcode; + sym->info = ELFSYM_TYPE_FUNC|ELFSYM_BIND_GLOBAL; +} + +/* Initialize .eh_frame section. */ +static void LJ_FASTCALL gdbjit_ehframe(GDBJITctx *ctx) +{ + uint8_t *p = ctx->p; + uint8_t *framep = p; + + /* Emit DWARF EH CIE. */ + DSECT(CIE, + DU32(0); /* Offset to CIE itself. */ + DB(DW_CIE_VERSION); + DSTR("zR"); /* Augmentation. */ + DUV(1); /* Code alignment factor. */ + DSV(-(int32_t)sizeof(uintptr_t)); /* Data alignment factor. */ + DB(DW_REG_RA); /* Return address register. */ + DB(1); DB(DW_EH_PE_textrel|DW_EH_PE_udata4); /* Augmentation data. */ + DB(DW_CFA_def_cfa); DUV(DW_REG_SP); DUV(sizeof(uintptr_t)); + DB(DW_CFA_offset|DW_REG_RA); DUV(1); + DALIGNNOP(sizeof(uintptr_t)); + ) + + /* Emit DWARF EH FDE. */ + DSECT(FDE, + DU32((uint32_t)(p-framep)); /* Offset to CIE. */ + DU32(0); /* Machine code offset relative to .text. */ + DU32(ctx->szmcode); /* Machine code length. */ + DB(0); /* Augmentation data. */ + /* Registers saved in CFRAME. */ +#if LJ_TARGET_X86 + DB(DW_CFA_offset|DW_REG_BP); DUV(2); + DB(DW_CFA_offset|DW_REG_DI); DUV(3); + DB(DW_CFA_offset|DW_REG_SI); DUV(4); + DB(DW_CFA_offset|DW_REG_BX); DUV(5); +#elif LJ_TARGET_X64 + DB(DW_CFA_offset|DW_REG_BP); DUV(2); + DB(DW_CFA_offset|DW_REG_BX); DUV(3); + DB(DW_CFA_offset|DW_REG_15); DUV(4); + DB(DW_CFA_offset|DW_REG_14); DUV(5); + /* Extra registers saved for JIT-compiled code. */ + DB(DW_CFA_offset|DW_REG_13); DUV(9); + DB(DW_CFA_offset|DW_REG_12); DUV(10); +#elif LJ_TARGET_ARM + { + int i; + for (i = 11; i >= 4; i--) { /* R4-R11. */ + DB(DW_CFA_offset|i); DUV(2+(11-i)); + } + } +#else +#error "Unsupported target architecture" +#endif + if (ctx->spadjp != ctx->spadj) { /* Parent/interpreter stack frame size. */ + DB(DW_CFA_def_cfa_offset); DUV(ctx->spadjp); + DB(DW_CFA_advance_loc|1); /* Only an approximation. */ + } + DB(DW_CFA_def_cfa_offset); DUV(ctx->spadj); /* Trace stack frame size. */ + DALIGNNOP(sizeof(uintptr_t)); + ) + + ctx->p = p; +} + +/* Initialize .debug_info section. */ +static void LJ_FASTCALL gdbjit_debuginfo(GDBJITctx *ctx) +{ + uint8_t *p = ctx->p; + + DSECT(info, + DU16(2); /* DWARF version. 
*/ + DU32(0); /* Abbrev offset. */ + DB(sizeof(uintptr_t)); /* Pointer size. */ + + DUV(1); /* Abbrev #1: DW_TAG_compile_unit. */ + DSTR(ctx->filename); /* DW_AT_name. */ + DADDR(ctx->mcaddr); /* DW_AT_low_pc. */ + DADDR(ctx->mcaddr + ctx->szmcode); /* DW_AT_high_pc. */ + DU32(0); /* DW_AT_stmt_list. */ + ) + + ctx->p = p; +} + +/* Initialize .debug_abbrev section. */ +static void LJ_FASTCALL gdbjit_debugabbrev(GDBJITctx *ctx) +{ + uint8_t *p = ctx->p; + + /* Abbrev #1: DW_TAG_compile_unit. */ + DUV(1); DUV(DW_TAG_compile_unit); + DB(DW_children_no); + DUV(DW_AT_name); DUV(DW_FORM_string); + DUV(DW_AT_low_pc); DUV(DW_FORM_addr); + DUV(DW_AT_high_pc); DUV(DW_FORM_addr); + DUV(DW_AT_stmt_list); DUV(DW_FORM_data4); + DB(0); DB(0); + + ctx->p = p; +} + +#define DLNE(op, s) (DB(DW_LNS_extended_op), DUV(1+(s)), DB((op))) + +/* Initialize .debug_line section. */ +static void LJ_FASTCALL gdbjit_debugline(GDBJITctx *ctx) +{ + uint8_t *p = ctx->p; + + DSECT(line, + DU16(2); /* DWARF version. */ + DSECT(header, + DB(1); /* Minimum instruction length. */ + DB(1); /* is_stmt. */ + DI8(0); /* Line base for special opcodes. */ + DB(2); /* Line range for special opcodes. */ + DB(3+1); /* Opcode base at DW_LNS_advance_line+1. */ + DB(0); DB(1); DB(1); /* Standard opcode lengths. */ + /* Directory table. */ + DB(0); + /* File name table. */ + DSTR(ctx->filename); DUV(0); DUV(0); DUV(0); + DB(0); + ) + + DLNE(DW_LNE_set_address, sizeof(uintptr_t)); DADDR(ctx->mcaddr); + if (ctx->lineno) { + DB(DW_LNS_advance_line); DSV(ctx->lineno-1); + } + DB(DW_LNS_copy); + DB(DW_LNS_advance_pc); DUV(ctx->szmcode); + DLNE(DW_LNE_end_sequence, 0); + ) + + ctx->p = p; +} + +#undef DLNE + +/* Undef shortcuts. */ +#undef DB +#undef DI8 +#undef DU16 +#undef DU32 +#undef DADDR +#undef DUV +#undef DSV +#undef DSTR +#undef DALIGNNOP +#undef DSECT + +/* Type of a section initializer callback. */ +typedef void (LJ_FASTCALL *GDBJITinitf)(GDBJITctx *ctx); + +/* Call section initializer and set the section offset and size. */ +static void gdbjit_initsect(GDBJITctx *ctx, int sect, GDBJITinitf initf) +{ + ctx->startp = ctx->p; + ctx->obj.sect[sect].ofs = (uintptr_t)((char *)ctx->p - (char *)&ctx->obj); + initf(ctx); + ctx->obj.sect[sect].size = (uintptr_t)(ctx->p - ctx->startp); +} + +#define SECTALIGN(p, a) \ + ((p) = (uint8_t *)(((uintptr_t)(p) + ((a)-1)) & ~(uintptr_t)((a)-1))) + +/* Build in-memory ELF object. */ +static void gdbjit_buildobj(GDBJITctx *ctx) +{ + GDBJITobj *obj = &ctx->obj; + /* Fill in ELF header and clear structures. */ + memcpy(&obj->hdr, &elfhdr_template, sizeof(ELFheader)); + memset(&obj->sect, 0, sizeof(ELFsectheader)*GDBJIT_SECT__MAX); + memset(&obj->sym, 0, sizeof(ELFsymbol)*GDBJIT_SYM__MAX); + /* Initialize sections. */ + ctx->p = obj->space; + gdbjit_initsect(ctx, GDBJIT_SECT_shstrtab, gdbjit_secthdr); + gdbjit_initsect(ctx, GDBJIT_SECT_strtab, gdbjit_symtab); + gdbjit_initsect(ctx, GDBJIT_SECT_debug_info, gdbjit_debuginfo); + gdbjit_initsect(ctx, GDBJIT_SECT_debug_abbrev, gdbjit_debugabbrev); + gdbjit_initsect(ctx, GDBJIT_SECT_debug_line, gdbjit_debugline); + SECTALIGN(ctx->p, sizeof(uintptr_t)); + gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe); + ctx->objsize = (size_t)((char *)ctx->p - (char *)obj); + lua_assert(ctx->objsize < sizeof(GDBJITobj)); +} + +#undef SECTALIGN + +/* -- Interface to GDB JIT API -------------------------------------------- */ + +/* Add new entry to GDB JIT symbol chain. 
*/ +static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx) +{ + /* Allocate memory for GDB JIT entry and ELF object. */ + MSize sz = (MSize)(sizeof(GDBJITentryobj) - sizeof(GDBJITobj) + ctx->objsize); + GDBJITentryobj *eo = lj_mem_newt(L, sz, GDBJITentryobj); + memcpy(&eo->obj, &ctx->obj, ctx->objsize); /* Copy ELF object. */ + eo->sz = sz; + ctx->T->gdbjit_entry = (void *)eo; + /* Link new entry to chain and register it. */ + eo->entry.prev_entry = NULL; + eo->entry.next_entry = __jit_debug_descriptor.first_entry; + if (eo->entry.next_entry) + eo->entry.next_entry->prev_entry = &eo->entry; + eo->entry.symfile_addr = (const char *)&eo->obj; + eo->entry.symfile_size = ctx->objsize; + __jit_debug_descriptor.first_entry = &eo->entry; + __jit_debug_descriptor.relevant_entry = &eo->entry; + __jit_debug_descriptor.action_flag = GDBJIT_REGISTER; + __jit_debug_register_code(); +} + +/* Add debug info for newly compiled trace and notify GDB. */ +void lj_gdbjit_addtrace(jit_State *J, GCtrace *T) +{ + GDBJITctx ctx; + GCproto *pt = &gcref(T->startpt)->pt; + TraceNo parent = T->ir[REF_BASE].op1; + const BCIns *startpc = mref(T->startpc, const BCIns); + ctx.T = T; + ctx.mcaddr = (uintptr_t)T->mcode; + ctx.szmcode = T->szmcode; + ctx.spadjp = CFRAME_SIZE_JIT + + (MSize)(parent ? traceref(J, parent)->spadjust : 0); + ctx.spadj = CFRAME_SIZE_JIT + T->spadjust; + lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); + ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); + ctx.filename = proto_chunknamestr(pt); + if (*ctx.filename == '@' || *ctx.filename == '=') + ctx.filename++; + else + ctx.filename = "(string)"; + gdbjit_buildobj(&ctx); + gdbjit_newentry(J->L, &ctx); +} + +/* Delete debug info for trace and notify GDB. */ +void lj_gdbjit_deltrace(jit_State *J, GCtrace *T) +{ + GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry; + if (eo) { + if (eo->entry.prev_entry) + eo->entry.prev_entry->next_entry = eo->entry.next_entry; + else + __jit_debug_descriptor.first_entry = eo->entry.next_entry; + if (eo->entry.next_entry) + eo->entry.next_entry->prev_entry = eo->entry.prev_entry; + __jit_debug_descriptor.relevant_entry = &eo->entry; + __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER; + __jit_debug_register_code(); + lj_mem_free(J2G(J), eo, eo->sz); + } +} + +#endif +#endif diff --git a/src/luajit2/src/lj_gdbjit.h b/src/luajit2/src/lj_gdbjit.h new file mode 100644 index 0000000000..5e42d3cba7 --- /dev/null +++ b/src/luajit2/src/lj_gdbjit.h @@ -0,0 +1,22 @@ +/* +** Client for the GDB JIT API. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_GDBJIT_H +#define _LJ_GDBJIT_H + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT && defined(LUAJIT_USE_GDBJIT) + +LJ_FUNC void lj_gdbjit_addtrace(jit_State *J, GCtrace *T); +LJ_FUNC void lj_gdbjit_deltrace(jit_State *J, GCtrace *T); + +#else +#define lj_gdbjit_addtrace(J, T) UNUSED(T) +#define lj_gdbjit_deltrace(J, T) UNUSED(T) +#endif + +#endif diff --git a/src/luajit2/src/lj_ir.c b/src/luajit2/src/lj_ir.c new file mode 100644 index 0000000000..8f22e08538 --- /dev/null +++ b/src/luajit2/src/lj_ir.c @@ -0,0 +1,492 @@ +/* +** SSA IR (Intermediate Representation) emitter. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_ir_c +#define LUA_CORE + +/* For pointers to libc/libm functions. 
*/ +#include <stdio.h> +#include <math.h> + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#include "lj_cdata.h" +#include "lj_carith.h" +#endif +#include "lj_vm.h" +#include "lj_lib.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) +#define fins (&J->fold.ins) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* -- IR tables ----------------------------------------------------------- */ + +/* IR instruction modes. */ +LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = { +IRDEF(IRMODE) + 0 +}; + +/* C call info for CALL* instructions. */ +LJ_DATADEF const CCallInfo lj_ir_callinfo[] = { +#define IRCALLCI(name, nargs, kind, type, flags) \ + { (ASMFunction)name, \ + (nargs)|(CCI_CALL_##kind)|(IRT_##type<<CCI_OTSHIFT)|(flags) }, +IRCALLDEF(IRCALLCI) +#undef IRCALLCI + { NULL, 0 } +}; + +/* -- IR emitter ---------------------------------------------------------- */ + +/* Grow IR buffer at the top. */ +void LJ_FASTCALL lj_ir_growtop(jit_State *J) +{ + IRIns *baseir = J->irbuf + J->irbotlim; + MSize szins = J->irtoplim - J->irbotlim; + if (szins) { + baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns), + 2*szins*sizeof(IRIns)); + J->irtoplim = J->irbotlim + 2*szins; + } else { + baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns)); + J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4; + J->irtoplim = J->irbotlim + LJ_MIN_IRSZ; + } + J->cur.ir = J->irbuf = baseir - J->irbotlim; +} + +/* Grow IR buffer at the bottom or shift it up. */ +static void lj_ir_growbot(jit_State *J) +{ + IRIns *baseir = J->irbuf + J->irbotlim; + MSize szins = J->irtoplim - J->irbotlim; + lua_assert(szins != 0); + lua_assert(J->cur.nk == J->irbotlim); + if (J->cur.nins + (szins >> 1) < J->irtoplim) { + /* More than half of the buffer is free on top: shift up by a quarter. */ + MSize ofs = szins >> 2; + memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns)); + J->irbotlim -= ofs; + J->irtoplim -= ofs; + J->cur.ir = J->irbuf = baseir - J->irbotlim; + } else { + /* Double the buffer size, but split the growth amongst top/bottom. */ + IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns); + MSize ofs = szins >= 256 ? 128 : (szins >> 1); /* Limit bottom growth. */ + memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns)); + lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns)); + J->irbotlim -= ofs; + J->irtoplim = J->irbotlim + 2*szins; + J->cur.ir = J->irbuf = newbase - J->irbotlim; + } +} + +/* Emit IR without any optimizations. */ +TRef LJ_FASTCALL lj_ir_emit(jit_State *J) +{ + IRRef ref = lj_ir_nextins(J); + IRIns *ir = IR(ref); + IROp op = fins->o; + ir->prev = J->chain[op]; + J->chain[op] = (IRRef1)ref; + ir->o = op; + ir->op1 = fins->op1; + ir->op2 = fins->op2; + J->guardemit.irt |= fins->t.irt; + return TREF(ref, irt_t((ir->t = fins->t))); +} + +/* Emit call to a C function. */ +TRef lj_ir_call(jit_State *J, IRCallID id, ...) 
+{ + const CCallInfo *ci = &lj_ir_callinfo[id]; + uint32_t n = CCI_NARGS(ci); + TRef tr = TREF_NIL; + va_list argp; + va_start(argp, id); + if ((ci->flags & CCI_L)) n--; + if (n > 0) + tr = va_arg(argp, IRRef); + while (n-- > 1) + tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef)); + va_end(argp); + if (CCI_OP(ci) == IR_CALLS) + J->needsnap = 1; /* Need snapshot after call with side effect. */ + return emitir(CCI_OPTYPE(ci), tr, id); +} + +/* -- Interning of constants ---------------------------------------------- */ + +/* +** IR instructions for constants are kept between J->cur.nk >= ref < REF_BIAS. +** They are chained like all other instructions, but grow downwards. +** The are interned (like strings in the VM) to facilitate reference +** comparisons. The same constant must get the same reference. +*/ + +/* Get ref of next IR constant and optionally grow IR. +** Note: this may invalidate all IRIns *! +*/ +static LJ_AINLINE IRRef ir_nextk(jit_State *J) +{ + IRRef ref = J->cur.nk; + if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J); + J->cur.nk = --ref; + return ref; +} + +/* Intern int32_t constant. */ +TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev) + if (cir[ref].i == k) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + ir->i = k; + ir->t.irt = IRT_INT; + ir->o = IR_KINT; + ir->prev = J->chain[IR_KINT]; + J->chain[IR_KINT] = (IRRef1)ref; +found: + return TREF(ref, IRT_INT); +} + +/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the +** 64 bit constant. The constants themselves are stored in a chained array +** and shared across traces. +** +** Rationale for choosing this data structure: +** - The address of the constants is embedded in the generated machine code +** and must never move. A resizable array or hash table wouldn't work. +** - Most apps need very few non-32 bit integer constants (less than a dozen). +** - Linear search is hard to beat in terms of speed and low complexity. +*/ +typedef struct K64Array { + MRef next; /* Pointer to next list. */ + MSize numk; /* Number of used elements in this array. */ + TValue k[LJ_MIN_K64SZ]; /* Array of constants. */ +} K64Array; + +/* Free all chained arrays. */ +void lj_ir_k64_freeall(jit_State *J) +{ + K64Array *k; + for (k = mref(J->k64, K64Array); k; ) { + K64Array *next = mref(k->next, K64Array); + lj_mem_free(J2G(J), k, sizeof(K64Array)); + k = next; + } +} + +/* Find 64 bit constant in chained array or add it. */ +cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64) +{ + K64Array *k, *kp = NULL; + TValue *ntv; + MSize idx; + /* Search for the constant in the whole chain of arrays. */ + for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) { + kp = k; /* Remember previous element in list. */ + for (idx = 0; idx < k->numk; idx++) { /* Search one array. */ + TValue *tv = &k->k[idx]; + if (tv->u64 == u64) /* Needed for +-0/NaN/absmask. */ + return tv; + } + } + /* Constant was not found, need to add it. */ + if (!(kp && kp->numk < LJ_MIN_K64SZ)) { /* Allocate a new array. */ + K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array); + setmref(kn->next, NULL); + kn->numk = 0; + if (kp) + setmref(kp->next, kn); /* Chain to the end of the list. */ + else + setmref(J->k64, kn); /* Link first array. */ + kp = kn; + } + ntv = &kp->k[kp->numk++]; /* Add to current array. */ + ntv->u64 = u64; + return ntv; +} + +/* Intern 64 bit constant, given by its address. 
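The chained K64Array above gives each 64 bit constant a stable address that can be embedded in machine code, and interning by bit pattern means equal patterns share one slot while +0.0 and -0.0 stay distinct. A small usage sketch, not part of the upstream sources (assumes a jit_State *J inside a recording context; lj_ir_knum() is the inline wrapper declared in lj_iropt.h later in this patch, tref_ref() comes from lj_ir.h):

TRef a = lj_ir_knum(J, 3.5);
TRef b = lj_ir_knum(J, 3.5);    /* Same u64 pattern: same K64Array slot, same ref. */
TRef p = lj_ir_knum(J, 0.0);
TRef m = lj_ir_knum(J, -0.0);   /* Sign bit differs, so the patterns differ. */
lua_assert(tref_ref(a) == tref_ref(b));
lua_assert(tref_ref(p) != tref_ref(m));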
*/ +TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64; + for (ref = J->chain[op]; ref; ref = cir[ref].prev) + if (ir_k64(&cir[ref]) == tv) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + lua_assert(checkptr32(tv)); + setmref(ir->ptr, tv); + ir->t.irt = t; + ir->o = op; + ir->prev = J->chain[op]; + J->chain[op] = (IRRef1)ref; +found: + return TREF(ref, t); +} + +/* Intern FP constant, given by its 64 bit pattern. */ +TRef lj_ir_knum_u64(jit_State *J, uint64_t u64) +{ + return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64)); +} + +/* Intern 64 bit integer constant. */ +TRef lj_ir_kint64(jit_State *J, uint64_t u64) +{ + return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64)); +} + +/* Check whether a number is int and return it. -0 is NOT considered an int. */ +static int numistrueint(lua_Number n, int32_t *kp) +{ + int32_t k = lj_num2int(n); + if (n == (lua_Number)k) { + if (kp) *kp = k; + if (k == 0) { /* Special check for -0. */ + TValue tv; + setnumV(&tv, n); + if (tv.u32.hi != 0) + return 0; + } + return 1; + } + return 0; +} + +/* Intern number as int32_t constant if possible, otherwise as FP constant. */ +TRef lj_ir_knumint(jit_State *J, lua_Number n) +{ + int32_t k; + if (numistrueint(n, &k)) + return lj_ir_kint(J, k); + else + return lj_ir_knum(J, n); +} + +/* Intern GC object "constant". */ +TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + lua_assert(!isdead(J2G(J), o)); + for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev) + if (ir_kgc(&cir[ref]) == o) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + /* NOBARRIER: Current trace is a GC root. */ + setgcref(ir->gcr, o); + ir->t.irt = (uint8_t)t; + ir->o = IR_KGC; + ir->prev = J->chain[IR_KGC]; + J->chain[IR_KGC] = (IRRef1)ref; +found: + return TREF(ref, t); +} + +/* Intern 32 bit pointer constant. */ +TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr); + for (ref = J->chain[op]; ref; ref = cir[ref].prev) + if (mref(cir[ref].ptr, void) == ptr) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + setmref(ir->ptr, ptr); + ir->t.irt = IRT_P32; + ir->o = op; + ir->prev = J->chain[op]; + J->chain[op] = (IRRef1)ref; +found: + return TREF(ref, IRT_P32); +} + +/* Intern typed NULL constant. */ +TRef lj_ir_knull(jit_State *J, IRType t) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef ref; + for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev) + if (irt_t(cir[ref].t) == t) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + ir->i = 0; + ir->t.irt = (uint8_t)t; + ir->o = IR_KNULL; + ir->prev = J->chain[IR_KNULL]; + J->chain[IR_KNULL] = (IRRef1)ref; +found: + return TREF(ref, t); +} + +/* Intern key slot. */ +TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot) +{ + IRIns *ir, *cir = J->cur.ir; + IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot); + IRRef ref; + /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. 
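The -0 special case in numistrueint() above is needed because -0.0 compares equal to 0, so only the stored bit pattern (checked via tv.u32.hi) can tell the two apart. A standalone plain-C illustration, separate from the patch:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
  double n = -0.0;
  int32_t k = (int32_t)n;               /* k == 0: the cast loses the sign. */
  uint64_t bits;
  memcpy(&bits, &n, sizeof(bits));
  assert(n == (double)k);               /* Equality cannot see the sign... */
  assert((uint32_t)(bits >> 32) != 0);  /* ...but the high word can. */
  return 0;
}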
*/ + lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot); + for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev) + if (cir[ref].op12 == op12) + goto found; + ref = ir_nextk(J); + ir = IR(ref); + ir->op12 = op12; + ir->t.irt = IRT_P32; + ir->o = IR_KSLOT; + ir->prev = J->chain[IR_KSLOT]; + J->chain[IR_KSLOT] = (IRRef1)ref; +found: + return TREF(ref, IRT_P32); +} + +/* -- Access to IR constants ---------------------------------------------- */ + +/* Copy value of IR constant. */ +void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir) +{ + UNUSED(L); + lua_assert(ir->o != IR_KSLOT); /* Common mistake. */ + switch (ir->o) { + case IR_KPRI: setitype(tv, irt_toitype(ir->t)); break; + case IR_KINT: setintV(tv, ir->i); break; + case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break; + case IR_KPTR: case IR_KKPTR: case IR_KNULL: + setlightudV(tv, mref(ir->ptr, void)); + break; + case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break; +#if LJ_HASFFI + case IR_KINT64: { + GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8); + *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64; + setcdataV(L, tv, cd); + break; + } +#endif + default: lua_assert(0); break; + } +} + +/* -- Convert IR operand types -------------------------------------------- */ + +/* Convert from string to number. */ +TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr) +{ + if (!tref_isnumber(tr)) { + if (tref_isstr(tr)) + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + else + lj_trace_err(J, LJ_TRERR_BADTYPE); + } + return tr; +} + +/* Convert from integer or string to number. */ +TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr) +{ + if (!tref_isnum(tr)) { + if (tref_isinteger(tr)) + tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); + else if (tref_isstr(tr)) + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + else + lj_trace_err(J, LJ_TRERR_BADTYPE); + } + return tr; +} + +/* Convert from integer or number to string. */ +TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr) +{ + if (!tref_isstr(tr)) { + if (!tref_isnumber(tr)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + tr = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0); + } + return tr; +} + +/* -- Miscellaneous IR ops ------------------------------------------------ */ + +/* Evaluate numeric comparison. */ +int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op) +{ + switch (op) { + case IR_EQ: return (a == b); + case IR_NE: return (a != b); + case IR_LT: return (a < b); + case IR_GE: return (a >= b); + case IR_LE: return (a <= b); + case IR_GT: return (a > b); + case IR_ULT: return !(a >= b); + case IR_UGE: return !(a < b); + case IR_ULE: return !(a > b); + case IR_UGT: return !(a <= b); + default: lua_assert(0); return 0; + } +} + +/* Evaluate string comparison. */ +int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op) +{ + int res = lj_str_cmp(a, b); + switch (op) { + case IR_LT: return (res < 0); + case IR_GE: return (res >= 0); + case IR_LE: return (res <= 0); + case IR_GT: return (res > 0); + default: lua_assert(0); return 0; + } +} + +/* Rollback IR to previous state. */ +void lj_ir_rollback(jit_State *J, IRRef ref) +{ + IRRef nins = J->cur.nins; + while (nins > ref) { + IRIns *ir; + nins--; + ir = IR(nins); + J->chain[ir->o] = ir->prev; + } + J->cur.nins = nins; +} + +#undef IR +#undef fins +#undef emitir + +#endif diff --git a/src/luajit2/src/lj_ir.h b/src/luajit2/src/lj_ir.h new file mode 100644 index 0000000000..aac34350d1 --- /dev/null +++ b/src/luajit2/src/lj_ir.h @@ -0,0 +1,538 @@ +/* +** SSA IR (Intermediate Representation) format. +** Copyright (C) 2005-2011 Mike Pall. 
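One note on the unordered cases in lj_ir_numcmp() above: IR_ULT and friends are evaluated as the negation of the opposite ordered comparison, so they come out true when an operand is NaN, since every ordered comparison involving NaN is false. A standalone plain-C check of that property, separate from the patch:

#include <assert.h>
#include <math.h>

int main(void)
{
  double a = nan("");
  assert(!(a < 1.0));    /* Ordered LT is false with a NaN operand. */
  assert(!(a >= 1.0));   /* GE is false too, so ULT == !(a >= b) holds. */
  return 0;
}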
See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_IR_H +#define _LJ_IR_H + +#include "lj_obj.h" + +/* -- IR instructions ----------------------------------------------------- */ + +/* IR instruction definition. Order matters, see below. ORDER IR */ +#define IRDEF(_) \ + /* Guarded assertions. */ \ + /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \ + _(LT, N , ref, ref) \ + _(GE, N , ref, ref) \ + _(LE, N , ref, ref) \ + _(GT, N , ref, ref) \ + \ + _(ULT, N , ref, ref) \ + _(UGE, N , ref, ref) \ + _(ULE, N , ref, ref) \ + _(UGT, N , ref, ref) \ + \ + _(EQ, C , ref, ref) \ + _(NE, C , ref, ref) \ + \ + _(ABC, N , ref, ref) \ + _(RETF, S , ref, ref) \ + \ + /* Miscellaneous ops. */ \ + _(NOP, N , ___, ___) \ + _(BASE, N , lit, lit) \ + _(HIOP, S , ref, ref) \ + _(LOOP, S , ___, ___) \ + _(USE, S , ref, ___) \ + _(PHI, S , ref, ref) \ + _(RENAME, S , ref, lit) \ + \ + /* Constants. */ \ + _(KPRI, N , ___, ___) \ + _(KINT, N , cst, ___) \ + _(KGC, N , cst, ___) \ + _(KPTR, N , cst, ___) \ + _(KKPTR, N , cst, ___) \ + _(KNULL, N , cst, ___) \ + _(KNUM, N , cst, ___) \ + _(KINT64, N , cst, ___) \ + _(KSLOT, N , ref, lit) \ + \ + /* Bit ops. */ \ + _(BNOT, N , ref, ___) \ + _(BSWAP, N , ref, ___) \ + _(BAND, C , ref, ref) \ + _(BOR, C , ref, ref) \ + _(BXOR, C , ref, ref) \ + _(BSHL, N , ref, ref) \ + _(BSHR, N , ref, ref) \ + _(BSAR, N , ref, ref) \ + _(BROL, N , ref, ref) \ + _(BROR, N , ref, ref) \ + \ + /* Arithmetic ops. ORDER ARITH */ \ + _(ADD, C , ref, ref) \ + _(SUB, N , ref, ref) \ + _(MUL, C , ref, ref) \ + _(DIV, N , ref, ref) \ + _(MOD, N , ref, ref) \ + _(POW, N , ref, ref) \ + _(NEG, N , ref, ref) \ + \ + _(ABS, N , ref, ref) \ + _(ATAN2, N , ref, ref) \ + _(LDEXP, N , ref, ref) \ + _(MIN, C , ref, ref) \ + _(MAX, C , ref, ref) \ + _(FPMATH, N , ref, lit) \ + \ + /* Overflow-checking arithmetic ops. */ \ + _(ADDOV, CW, ref, ref) \ + _(SUBOV, NW, ref, ref) \ + _(MULOV, CW, ref, ref) \ + \ + /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \ + \ + /* Memory references. */ \ + _(AREF, R , ref, ref) \ + _(HREFK, R , ref, ref) \ + _(HREF, L , ref, ref) \ + _(NEWREF, S , ref, ref) \ + _(UREFO, LW, ref, lit) \ + _(UREFC, LW, ref, lit) \ + _(FREF, R , ref, lit) \ + _(STRREF, N , ref, ref) \ + \ + /* Loads and Stores. These must be in the same order. */ \ + _(ALOAD, L , ref, ___) \ + _(HLOAD, L , ref, ___) \ + _(ULOAD, L , ref, ___) \ + _(FLOAD, L , ref, lit) \ + _(XLOAD, L , ref, lit) \ + _(SLOAD, L , lit, lit) \ + _(VLOAD, L , ref, ___) \ + \ + _(ASTORE, S , ref, ref) \ + _(HSTORE, S , ref, ref) \ + _(USTORE, S , ref, ref) \ + _(FSTORE, S , ref, ref) \ + _(XSTORE, S , ref, ref) \ + \ + /* Allocations. */ \ + _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \ + _(XSNEW, A , ref, ref) \ + _(TNEW, AW, lit, lit) \ + _(TDUP, AW, ref, ___) \ + _(CNEW, AW, ref, ref) \ + _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \ + \ + /* Barriers. */ \ + _(TBAR, S , ref, ___) \ + _(OBAR, S , ref, ref) \ + _(XBAR, S , ___, ___) \ + \ + /* Type conversions. */ \ + _(CONV, NW, ref, lit) \ + _(TOBIT, N , ref, ref) \ + _(TOSTR, N , ref, ___) \ + _(STRTO, N , ref, ___) \ + \ + /* Calls. */ \ + _(CALLN, N , ref, lit) \ + _(CALLL, L , ref, lit) \ + _(CALLS, S , ref, lit) \ + _(CALLXS, S , ref, ref) \ + _(CARG, N , ref, ref) \ + \ + /* End of list. */ + +/* IR opcodes (max. 256). */ +typedef enum { +#define IRENUM(name, m, m1, m2) IR_##name, +IRDEF(IRENUM) +#undef IRENUM + IR__MAX +} IROp; + +/* Stored opcode. 
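IRDEF above is an X-macro list: it is expanded once with IRENUM right here to number the opcodes and once with IRMODE (defined further down in this header, used in lj_ir.c earlier in this patch) to fill lj_ir_mode[], so the enum order and the mode table cannot drift apart. A standalone toy version of the pattern, with made-up DEMO_ names and a trimmed list:

#include <stdio.h>

#define DEMO_IRDEF(_) \
  _(LT, N) _(GE, N) _(EQ, C) _(NE, C)

typedef enum {
#define DEMO_ENUM(name, mode) DEMO_IR_##name,
DEMO_IRDEF(DEMO_ENUM)
#undef DEMO_ENUM
  DEMO_IR__MAX
} DemoIROp;

static const char *const demo_irnames[] = {
#define DEMO_NAME(name, mode) #name,
DEMO_IRDEF(DEMO_NAME)
#undef DEMO_NAME
};

int main(void)
{
  int i;
  for (i = 0; i < (int)DEMO_IR__MAX; i++)
    printf("%d %s\n", i, demo_irnames[i]);  /* 0 LT, 1 GE, 2 EQ, 3 NE */
  return 0;
}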
*/ +typedef uint8_t IROp1; + +LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE); +LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE); +LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT); +LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT); +LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT); + +/* Delta between xLOAD and xSTORE. */ +#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD) + +LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE); +LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE); +LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE); +LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE); + +/* -- Named IR literals --------------------------------------------------- */ + +/* FPMATH sub-functions. ORDER FPM. */ +#define IRFPMDEF(_) \ + _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \ + _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \ + _(SIN) _(COS) _(TAN) \ + _(OTHER) + +typedef enum { +#define FPMENUM(name) IRFPM_##name, +IRFPMDEF(FPMENUM) +#undef FPMENUM + IRFPM__MAX +} IRFPMathOp; + +/* FLOAD fields. */ +#define IRFLDEF(_) \ + _(STR_LEN, offsetof(GCstr, len)) \ + _(FUNC_ENV, offsetof(GCfunc, l.env)) \ + _(TAB_META, offsetof(GCtab, metatable)) \ + _(TAB_ARRAY, offsetof(GCtab, array)) \ + _(TAB_NODE, offsetof(GCtab, node)) \ + _(TAB_ASIZE, offsetof(GCtab, asize)) \ + _(TAB_HMASK, offsetof(GCtab, hmask)) \ + _(TAB_NOMM, offsetof(GCtab, nomm)) \ + _(UDATA_META, offsetof(GCudata, metatable)) \ + _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \ + _(UDATA_FILE, sizeof(GCudata)) \ + _(CDATA_TYPEID, offsetof(GCcdata, typeid)) \ + _(CDATA_PTR, sizeof(GCcdata)) \ + _(CDATA_INT64, sizeof(GCcdata)) \ + _(CDATA_INT64HI, sizeof(GCcdata) + 4) + +typedef enum { +#define FLENUM(name, ofs) IRFL_##name, +IRFLDEF(FLENUM) +#undef FLENUM + IRFL__MAX +} IRFieldID; + +/* SLOAD mode bits, stored in op2. */ +#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */ +#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */ +#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */ +#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */ +#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */ +#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */ + +/* XLOAD mode, stored in op2. */ +#define IRXLOAD_READONLY 1 /* Load from read-only data. */ +#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */ +#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */ + +/* CONV mode, stored in op2. */ +#define IRCONV_SRCMASK 0x001f /* Source IRType. */ +#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */ +#define IRCONV_DSH 5 +#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT) +#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM) +#define IRCONV_TRUNC 0x0400 /* Truncate number to integer. */ +#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */ +#define IRCONV_MODEMASK 0x0fff +#define IRCONV_CONVMASK 0xf000 +#define IRCONV_CSH 12 +/* Number to integer conversion mode. Ordered by strength of the checks. */ +#define IRCONV_TOBIT (0<<IRCONV_CSH) /* None. Cache only: TOBIT conv. */ +#define IRCONV_ANY (1<<IRCONV_CSH) /* Any FP number is ok. */ +#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */ +#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */ + +/* -- IR operands --------------------------------------------------------- */ + +/* IR operand mode (2 bit). */ +typedef enum { + IRMref, /* IR reference. */ + IRMlit, /* 16 bit unsigned literal. 
*/ + IRMcst, /* Constant literal: i, gcr or ptr. */ + IRMnone /* Unused operand. */ +} IRMode; +#define IRM___ IRMnone + +/* Mode bits: Commutative, {Normal/Ref, Alloc, Load, Store}, Non-weak guard. */ +#define IRM_C 0x10 + +#define IRM_N 0x00 +#define IRM_R IRM_N +#define IRM_A 0x20 +#define IRM_L 0x40 +#define IRM_S 0x60 + +#define IRM_W 0x80 + +#define IRM_NW (IRM_N|IRM_W) +#define IRM_CW (IRM_C|IRM_W) +#define IRM_AW (IRM_A|IRM_W) +#define IRM_LW (IRM_L|IRM_W) + +#define irm_op1(m) ((IRMode)((m)&3)) +#define irm_op2(m) ((IRMode)(((m)>>2)&3)) +#define irm_iscomm(m) ((m) & IRM_C) +#define irm_kind(m) ((m) & IRM_S) + +#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W), + +LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1]; + +/* -- IR instruction types ------------------------------------------------ */ + +/* Map of itypes to non-negative numbers. ORDER LJ_T. +** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for +** IRT_P32 and IRT_P64, which never escape the IR. +** The various integers are only used in the IR and can only escape to +** a TValue after implicit or explicit conversion. Their types must be +** contiguous and next to IRT_NUM (see the typerange macros below). +*/ +#define IRTDEF(_) \ + _(NIL) _(FALSE) _(TRUE) _(LIGHTUD) _(STR) _(P32) _(THREAD) \ + _(PROTO) _(FUNC) _(P64) _(CDATA) _(TAB) _(UDATA) \ + _(FLOAT) _(NUM) _(I8) _(U8) _(I16) _(U16) _(INT) _(U32) _(I64) _(U64) \ + _(SOFTFP) /* There is room for 9 more types. */ + +/* IR result type and flags (8 bit). */ +typedef enum { +#define IRTENUM(name) IRT_##name, +IRTDEF(IRTENUM) +#undef IRTENUM + + /* Native pointer type and the corresponding integer type. */ + IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32, + IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT, + IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32, + + /* Additional flags. */ + IRT_MARK = 0x20, /* Marker for misc. purposes. */ + IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */ + IRT_GUARD = 0x80, /* Instruction is a guard. */ + + /* Masks. */ + IRT_TYPE = 0x1f, + IRT_T = 0xff +} IRType; + +#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE) + +/* Stored IRType. 
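To make the packed IRCONV_* encoding above concrete, here is a short sketch that takes one op2 value apart again; it assumes this header is in scope and uses a checked number-to-integer conversion purely as an example operand:

uint32_t op2 = IRCONV_INT_NUM | IRCONV_CHECK;                  /* Example. */
IRType src = (IRType)(op2 & IRCONV_SRCMASK);                   /* IRT_NUM */
IRType dst = (IRType)((op2 & IRCONV_DSTMASK) >> IRCONV_DSH);   /* IRT_INT */
uint32_t mode = op2 & IRCONV_CONVMASK;                         /* IRCONV_CHECK */
lua_assert(src == IRT_NUM && dst == IRT_INT && mode == IRCONV_CHECK);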
*/ +typedef struct IRType1 { uint8_t irt; } IRType1; + +#define IRT(o, t) ((uint32_t)(((o)<<8) | (t))) +#define IRTI(o) (IRT((o), IRT_INT)) +#define IRTN(o) (IRT((o), IRT_NUM)) +#define IRTG(o, t) (IRT((o), IRT_GUARD|(t))) +#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT)) + +#define irt_t(t) ((IRType)(t).irt) +#define irt_type(t) ((IRType)((t).irt & IRT_TYPE)) +#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0) +#define irt_typerange(t, first, last) \ + ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first)) + +#define irt_isnil(t) (irt_type(t) == IRT_NIL) +#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE) +#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD) +#define irt_isstr(t) (irt_type(t) == IRT_STR) +#define irt_istab(t) (irt_type(t) == IRT_TAB) +#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT) +#define irt_isnum(t) (irt_type(t) == IRT_NUM) +#define irt_isint(t) (irt_type(t) == IRT_INT) +#define irt_isi8(t) (irt_type(t) == IRT_I8) +#define irt_isu8(t) (irt_type(t) == IRT_U8) +#define irt_isi16(t) (irt_type(t) == IRT_I16) +#define irt_isu16(t) (irt_type(t) == IRT_U16) +#define irt_isu32(t) (irt_type(t) == IRT_U32) +#define irt_isi64(t) (irt_type(t) == IRT_I64) +#define irt_isu64(t) (irt_type(t) == IRT_U64) + +#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t)) +#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT)) +#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA)) +#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA)) +#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64)) + +#if LJ_64 +#define IRT_IS64 \ + ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD)) +#else +#define IRT_IS64 \ + ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)) +#endif + +#define irt_is64(t) ((IRT_IS64 >> irt_type(t)) & 1) + +static LJ_AINLINE IRType itype2irt(const TValue *tv) +{ + if (tvisint(tv)) + return IRT_INT; + else if (tvisnum(tv)) + return IRT_NUM; +#if LJ_64 + else if (tvislightud(tv)) + return IRT_LIGHTUD; +#endif + else + return (IRType)~itype(tv); +} + +static LJ_AINLINE uint32_t irt_toitype_(IRType t) +{ + lua_assert(!LJ_64 || t != IRT_LIGHTUD); + if (LJ_DUALNUM && t > IRT_NUM) { + return LJ_TISNUM; + } else { + lua_assert(t <= IRT_NUM); + return ~(uint32_t)t; + } +} + +#define irt_toitype(t) irt_toitype_(irt_type((t))) + +#define irt_isguard(t) ((t).irt & IRT_GUARD) +#define irt_ismarked(t) ((t).irt & IRT_MARK) +#define irt_setmark(t) ((t).irt |= IRT_MARK) +#define irt_clearmark(t) ((t).irt &= ~IRT_MARK) +#define irt_isphi(t) ((t).irt & IRT_ISPHI) +#define irt_setphi(t) ((t).irt |= IRT_ISPHI) +#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI) + +/* Stored combined IR opcode and type. */ +typedef uint16_t IROpT; + +/* -- IR references ------------------------------------------------------- */ + +/* IR references. */ +typedef uint16_t IRRef1; /* One stored reference. */ +typedef uint32_t IRRef2; /* Two stored references. */ +typedef uint32_t IRRef; /* Used to pass around references. */ + +/* Fixed references. */ +enum { + REF_BIAS = 0x8000, + REF_TRUE = REF_BIAS-3, + REF_FALSE = REF_BIAS-2, + REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */ + REF_BASE = REF_BIAS, /* /--- IR grows upwards. */ + REF_FIRST = REF_BIAS+1, + REF_DROP = 0xffff +}; + +/* Note: IRMlit operands must be < REF_BIAS, too! +** This allows for fast and uniform manipulation of all operands +** without looking up the operand mode in lj_ir_mode: +** - CSE calculates the maximum reference of two operands. 
+** This must work with mixed reference/literal operands, too. +** - DCE marking only checks for operand >= REF_BIAS. +** - LOOP needs to substitute reference operands. +** Constant references and literals must not be modified. +*/ + +#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16)) + +#define irref_isk(ref) ((ref) < REF_BIAS) + +/* Tagged IR references (32 bit). +** +** +-------+-------+---------------+ +** | irt | flags | ref | +** +-------+-------+---------------+ +** +** The tag holds a copy of the IRType and speeds up IR type checks. +*/ +typedef uint32_t TRef; + +#define TREF_REFMASK 0x0000ffff +#define TREF_FRAME 0x00010000 +#define TREF_CONT 0x00020000 + +#define TREF(ref, t) ((TRef)((ref) + ((t)<<24))) + +#define tref_ref(tr) ((IRRef1)(tr)) +#define tref_t(tr) ((IRType)((tr)>>24)) +#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE)) +#define tref_typerange(tr, first, last) \ + ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first)) + +#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24)) +#define tref_isnil(tr) (tref_istype((tr), IRT_NIL)) +#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE)) +#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE)) +#define tref_isstr(tr) (tref_istype((tr), IRT_STR)) +#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC)) +#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA)) +#define tref_istab(tr) (tref_istype((tr), IRT_TAB)) +#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA)) +#define tref_isnum(tr) (tref_istype((tr), IRT_NUM)) +#define tref_isint(tr) (tref_istype((tr), IRT_INT)) + +#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE)) +#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE)) +#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE)) +#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT)) +#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT)) +#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr))) +#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA)) + +#define tref_isk(tr) (irref_isk(tref_ref((tr)))) +#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2)))) + +#define TREF_PRI(t) (TREF(REF_NIL-(t), (t))) +#define TREF_NIL (TREF_PRI(IRT_NIL)) +#define TREF_FALSE (TREF_PRI(IRT_FALSE)) +#define TREF_TRUE (TREF_PRI(IRT_TRUE)) + +/* -- IR format ----------------------------------------------------------- */ + +/* IR instruction format (64 bit). +** +** 16 16 8 8 8 8 +** +-------+-------+---+---+---+---+ +** | op1 | op2 | t | o | r | s | +** +-------+-------+---+---+---+---+ +** | op12/i/gco | ot | prev | (alternative fields in union) +** +---------------+-------+-------+ +** 32 16 16 +** +** prev is only valid prior to register allocation and then reused for r + s. +*/ + +typedef union IRIns { + struct { + LJ_ENDIAN_LOHI( + IRRef1 op1; /* IR operand 1. */ + , IRRef1 op2; /* IR operand 2. */ + ) + IROpT ot; /* IR opcode and type (overlaps t and o). */ + IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */ + }; + struct { + IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */ + LJ_ENDIAN_LOHI( + IRType1 t; /* IR type. */ + , IROp1 o; /* IR opcode. */ + ) + LJ_ENDIAN_LOHI( + uint8_t r; /* Register allocation (overlaps prev). */ + , uint8_t s; /* Spill slot allocation (overlaps prev). */ + ) + }; + int32_t i; /* 32 bit signed integer literal (overlaps op12). */ + GCRef gcr; /* GCobj constant (overlaps op12). */ + MRef ptr; /* Pointer constant (overlaps op12). 
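A small sketch of the tagged reference layout described above (assumes this header): the IRType tag rides in the top byte of a TRef and the plain reference is always recoverable with tref_ref().

TRef tr = TREF(REF_FIRST, IRT_INT);     /* Tag a reference with its type. */
lua_assert(tref_ref(tr) == REF_FIRST);  /* Low 16 bits: the reference. */
lua_assert(tref_type(tr) == IRT_INT);   /* Top byte, masked: the type. */
lua_assert(!tref_isk(tr));              /* REF_FIRST lies above REF_BIAS. */
lua_assert(tref_isk(TREF_NIL));         /* Primitive constants lie below it. */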
*/ +} IRIns; + +#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr)) +#define ir_kstr(ir) (gco2str(ir_kgc((ir)))) +#define ir_ktab(ir) (gco2tab(ir_kgc((ir)))) +#define ir_kfunc(ir) (gco2func(ir_kgc((ir)))) +#define ir_kcdata(ir) (gco2cd(ir_kgc((ir)))) +#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue)) +#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue)) +#define ir_k64(ir) \ + check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue)) +#define ir_kptr(ir) \ + check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void)) + +/* A store or any other op with a non-weak guard has a side-effect. */ +static LJ_AINLINE int ir_sideeff(IRIns *ir) +{ + return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S); +} + +LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W); + +#endif diff --git a/src/luajit2/src/lj_ircall.h b/src/luajit2/src/lj_ircall.h new file mode 100644 index 0000000000..3b0a54b6f4 --- /dev/null +++ b/src/luajit2/src/lj_ircall.h @@ -0,0 +1,224 @@ +/* +** IR CALL* instruction definitions. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_IRCALL_H +#define _LJ_IRCALL_H + +#include "lj_obj.h" +#include "lj_ir.h" +#include "lj_jit.h" + +/* C call info for CALL* instructions. */ +typedef struct CCallInfo { + ASMFunction func; /* Function pointer. */ + uint32_t flags; /* Number of arguments and flags. */ +} CCallInfo; + +#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* Extract # of args. */ +#define CCI_NARGS_MAX 32 /* Max. # of args. */ + +#define CCI_OTSHIFT 16 +#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */ +#define CCI_OPSHIFT 24 +#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */ + +#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT) +#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT) +#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT) +#define CCI_CALL_FN (CCI_CALL_N|CCI_FASTCALL) +#define CCI_CALL_FL (CCI_CALL_L|CCI_FASTCALL) +#define CCI_CALL_FS (CCI_CALL_S|CCI_FASTCALL) + +/* C call info flags. */ +#define CCI_L 0x0100 /* Implicit L arg. */ +#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */ +#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */ +#define CCI_FASTCALL 0x0800 /* Fastcall convention. */ + +/* Function definitions for CALL* instructions. */ +#if LJ_SOFTFP +#if LJ_HASFFI +#define IRCALLDEF_SOFTFP_FFI(_) \ + _(softfp_ui2d, 1, N, NUM, 0) \ + _(softfp_l2d, 2, N, NUM, 0) \ + _(softfp_ul2d, 2, N, NUM, 0) \ + _(softfp_f2d, 1, N, NUM, 0) \ + _(softfp_d2ui, 2, N, INT, 0) \ + _(softfp_d2l, 2, N, I64, 0) \ + _(softfp_d2ul, 2, N, U64, 0) \ + _(softfp_d2f, 2, N, FLOAT, 0) \ + _(softfp_i2f, 1, N, FLOAT, 0) \ + _(softfp_ui2f, 1, N, FLOAT, 0) \ + _(softfp_l2f, 2, N, FLOAT, 0) \ + _(softfp_ul2f, 2, N, FLOAT, 0) \ + _(softfp_f2i, 1, N, INT, 0) \ + _(softfp_f2ui, 1, N, INT, 0) \ + _(softfp_f2l, 1, N, I64, 0) \ + _(softfp_f2ul, 1, N, U64, 0) +#else +#define IRCALLDEF_SOFTFP_FFI(_) +#endif +#define IRCALLDEF_SOFTFP(_) \ + _(lj_vm_tobit, 2, N, INT, 0) \ + _(softfp_add, 4, N, NUM, 0) \ + _(softfp_sub, 4, N, NUM, 0) \ + _(softfp_mul, 4, N, NUM, 0) \ + _(softfp_div, 4, N, NUM, 0) \ + _(softfp_cmp, 4, N, NIL, 0) \ + _(softfp_i2d, 1, N, NUM, 0) \ + _(softfp_d2i, 2, N, INT, 0) \ + IRCALLDEF_SOFTFP_FFI(_) +#else +#define IRCALLDEF_SOFTFP(_) +#endif + +#if LJ_TARGET_X86ORX64 +/* Use lj_vm_* helpers and x87 ops. */ +#define IRCALLDEF_FPMATH(_) +#else +/* Use standard math library calls. 
*/ +#if LJ_SOFTFP +#define ARG1_FP 2 /* Treat as 2 32 bit arguments. */ +#else +#define ARG1_FP 1 +#endif +/* ORDER FPM */ +#define IRCALLDEF_FPMATH(_) \ + _(lj_vm_floor, ARG1_FP, N, NUM, 0) \ + _(lj_vm_ceil, ARG1_FP, N, NUM, 0) \ + _(lj_vm_trunc, ARG1_FP, N, NUM, 0) \ + _(sqrt, ARG1_FP, N, NUM, 0) \ + _(exp, ARG1_FP, N, NUM, 0) \ + _(lj_vm_exp2, ARG1_FP, N, NUM, 0) \ + _(log, ARG1_FP, N, NUM, 0) \ + _(lj_vm_log2, ARG1_FP, N, NUM, 0) \ + _(log10, ARG1_FP, N, NUM, 0) \ + _(sin, ARG1_FP, N, NUM, 0) \ + _(cos, ARG1_FP, N, NUM, 0) \ + _(tan, ARG1_FP, N, NUM, 0) \ + _(lj_vm_powi, ARG1_FP+1, N, NUM, 0) \ + _(pow, ARG1_FP*2, N, NUM, 0) \ + _(atan2, ARG1_FP*2, N, NUM, 0) \ + _(ldexp, ARG1_FP+1, N, NUM, 0) +#endif + +#if LJ_HASFFI +#if LJ_32 +#define ARG2_64 4 /* Treat as 4 32 bit arguments. */ +#define IRCALLDEF_FFI32(_) \ + _(lj_carith_mul64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) +#else +#define ARG2_64 2 +#define IRCALLDEF_FFI32(_) +#endif +#define IRCALLDEF_FFI(_) \ + IRCALLDEF_FFI32(_) \ + _(lj_carith_divi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \ + _(lj_carith_divu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \ + _(lj_carith_modi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \ + _(lj_carith_modu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \ + _(lj_carith_powi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \ + _(lj_carith_powu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \ + _(lj_cdata_setfin, 2, FN, P32, CCI_L) \ + _(strlen, 1, N, INTP, 0) \ + _(memcpy, 3, S, PTR, 0) \ + _(memset, 3, S, PTR, 0) +#else +#define IRCALLDEF_FFI(_) +#endif + +#define IRCALLDEF(_) \ + _(lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \ + _(lj_str_new, 3, S, STR, CCI_L) \ + _(lj_str_tonum, 2, FN, INT, 0) \ + _(lj_str_fromint, 2, FN, STR, CCI_L) \ + _(lj_str_fromnum, 2, FN, STR, CCI_L) \ + _(lj_tab_new1, 2, FS, TAB, CCI_L) \ + _(lj_tab_dup, 2, FS, TAB, CCI_L) \ + _(lj_tab_newkey, 3, S, P32, CCI_L) \ + _(lj_tab_len, 1, FL, INT, 0) \ + _(lj_gc_step_jit, 2, FS, NIL, CCI_L) \ + _(lj_gc_barrieruv, 2, FS, NIL, 0) \ + _(lj_mem_newgco, 2, FS, P32, CCI_L) \ + _(lj_math_random_step, 1, FS, NUM, CCI_CASTU64|CCI_NOFPRCLOBBER) \ + _(lj_vm_modi, 2, FN, INT, 0) \ + IRCALLDEF_SOFTFP(_) \ + IRCALLDEF_FPMATH(_) \ + IRCALLDEF_FFI(_) \ + _(sinh, 1, N, NUM, 0) \ + _(cosh, 1, N, NUM, 0) \ + _(tanh, 1, N, NUM, 0) \ + _(fputc, 2, S, INT, 0) \ + _(fwrite, 4, S, INT, 0) \ + _(fflush, 1, S, INT, 0) \ + \ + /* End of list. */ + +typedef enum { +#define IRCALLENUM(name, nargs, kind, type, flags) IRCALL_##name, +IRCALLDEF(IRCALLENUM) +#undef IRCALLENUM + IRCALL__MAX +} IRCallID; + +LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...); + +LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1]; + +/* Soft-float declarations. 
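The flags word of a CCallInfo packs the argument count, the calling-convention bits and the CALL* op plus result type into one uint32_t (see the IRCALLCI initializer in lj_ir.c earlier in this patch). A sketch of pulling apart the entry generated for the lj_str_cmp row above ('2, FN, INT, CCI_NOFPRCLOBBER'), assuming this header:

const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_cmp];
lua_assert(CCI_NARGS(ci) == 2);                        /* Two arguments. */
lua_assert(CCI_OP(ci) == IR_CALLN);                    /* FN kind: CALLN... */
lua_assert((ci->flags & CCI_FASTCALL) != 0);           /* ...with fastcall. */
lua_assert((ci->flags & CCI_NOFPRCLOBBER) != 0);       /* Leaves FPRs alone. */
lua_assert(CCI_OPTYPE(ci) == IRT(IR_CALLN, IRT_INT));  /* Op and result type. */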
*/ +#if LJ_SOFTFP +#if LJ_TARGET_ARM +#define softfp_add __aeabi_dadd +#define softfp_sub __aeabi_dsub +#define softfp_mul __aeabi_dmul +#define softfp_div __aeabi_ddiv +#define softfp_cmp __aeabi_cdcmple +#define softfp_i2d __aeabi_i2d +#define softfp_ui2d __aeabi_ui2d +#define softfp_l2d __aeabi_l2d +#define softfp_ul2d __aeabi_ul2d +#define softfp_f2d __aeabi_f2d +#define softfp_d2i __aeabi_d2iz +#define softfp_d2ui __aeabi_d2uiz +#define softfp_d2l __aeabi_d2lz +#define softfp_d2ul __aeabi_d2ulz +#define softfp_d2f __aeabi_d2f +#define softfp_i2f __aeabi_i2f +#define softfp_ui2f __aeabi_ui2f +#define softfp_l2f __aeabi_l2f +#define softfp_ul2f __aeabi_ul2f +#define softfp_f2i __aeabi_f2iz +#define softfp_f2ui __aeabi_f2uiz +#define softfp_f2l __aeabi_f2lz +#define softfp_f2ul __aeabi_f2ulz +#else +#error "Missing soft-float definitions for target architecture" +#endif +extern double softfp_add(double a, double b); +extern double softfp_sub(double a, double b); +extern double softfp_mul(double a, double b); +extern double softfp_div(double a, double b); +extern void softfp_cmp(double a, double b); +extern double softfp_i2d(int32_t a); +extern double softfp_ui2d(uint32_t a); +extern double softfp_l2d(int64_t a); +extern double softfp_ul2d(uint64_t a); +extern double softfp_f2d(float a); +extern int32_t softfp_d2i(double a); +extern uint32_t softfp_d2ui(double a); +extern int64_t softfp_d2l(double a); +extern uint64_t softfp_d2ul(double a); +extern float softfp_d2f(double a); +extern float softfp_i2f(int32_t a); +extern float softfp_ui2f(uint32_t a); +extern float softfp_l2f(int64_t a); +extern float softfp_ul2f(uint64_t a); +extern int32_t softfp_f2i(float a); +extern uint32_t softfp_f2ui(float a); +extern int64_t softfp_f2l(float a); +extern uint64_t softfp_f2ul(float a); +#endif + +#endif diff --git a/src/luajit2/src/lj_iropt.h b/src/luajit2/src/lj_iropt.h new file mode 100644 index 0000000000..3059fb9e82 --- /dev/null +++ b/src/luajit2/src/lj_iropt.h @@ -0,0 +1,159 @@ +/* +** Common header for IR emitter and optimizations. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_IROPT_H +#define _LJ_IROPT_H + +#include <stdarg.h> + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT +/* IR emitter. */ +LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J); + +/* Save current IR in J->fold.ins, but do not emit it (yet). */ +static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b) +{ + J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b; +} + +#define lj_ir_set(J, ot, a, b) \ + lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b)) + +/* Get ref of next IR instruction and optionally grow IR. +** Note: this may invalidate all IRIns*! +*/ +static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J) +{ + IRRef ref = J->cur.nins; + if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J); + J->cur.nins = ref + 1; + return ref; +} + +/* Interning of constants. 
*/ +LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k); +LJ_FUNC void lj_ir_k64_freeall(jit_State *J); +LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv); +LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64); +LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64); +LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n); +LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64); +LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t); +LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr); +LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t); +LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot); + +#if LJ_64 +#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k)) +#else +#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k)) +#endif + +static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n) +{ + TValue tv; + tv.n = n; + return lj_ir_knum_u64(J, tv.u64); +} + +#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR) +#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB) +#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC) +#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr)) +#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr)) + +/* Special FP constants. */ +#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000)) +#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000)) +#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000)) + +/* Special 128 bit SIMD constants. */ +#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS)) +#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG)) + +/* Access to constants. */ +LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir); + +/* Convert IR operand types. */ +LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr); +LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr); +LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr); + +/* Miscellaneous IR ops. */ +LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op); +LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op); +LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref); + +/* Emit IR instructions with on-the-fly optimizations. */ +LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim); + +/* Special return values for the fold functions. */ +enum { + NEXTFOLD, /* Couldn't fold, pass on. */ + RETRYFOLD, /* Retry fold with modified fins. */ + KINTFOLD, /* Return ref for int constant in fins->i. */ + FAILFOLD, /* Guard would always fail. */ + DROPFOLD, /* Guard eliminated. */ + MAX_FOLD +}; + +#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD) +#define INT64FOLD(k) (lj_ir_kint64(J, (k))) +#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond)) +#define LEFTFOLD (J->fold.ins.op1) +#define RIGHTFOLD (J->fold.ins.op2) +#define CSEFOLD (lj_opt_cse(J)) +#define EMITFOLD (lj_ir_emit(J)) + +/* Load/store forwarding. 
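The NEXTFOLD/RETRYFOLD/... codes and the INTFOLD/LEFTFOLD helpers above form the contract between individual fold rules and the fold engine. The rule below is purely hypothetical, only to show the shape of that contract; the real rules live in lj_opt_fold.c elsewhere in this patch and are dispatched by the engine itself:

/* Hypothetical example rule: constant-fold ADD of two KINT operands. */
static TRef fold_demo_kint_add(jit_State *J, IRIns *fleft, IRIns *fright)
{
  if (fleft->o == IR_KINT && fright->o == IR_KINT) {
    int32_t k = (int32_t)((uint32_t)fleft->i + (uint32_t)fright->i);
    return INTFOLD(k);  /* Engine interns the folded constant from fins->i. */
  }
  return NEXTFOLD;      /* Not applicable here: pass on towards CSE/emit. */
}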
*/ +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J); +LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J); +LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim); +LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref); + +/* Dead-store elimination. */ +LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J); + +/* Narrowing. */ +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J); +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key); +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr); +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr); +#if LJ_HASFFI +LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key); +#endif +LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc, + TValue *vb, TValue *vc, IROp op); +LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc); +LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc); +LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc); +LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase); + +/* Optimization passes. */ +LJ_FUNC void lj_opt_dce(jit_State *J); +LJ_FUNC int lj_opt_loop(jit_State *J); +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) +LJ_FUNC void lj_opt_split(jit_State *J); +#else +#define lj_opt_split(J) UNUSED(J) +#endif + +#endif + +#endif diff --git a/src/luajit2/src/lj_jit.h b/src/luajit2/src/lj_jit.h new file mode 100644 index 0000000000..ea2dd4adaf --- /dev/null +++ b/src/luajit2/src/lj_jit.h @@ -0,0 +1,371 @@ +/* +** Common definitions for the JIT compiler. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_JIT_H +#define _LJ_JIT_H + +#include "lj_obj.h" +#include "lj_ir.h" + +/* JIT engine flags. */ +#define JIT_F_ON 0x00000001 + +/* CPU-specific JIT engine flags. */ +#if LJ_TARGET_X86ORX64 +#define JIT_F_CMOV 0x00000010 +#define JIT_F_SSE2 0x00000020 +#define JIT_F_SSE3 0x00000040 +#define JIT_F_SSE4_1 0x00000080 +#define JIT_F_P4 0x00000100 +#define JIT_F_PREFER_IMUL 0x00000200 +#define JIT_F_SPLIT_XMM 0x00000400 +#define JIT_F_LEA_AGU 0x00000800 + +/* Names for the CPU-specific flags. Must match the order above. */ +#define JIT_F_CPU_FIRST JIT_F_CMOV +#define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM" +#elif LJ_TARGET_ARM +#define JIT_F_ARMV6 0x00000010 +#define JIT_F_ARMV6T2 0x00000020 +#define JIT_F_ARMV7 0x00000040 + +/* Names for the CPU-specific flags. Must match the order above. */ +#define JIT_F_CPU_FIRST JIT_F_ARMV6 +#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7" +#else +#define JIT_F_CPU_FIRST 0 +#define JIT_F_CPUSTRING "" +#endif + +/* Optimization flags. 
*/ +#define JIT_F_OPT_MASK 0x0fff0000 + +#define JIT_F_OPT_FOLD 0x00010000 +#define JIT_F_OPT_CSE 0x00020000 +#define JIT_F_OPT_DCE 0x00040000 +#define JIT_F_OPT_FWD 0x00080000 +#define JIT_F_OPT_DSE 0x00100000 +#define JIT_F_OPT_NARROW 0x00200000 +#define JIT_F_OPT_LOOP 0x00400000 +#define JIT_F_OPT_ABC 0x00800000 +#define JIT_F_OPT_FUSE 0x01000000 + +/* Optimizations names for -O. Must match the order above. */ +#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD +#define JIT_F_OPTSTRING \ + "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4fuse" + +/* Optimization levels set a fixed combination of flags. */ +#define JIT_F_OPT_0 0 +#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE) +#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP) +#define JIT_F_OPT_3 \ + (JIT_F_OPT_2|JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_FUSE) +#define JIT_F_OPT_DEFAULT JIT_F_OPT_3 + +#if LJ_TARGET_WINDOWS || LJ_64 +/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */ +#define JIT_P_sizemcode_DEFAULT 64 +#else +/* Could go as low as 4K, but the mmap() overhead would be rather high. */ +#define JIT_P_sizemcode_DEFAULT 32 +#endif + +/* Optimization parameters and their defaults. Length is a char in octal! */ +#define JIT_PARAMDEF(_) \ + _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \ + _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \ + _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \ + _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \ + _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \ + \ + _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \ + _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \ + _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \ + \ + _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \ + _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \ + _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \ + _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \ + \ + /* Size of each machine code area (in KBytes). */ \ + _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \ + /* Max. total size of all machine code areas (in KBytes). */ \ + _(\010, maxmcode, 512) \ + /* End of list. */ + +enum { +#define JIT_PARAMENUM(len, name, value) JIT_P_##name, +JIT_PARAMDEF(JIT_PARAMENUM) +#undef JIT_PARAMENUM + JIT_P__MAX +}; + +#define JIT_PARAMSTR(len, name, value) #len #name +#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR) + +/* Trace compiler state. */ +typedef enum { + LJ_TRACE_IDLE, /* Trace compiler idle. */ + LJ_TRACE_ACTIVE = 0x10, + LJ_TRACE_RECORD, /* Bytecode recording active. */ + LJ_TRACE_START, /* New trace started. */ + LJ_TRACE_END, /* End of trace. */ + LJ_TRACE_ASM, /* Assemble trace. */ + LJ_TRACE_ERR /* Trace aborted with error. */ +} TraceState; + +/* Post-processing action. */ +typedef enum { + LJ_POST_NONE, /* No action. */ + LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */ + LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */ + LJ_POST_FIXBOOL, /* Fixup boolean result. */ + LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */ +} PostProc; + +/* Machine code type. */ +#if LJ_TARGET_X86ORX64 +typedef uint8_t MCode; +#else +typedef uint32_t MCode; +#endif + +/* Stack snapshot header. */ +typedef struct SnapShot { + uint16_t mapofs; /* Offset into snapshot map. */ + IRRef1 ref; /* First IR ref for this snapshot. 
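Both JIT_F_OPTSTRING above and the JIT_P_STRING built from JIT_PARAMDEF use the same compact layout: each name is preceded by a single length byte written as an octal escape. A standalone plain-C sketch of walking such a string (a trimmed demo copy, not the real table; the consumer in this patch is presumably the option handling in lib_jit.c):

#include <stdio.h>

int main(void)
{
  const char *p = "\4fold\3cse\3dce\3fwd";  /* Length byte, then the name. */
  while (*p) {
    int len = *p++;
    printf("%.*s\n", len, p);               /* fold, cse, dce, fwd */
    p += len;
  }
  return 0;
}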
*/ + uint8_t nslots; /* Number of valid slots. */ + uint8_t nent; /* Number of compressed entries. */ + uint8_t depth; /* Number of frame links. */ + uint8_t count; /* Count of taken exits for this snapshot. */ +} SnapShot; + +#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */ + +/* Compressed snapshot entry. */ +typedef uint32_t SnapEntry; + +#define SNAP_FRAME 0x010000 /* Frame slot. */ +#define SNAP_CONT 0x020000 /* Continuation slot. */ +#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */ +#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */ +LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME); +LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT); + +#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref)) +#define SNAP_TR(slot, tr) \ + (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK))) +#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc)) +#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz)) +#define snap_ref(sn) ((sn) & 0xffff) +#define snap_slot(sn) ((BCReg)((sn) >> 24)) +#define snap_isframe(sn) ((sn) & SNAP_FRAME) +#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn)) +#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref)) + +/* Snapshot and exit numbers. */ +typedef uint32_t SnapNo; +typedef uint32_t ExitNo; + +/* Trace number. */ +typedef uint32_t TraceNo; /* Used to pass around trace numbers. */ +typedef uint16_t TraceNo1; /* Stored trace number. */ + +#define TRACE_INTERP 0 /* Fallback to interpreter. */ + +/* Trace object. */ +typedef struct GCtrace { + GCHeader; + uint8_t topslot; /* Top stack slot already checked to be allocated. */ + uint8_t unused1; + IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */ + GCRef gclist; + IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */ + IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */ + uint16_t nsnap; /* Number of snapshots. */ + uint16_t nsnapmap; /* Number of snapshot map elements. */ + SnapShot *snap; /* Snapshot array. */ + SnapEntry *snapmap; /* Snapshot map. */ + GCRef startpt; /* Starting prototype. */ + MRef startpc; /* Bytecode PC of starting instruction. */ + BCIns startins; /* Original bytecode of starting instruction. */ + MSize szmcode; /* Size of machine code. */ + MCode *mcode; /* Start of machine code. */ + MSize mcloop; /* Offset of loop start in machine code. */ + uint16_t nchild; /* Number of child traces (root trace only). */ + uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */ + TraceNo1 traceno; /* Trace number. */ + TraceNo1 link; /* Linked trace (or self for loops). */ + TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */ + TraceNo1 nextroot; /* Next root trace for same prototype. */ + TraceNo1 nextside; /* Next side trace of same root trace. */ + uint16_t unused2; +#ifdef LUAJIT_USE_GDBJIT + void *gdbjit_entry; /* GDB JIT entry. */ +#endif +} GCtrace; + +#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o)) +#define traceref(J, n) \ + check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)])) + +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist)); + +/* Round-robin penalty cache for bytecodes leading to aborted traces. */ +typedef struct HotPenalty { + MRef pc; /* Starting bytecode PC. */ + uint16_t val; /* Penalty value, i.e. hotcount start. */ + uint16_t reason; /* Abort reason (really TraceErr). */ +} HotPenalty; + +#define PENALTY_SLOTS 64 /* Penalty cache slot. Must be a power of 2. 
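A sketch of the compressed snapshot entry layout, using the SNAP* macros above (assumes this header; the slot number, flag and reference are made up for the example):

SnapEntry sn = SNAP(3, SNAP_FRAME, REF_BASE);
lua_assert(snap_slot(sn) == 3);         /* Slot number in the top byte. */
lua_assert(snap_ref(sn) == REF_BASE);   /* IR reference in the low 16 bits. */
lua_assert(snap_isframe(sn));           /* Flag bits sit in between. */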
*/ +#define PENALTY_MIN 36 /* Minimum penalty value. */ +#define PENALTY_MAX 60000 /* Maximum penalty value. */ +#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */ + +/* Round-robin backpropagation cache for narrowing conversions. */ +typedef struct BPropEntry { + IRRef1 key; /* Key: original reference. */ + IRRef1 val; /* Value: reference after conversion. */ + IRRef mode; /* Mode for this entry (currently IRCONV_*). */ +} BPropEntry; + +/* Number of slots for the backpropagation cache. Must be a power of 2. */ +#define BPROP_SLOTS 16 + +/* Scalar evolution analysis cache. */ +typedef struct ScEvEntry { + IRRef1 idx; /* Index reference. */ + IRRef1 start; /* Constant start reference. */ + IRRef1 stop; /* Constant stop reference. */ + IRRef1 step; /* Constant step reference. */ + IRType1 t; /* Scalar type. */ + uint8_t dir; /* Direction. 1: +, 0: -. */ +} ScEvEntry; + +/* 128 bit SIMD constants. */ +enum { + LJ_KSIMD_ABS, + LJ_KSIMD_NEG, + LJ_KSIMD__MAX +}; + +/* Get 16 byte aligned pointer to SIMD constant. */ +#define LJ_KSIMD(J, n) \ + ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15)) + +/* Set/reset flag to activate the SPLIT pass for the current trace. */ +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) +#define lj_needsplit(J) (J->needsplit = 1) +#define lj_resetsplit(J) (J->needsplit = 0) +#else +#define lj_needsplit(J) UNUSED(J) +#define lj_resetsplit(J) UNUSED(J) +#endif + +/* Fold state is used to fold instructions on-the-fly. */ +typedef struct FoldState { + IRIns ins; /* Currently emitted instruction. */ + IRIns left; /* Instruction referenced by left operand. */ + IRIns right; /* Instruction referenced by right operand. */ +} FoldState; + +/* JIT compiler state. */ +typedef struct jit_State { + GCtrace cur; /* Current trace. */ + + lua_State *L; /* Current Lua state. */ + const BCIns *pc; /* Current PC. */ + GCfunc *fn; /* Current function. */ + GCproto *pt; /* Current prototype. */ + TRef *base; /* Current frame base, points into J->slots. */ + + uint32_t flags; /* JIT engine flags. */ + BCReg maxslot; /* Relative to baseslot. */ + BCReg baseslot; /* Current frame base, offset into J->slots. */ + + uint8_t mergesnap; /* Allowed to merge with next snapshot. */ + uint8_t needsnap; /* Need snapshot before recording next bytecode. */ + IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */ + uint8_t bcskip; /* Number of bytecode instructions to skip. */ + + FoldState fold; /* Fold state. */ + + const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */ + MSize bc_extent; /* Extent of the range. */ + + TraceState state; /* Trace compiler state. */ + + int32_t instunroll; /* Unroll counter for instable loops. */ + int32_t loopunroll; /* Unroll counter for loop ops in side traces. */ + int32_t tailcalled; /* Number of successive tailcalls. */ + int32_t framedepth; /* Current frame depth. */ + int32_t retdepth; /* Return frame depth (count of RETF). */ + + MRef k64; /* Pointer to chained array of 64 bit constants. */ + TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */ + + IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */ + IRRef irtoplim; /* Upper limit of instuction buffer (biased). */ + IRRef irbotlim; /* Lower limit of instuction buffer (biased). */ + IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */ + + MSize sizesnap; /* Size of temp. snapshot buffer. */ + SnapShot *snapbuf; /* Temp. snapshot buffer. */ + SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. 
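TValues are only guaranteed 8 byte alignment, so LJ_KSIMD() above rounds up inside the over-allocated ksimd[] array (hence the extra +1 element) to hand out a 16 byte aligned pair, presumably for the FP abs/neg mask constants referenced from lj_ir.h. A sketch assuming a jit_State *J:

TValue *kabs = LJ_KSIMD(J, LJ_KSIMD_ABS);
lua_assert(((intptr_t)kabs & 15) == 0);                   /* 16 byte aligned. */
lua_assert(kabs + 2 <= &J->ksimd[LJ_KSIMD__MAX*2 + 1]);   /* Stays in bounds. */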
*/ + MSize sizesnapmap; /* Size of temp. snapshot map buffer. */ + + PostProc postproc; /* Required post-processing after execution. */ +#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) + int needsplit; /* Need SPLIT pass. */ +#endif + + GCRef *trace; /* Array of traces. */ + TraceNo freetrace; /* Start of scan for next free trace. */ + MSize sizetrace; /* Size of trace array. */ + + IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */ + TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */ + + int32_t param[JIT_P__MAX]; /* JIT engine parameters. */ + + MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */ + + HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */ + uint32_t penaltyslot; /* Round-robin index into penalty slots. */ + uint32_t prngstate; /* PRNG state. */ + + BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */ + uint32_t bpropslot; /* Round-robin index into bpropcache slots. */ + + ScEvEntry scev; /* Scalar evolution analysis cache slots. */ + + const BCIns *startpc; /* Bytecode PC of starting instruction. */ + TraceNo parent; /* Parent of current side trace (0 for root traces). */ + ExitNo exitno; /* Exit number in parent of current side trace. */ + + BCIns *patchpc; /* PC for pending re-patch. */ + BCIns patchins; /* Instruction for pending re-patch. */ + + int mcprot; /* Protection of current mcode area. */ + MCode *mcarea; /* Base of current mcode area. */ + MCode *mctop; /* Top of current mcode area. */ + MCode *mcbot; /* Bottom of current mcode area. */ + size_t szmcarea; /* Size of current mcode area. */ + size_t szallmcarea; /* Total size of all allocated mcode areas. */ + + TValue errinfo; /* Additional info element for trace errors. */ +} LJ_ALIGN(16) jit_State; + +/* Trivial PRNG e.g. used for penalty randomization. */ +static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits) +{ + /* Yes, this LCG is very weak, but that doesn't matter for our use case. */ + J->prngstate = J->prngstate * 1103515245 + 12345; + return J->prngstate >> (32-bits); +} + +#endif diff --git a/src/luajit2/src/lj_lex.c b/src/luajit2/src/lj_lex.c new file mode 100644 index 0000000000..01e0c641a7 --- /dev/null +++ b/src/luajit2/src/lj_lex.c @@ -0,0 +1,507 @@ +/* +** Lexical analyzer. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_lex_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#if LJ_HASFFI +#include "lj_tab.h" +#include "lj_ctype.h" +#include "lj_cdata.h" +#include "lualib.h" +#endif +#include "lj_state.h" +#include "lj_lex.h" +#include "lj_parse.h" +#include "lj_char.h" + +/* Lua lexer token names. */ +static const char *const tokennames[] = { +#define TKSTR1(name) #name, +#define TKSTR2(name, sym) #sym, +TKDEF(TKSTR1, TKSTR2) +#undef TKSTR1 +#undef TKSTR2 + NULL +}; + +/* -- Buffer handling ----------------------------------------------------- */ + +#define char2int(c) ((int)(uint8_t)(c)) +#define next(ls) \ + (ls->current = (ls->n--) > 0 ? 
char2int(*ls->p++) : fillbuf(ls)) +#define save_and_next(ls) (save(ls, ls->current), next(ls)) +#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r') +#define END_OF_STREAM (-1) + +static int fillbuf(LexState *ls) +{ + size_t sz; + const char *buf = ls->rfunc(ls->L, ls->rdata, &sz); + if (buf == NULL || sz == 0) return END_OF_STREAM; + ls->n = (MSize)sz - 1; + ls->p = buf; + return char2int(*(ls->p++)); +} + +static LJ_NOINLINE void save_grow(LexState *ls, int c) +{ + MSize newsize; + if (ls->sb.sz >= LJ_MAX_STR/2) + lj_lex_error(ls, 0, LJ_ERR_XELEM); + newsize = ls->sb.sz * 2; + lj_str_resizebuf(ls->L, &ls->sb, newsize); + ls->sb.buf[ls->sb.n++] = (char)c; +} + +static LJ_AINLINE void save(LexState *ls, int c) +{ + if (LJ_UNLIKELY(ls->sb.n + 1 > ls->sb.sz)) + save_grow(ls, c); + else + ls->sb.buf[ls->sb.n++] = (char)c; +} + +static void inclinenumber(LexState *ls) +{ + int old = ls->current; + lua_assert(currIsNewline(ls)); + next(ls); /* skip `\n' or `\r' */ + if (currIsNewline(ls) && ls->current != old) + next(ls); /* skip `\n\r' or `\r\n' */ + if (++ls->linenumber >= LJ_MAX_LINE) + lj_lex_error(ls, ls->token, LJ_ERR_XLINES); +} + +/* -- Scanner for terminals ----------------------------------------------- */ + +#if LJ_HASFFI +/* Load FFI library on-demand. Needed if we create cdata objects. */ +static void lex_loadffi(lua_State *L) +{ + ptrdiff_t oldtop = savestack(L, L->top); + luaopen_ffi(L); + L->top = restorestack(L, oldtop); +} + +/* Parse 64 bit integer. */ +static int lex_number64(LexState *ls, TValue *tv) +{ + uint64_t n = 0; + uint8_t *p = (uint8_t *)ls->sb.buf; + CTypeID id = CTID_INT64; + GCcdata *cd; + int numl = 0; + if (p[0] == '0' && (p[1] & ~0x20) == 'X') { /* Hexadecimal. */ + p += 2; + if (!lj_char_isxdigit(*p)) return 0; + do { + n = n*16 + (*p & 15); + if (!lj_char_isdigit(*p)) n += 9; + p++; + } while (lj_char_isxdigit(*p)); + } else { /* Decimal. */ + if (!lj_char_isdigit(*p)) return 0; + do { + n = n*10 + (*p - '0'); + p++; + } while (lj_char_isdigit(*p)); + } + for (;;) { /* Parse suffixes. */ + if ((*p & ~0x20) == 'U') + id = CTID_UINT64; + else if ((*p & ~0x20) == 'L') + numl++; + else + break; + p++; + } + if (numl != 2 || *p != '\0') return 0; + /* Return cdata holding a 64 bit integer. */ + cd = lj_cdata_new_(ls->L, id, 8); + *(uint64_t *)cdataptr(cd) = n; + lj_parse_keepcdata(ls, tv, cd); + return 1; /* Ok. */ +} +#endif + +/* Parse a number literal. */ +static void lex_number(LexState *ls, TValue *tv) +{ + int c; + lua_assert(lj_char_isdigit(ls->current)); + do { + c = ls->current; + save_and_next(ls); + } while (lj_char_isident(ls->current) || ls->current == '.' || + ((ls->current == '-' || ls->current == '+') && + ((c & ~0x20) == 'E' || (c & ~0x20) == 'P'))); +#if LJ_HASFFI + c &= ~0x20; + if ((c == 'I' || c == 'L' || c == 'U') && !ctype_ctsG(G(ls->L))) + lex_loadffi(ls->L); + if (c == 'I') /* Parse imaginary part of complex number. */ + ls->sb.n--; +#endif + save(ls, '\0'); +#if LJ_HASFFI + if ((c == 'L' || c == 'U') && lex_number64(ls, tv)) { /* Parse 64 bit int. */ + return; + } else +#endif + if (lj_str_numconv(ls->sb.buf, tv)) { +#if LJ_HASFFI + if (c == 'I') { /* Return cdata holding a complex number. 
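lex_number64() above accepts the C-style 64 bit integer suffixes: exactly two 'L' characters in either case, with an optional 'U'. A standalone plain-C restatement of just that suffix scan (the demo_ names are made up):

#include <assert.h>

static int demo_is_int64_suffix(const char *p, int *is_unsigned)
{
  int numl = 0;
  *is_unsigned = 0;
  for (;;) {                         /* Same loop shape as lex_number64(). */
    if ((*p & ~0x20) == 'U') *is_unsigned = 1;
    else if ((*p & ~0x20) == 'L') numl++;
    else break;
    p++;
  }
  return numl == 2 && *p == '\0';    /* Exactly two L's, nothing trailing. */
}

int main(void)
{
  int u;
  assert(demo_is_int64_suffix("LL", &u) && !u);
  assert(demo_is_int64_suffix("ull", &u) && u);
  assert(!demo_is_int64_suffix("L", &u));
  return 0;
}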
*/ + GCcdata *cd = lj_cdata_new_(ls->L, CTID_COMPLEX_DOUBLE, 2*sizeof(double)); + ((double *)cdataptr(cd))[0] = 0; + ((double *)cdataptr(cd))[1] = numberVnum(tv); + lj_parse_keepcdata(ls, tv, cd); + } +#endif + if (LJ_DUALNUM && tvisnum(tv)) { + int32_t k = lj_num2int(numV(tv)); + if ((lua_Number)k == numV(tv)) /* -0 cannot end up here. */ + setintV(tv, k); + } + return; + } + lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER); +} + +static int skip_sep(LexState *ls) +{ + int count = 0; + int s = ls->current; + lua_assert(s == '[' || s == ']'); + save_and_next(ls); + while (ls->current == '=') { + save_and_next(ls); + count++; + } + return (ls->current == s) ? count : (-count) - 1; +} + +static void read_long_string(LexState *ls, TValue *tv, int sep) +{ + save_and_next(ls); /* skip 2nd `[' */ + if (currIsNewline(ls)) /* string starts with a newline? */ + inclinenumber(ls); /* skip it */ + for (;;) { + switch (ls->current) { + case END_OF_STREAM: + lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM); + break; + case ']': + if (skip_sep(ls) == sep) { + save_and_next(ls); /* skip 2nd `]' */ + goto endloop; + } + break; + case '\n': + case '\r': + save(ls, '\n'); + inclinenumber(ls); + if (!tv) lj_str_resetbuf(&ls->sb); /* avoid wasting space */ + break; + default: + if (tv) save_and_next(ls); + else next(ls); + break; + } + } endloop: + if (tv) { + GCstr *str = lj_parse_keepstr(ls, ls->sb.buf + (2 + (MSize)sep), + ls->sb.n - 2*(2 + (MSize)sep)); + setstrV(ls->L, tv, str); + } +} + +static void read_string(LexState *ls, int delim, TValue *tv) +{ + save_and_next(ls); + while (ls->current != delim) { + switch (ls->current) { + case END_OF_STREAM: + lj_lex_error(ls, TK_eof, LJ_ERR_XSTR); + continue; + case '\n': + case '\r': + lj_lex_error(ls, TK_string, LJ_ERR_XSTR); + continue; + case '\\': { + int c; + next(ls); /* Skip the '\\'. */ + switch (ls->current) { + case 'a': c = '\a'; break; + case 'b': c = '\b'; break; + case 'f': c = '\f'; break; + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + case 'v': c = '\v'; break; + case 'x': /* Hexadecimal escape '\xXX'. */ + c = (next(ls) & 15u) << 4; + if (!lj_char_isdigit(ls->current)) { + if (!lj_char_isxdigit(ls->current)) goto err_xesc; + c += 9 << 4; + } + c += (next(ls) & 15u); + if (!lj_char_isdigit(ls->current)) { + if (!lj_char_isxdigit(ls->current)) goto err_xesc; + c += 9; + } + break; + case '*': /* Skip whitespace. */ + next(ls); + while (lj_char_isspace(ls->current)) + if (currIsNewline(ls)) inclinenumber(ls); else next(ls); + continue; + case '\n': case '\r': save(ls, '\n'); inclinenumber(ls); continue; + case END_OF_STREAM: continue; + default: + if (!lj_char_isdigit(ls->current)) { + save_and_next(ls); /* Handles '\\', '\"' and "\'". */ + } else { /* Decimal escape '\ddd'. 
*/ + c = (ls->current - '0'); + if (lj_char_isdigit(next(ls))) { + c = c*10 + (ls->current - '0'); + if (lj_char_isdigit(next(ls))) { + c = c*10 + (ls->current - '0'); + if (c > 255) { + err_xesc: + lj_lex_error(ls, TK_string, LJ_ERR_XESC); + } + next(ls); + } + } + save(ls, c); + } + continue; + } + save(ls, c); + next(ls); + continue; + } + default: + save_and_next(ls); + break; + } + } + save_and_next(ls); /* skip delimiter */ + setstrV(ls->L, tv, lj_parse_keepstr(ls, ls->sb.buf + 1, ls->sb.n - 2)); +} + +/* -- Main lexical scanner ------------------------------------------------ */ + +static int llex(LexState *ls, TValue *tv) +{ + lj_str_resetbuf(&ls->sb); + for (;;) { + if (lj_char_isident(ls->current)) { + GCstr *s; + if (lj_char_isdigit(ls->current)) { /* Numeric literal. */ + lex_number(ls, tv); + return TK_number; + } + /* Identifier or reserved word. */ + do { + save_and_next(ls); + } while (lj_char_isident(ls->current)); + s = lj_parse_keepstr(ls, ls->sb.buf, ls->sb.n); + if (s->reserved > 0) /* Reserved word? */ + return TK_OFS + s->reserved; + setstrV(ls->L, tv, s); + return TK_name; + } + switch (ls->current) { + case '\n': + case '\r': + inclinenumber(ls); + continue; + case ' ': + case '\t': + case '\v': + case '\f': + next(ls); + continue; + case '-': + next(ls); + if (ls->current != '-') return '-'; + /* else is a comment */ + next(ls); + if (ls->current == '[') { + int sep = skip_sep(ls); + lj_str_resetbuf(&ls->sb); /* `skip_sep' may dirty the buffer */ + if (sep >= 0) { + read_long_string(ls, NULL, sep); /* long comment */ + lj_str_resetbuf(&ls->sb); + continue; + } + } + /* else short comment */ + while (!currIsNewline(ls) && ls->current != END_OF_STREAM) + next(ls); + continue; + case '[': { + int sep = skip_sep(ls); + if (sep >= 0) { + read_long_string(ls, tv, sep); + return TK_string; + } else if (sep == -1) { + return '['; + } else { + lj_lex_error(ls, TK_string, LJ_ERR_XLDELIM); + continue; + } + } + case '=': + next(ls); + if (ls->current != '=') return '='; else { next(ls); return TK_eq; } + case '<': + next(ls); + if (ls->current != '=') return '<'; else { next(ls); return TK_le; } + case '>': + next(ls); + if (ls->current != '=') return '>'; else { next(ls); return TK_ge; } + case '~': + next(ls); + if (ls->current != '=') return '~'; else { next(ls); return TK_ne; } + case '"': + case '\'': + read_string(ls, ls->current, tv); + return TK_string; + case '.': + save_and_next(ls); + if (ls->current == '.') { + next(ls); + if (ls->current == '.') { + next(ls); + return TK_dots; /* ... */ + } + return TK_concat; /* .. */ + } else if (!lj_char_isdigit(ls->current)) { + return '.'; + } else { + lex_number(ls, tv); + return TK_number; + } + case END_OF_STREAM: + return TK_eof; + default: { + int c = ls->current; + next(ls); + return c; /* Single-char tokens (+ - / ...). */ + } + } + } +} + +/* -- Lexer API ----------------------------------------------------------- */ + +/* Setup lexer state. */ +int lj_lex_setup(lua_State *L, LexState *ls) +{ + ls->L = L; + ls->fs = NULL; + ls->n = 0; + ls->p = NULL; + ls->vstack = NULL; + ls->sizevstack = 0; + ls->vtop = 0; + ls->bcstack = NULL; + ls->sizebcstack = 0; + ls->lookahead = TK_eof; /* No look-ahead token. */ + ls->linenumber = 1; + ls->lastline = 1; + lj_str_resizebuf(ls->L, &ls->sb, LJ_MIN_SBUF); + next(ls); /* Read-ahead first char. */ + if (ls->current == 0xef && ls->n >= 2 && char2int(ls->p[0]) == 0xbb && + char2int(ls->p[1]) == 0xbf) { /* Skip UTF-8 BOM (if buffered). 
*/ + ls->n -= 2; + ls->p += 2; + next(ls); + } + if (ls->current == '#') { /* Skip POSIX #! header line. */ + do { + next(ls); + if (ls->current == END_OF_STREAM) return 0; + } while (!currIsNewline(ls)); + inclinenumber(ls); + } + return (ls->current == LUA_SIGNATURE[0]); /* Bytecode dump? */ +} + +/* Cleanup lexer state. */ +void lj_lex_cleanup(lua_State *L, LexState *ls) +{ + global_State *g = G(L); + lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine); + lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo); + lj_str_freebuf(g, &ls->sb); +} + +void lj_lex_next(LexState *ls) +{ + ls->lastline = ls->linenumber; + if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */ + ls->token = llex(ls, &ls->tokenval); /* Get next token. */ + } else { /* Otherwise return lookahead token. */ + ls->token = ls->lookahead; + ls->lookahead = TK_eof; + ls->tokenval = ls->lookaheadval; + } +} + +LexToken lj_lex_lookahead(LexState *ls) +{ + lua_assert(ls->lookahead == TK_eof); + ls->lookahead = llex(ls, &ls->lookaheadval); + return ls->lookahead; +} + +const char *lj_lex_token2str(LexState *ls, LexToken token) +{ + if (token > TK_OFS) + return tokennames[token-TK_OFS-1]; + else if (!lj_char_iscntrl(token)) + return lj_str_pushf(ls->L, "%c", token); + else + return lj_str_pushf(ls->L, "char(%d)", token); +} + +void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...) +{ + const char *tok; + va_list argp; + if (token == 0) { + tok = NULL; + } else if (token == TK_name || token == TK_string || token == TK_number) { + save(ls, '\0'); + tok = ls->sb.buf; + } else { + tok = lj_lex_token2str(ls, token); + } + va_start(argp, em); + lj_err_lex(ls->L, ls->chunkname, tok, ls->linenumber, em, argp); + va_end(argp); +} + +void lj_lex_init(lua_State *L) +{ + uint32_t i; + for (i = 0; i < TK_RESERVED; i++) { + GCstr *s = lj_str_newz(L, tokennames[i]); + fixstring(s); /* Reserved words are never collected. */ + s->reserved = (uint8_t)(i+1); + } +} + diff --git a/src/luajit2/src/lj_lex.h b/src/luajit2/src/lj_lex.h new file mode 100644 index 0000000000..1ddf4b5939 --- /dev/null +++ b/src/luajit2/src/lj_lex.h @@ -0,0 +1,82 @@ +/* +** Lexical analyzer. +** Major parts taken verbatim from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#ifndef _LJ_LEX_H +#define _LJ_LEX_H + +#include <stdarg.h> + +#include "lj_obj.h" +#include "lj_err.h" + +/* Lua lexer tokens. */ +#define TKDEF(_, __) \ + _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \ + _(for) _(function) _(if) _(in) _(local) _(nil) _(not) _(or) \ + _(repeat) _(return) _(then) _(true) _(until) _(while) \ + __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \ + __(number, <number>) __(name, <name>) __(string, <string>) __(eof, <eof>) + +enum { + TK_OFS = 256, +#define TKENUM1(name) TK_##name, +#define TKENUM2(name, sym) TK_##name, +TKDEF(TKENUM1, TKENUM2) +#undef TKENUM1 +#undef TKENUM2 + TK_RESERVED = TK_while - TK_OFS +}; + +typedef int LexToken; + +/* Combined bytecode ins/line. Only used during bytecode generation. */ +typedef struct BCInsLine { + BCIns ins; /* Bytecode instruction. */ + BCLine line; /* Line number for this bytecode. */ +} BCInsLine; + +/* Info for local variables. Only used during bytecode generation. */ +typedef struct VarInfo { + GCRef name; /* Local variable name. */ + BCPos startpc; /* First point where the local variable is active. */ + BCPos endpc; /* First point where the local variable is dead. */ +} VarInfo; + +/* Lua lexer state. 
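*/

/*
** Editor's note: the sketch below is an illustrative addition made in review,
** not part of the upstream LuaJIT sources.  It shows how a consumer of this
** header would drive the lexer declared further down: fill in the reader
** callback fields, call lj_lex_setup(), then pump tokens until TK_eof.
** 'reader', 'reader_data' and the chunk name are assumptions of the sketch;
** error-frame setup and string-buffer initialization are elided, and the
** sketch precedes the declarations it uses, so it is not compiled.
*/
#if 0	/* Sketch only -- not compiled. */
static void lex_usage_sketch(lua_State *L, lua_Reader reader, void *reader_data)
{
  LexState ls;
  ls.rfunc = reader;		/* Reader callback supplying source chunks. */
  ls.rdata = reader_data;	/* Opaque pointer passed back to the callback. */
  ls.chunkarg = "=sketch";	/* Chunk name argument (hypothetical). */
  if (!lj_lex_setup(L, &ls)) {	/* Nonzero would mean a bytecode dump. */
    do {
      lj_lex_next(&ls);		/* ls.token / ls.tokenval now hold the token. */
    } while (ls.token != TK_eof);
  }
  lj_lex_cleanup(L, &ls);
}
#endif

/* Lua lexer state.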
*/ +typedef struct LexState { + struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */ + struct lua_State *L; /* Lua state. */ + TValue tokenval; /* Current token value. */ + TValue lookaheadval; /* Lookahead token value. */ + int current; /* Current character (charint). */ + LexToken token; /* Current token. */ + LexToken lookahead; /* Lookahead token. */ + MSize n; /* Bytes left in input buffer. */ + const char *p; /* Current position in input buffer. */ + SBuf sb; /* String buffer for tokens. */ + lua_Reader rfunc; /* Reader callback. */ + void *rdata; /* Reader callback data. */ + BCLine linenumber; /* Input line counter. */ + BCLine lastline; /* Line of last token. */ + GCstr *chunkname; /* Current chunk name (interned string). */ + const char *chunkarg; /* Chunk name argument. */ + VarInfo *vstack; /* Stack for names and extents of local variables. */ + MSize sizevstack; /* Size of variable stack. */ + MSize vtop; /* Top of variable stack. */ + BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */ + MSize sizebcstack; /* Size of bytecode stack. */ + uint32_t level; /* Syntactical nesting level. */ +} LexState; + +LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls); +LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls); +LJ_FUNC void lj_lex_next(LexState *ls); +LJ_FUNC LexToken lj_lex_lookahead(LexState *ls); +LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken token); +LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...); +LJ_FUNC void lj_lex_init(lua_State *L); + +#endif diff --git a/src/luajit2/src/lj_lib.c b/src/luajit2/src/lj_lib.c new file mode 100644 index 0000000000..655ad08504 --- /dev/null +++ b/src/luajit2/src/lj_lib.c @@ -0,0 +1,260 @@ +/* +** Library function support. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_lib_c +#define LUA_CORE + +#include "lauxlib.h" + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_bc.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_lib.h" + +/* -- Library initialization ---------------------------------------------- */ + +static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize) +{ + if (libname) { + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16); + lua_getfield(L, -1, libname); + if (!tvistab(L->top-1)) { + L->top--; + if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, hsize) != NULL) + lj_err_callerv(L, LJ_ERR_BADMODN, libname); + settabV(L, L->top, tabV(L->top-1)); + L->top++; + lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */ + } + L->top--; + settabV(L, L->top-1, tabV(L->top)); + } else { + lua_createtable(L, 0, hsize); + } + return tabV(L->top-1); +} + +void lj_lib_register(lua_State *L, const char *libname, + const uint8_t *p, const lua_CFunction *cf) +{ + GCtab *env = tabref(L->env); + GCfunc *ofn = NULL; + int ffid = *p++; + BCIns *bcff = &L2GG(L)->bcff[*p++]; + GCtab *tab = lib_create_table(L, libname, *p++); + ptrdiff_t tpos = L->top - L->base; + + /* Avoid barriers further down. 
*/ + lj_gc_anybarriert(L, tab); + tab->nomm = 0; + + for (;;) { + uint32_t tag = *p++; + MSize len = tag & LIBINIT_LENMASK; + tag &= LIBINIT_TAGMASK; + if (tag != LIBINIT_STRING) { + const char *name; + MSize nuv = (MSize)(L->top - L->base - tpos); + GCfunc *fn = lj_func_newC(L, nuv, env); + if (nuv) { + L->top = L->base + tpos; + memcpy(fn->c.upvalue, L->top, sizeof(TValue)*nuv); + } + fn->c.ffid = (uint8_t)(ffid++); + name = (const char *)p; + p += len; + if (tag == LIBINIT_CF) + setmref(fn->c.pc, &G(L)->bc_cfunc_int); + else + setmref(fn->c.pc, bcff++); + if (tag == LIBINIT_ASM_) + fn->c.f = ofn->c.f; /* Copy handler from previous function. */ + else + fn->c.f = *cf++; /* Get cf or handler from C function table. */ + if (len) { + /* NOBARRIER: See above for common barrier. */ + setfuncV(L, lj_tab_setstr(L, tab, lj_str_new(L, name, len)), fn); + } + ofn = fn; + } else { + switch (tag | len) { + case LIBINIT_SET: + L->top -= 2; + if (tvisstr(L->top+1) && strV(L->top+1)->len == 0) + env = tabV(L->top); + else /* NOBARRIER: See above for common barrier. */ + copyTV(L, lj_tab_set(L, tab, L->top+1), L->top); + break; + case LIBINIT_NUMBER: + memcpy(&L->top->n, p, sizeof(double)); + L->top++; + p += sizeof(double); + break; + case LIBINIT_COPY: + copyTV(L, L->top, L->top - *p++); + L->top++; + break; + case LIBINIT_LASTCL: + setfuncV(L, L->top++, ofn); + break; + case LIBINIT_FFID: + ffid++; + break; + case LIBINIT_END: + return; + default: + setstrV(L, L->top++, lj_str_new(L, (const char *)p, len)); + p += len; + break; + } + } + } +} + +/* -- Type checks --------------------------------------------------------- */ + +TValue *lj_lib_checkany(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (o >= L->top) + lj_err_arg(L, narg, LJ_ERR_NOVAL); + return o; +} + +GCstr *lj_lib_checkstr(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (o < L->top) { + if (LJ_LIKELY(tvisstr(o))) { + return strV(o); + } else if (tvisnumber(o)) { + GCstr *s = lj_str_fromnumber(L, o); + setstrV(L, o, s); + return s; + } + } + lj_err_argt(L, narg, LUA_TSTRING); + return NULL; /* unreachable */ +} + +GCstr *lj_lib_optstr(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + return (o < L->top && !tvisnil(o)) ? lj_lib_checkstr(L, narg) : NULL; +} + +#if LJ_DUALNUM +void lj_lib_checknumber(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && + (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o))))) + lj_err_argt(L, narg, LUA_TNUMBER); +} +#endif + +lua_Number lj_lib_checknum(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && + (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o))))) + lj_err_argt(L, narg, LUA_TNUMBER); + if (LJ_UNLIKELY(tvisint(o))) { + lua_Number n = (lua_Number)intV(o); + setnumV(o, n); + return n; + } else { + return numV(o); + } +} + +int32_t lj_lib_checkint(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && + (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o))))) + lj_err_argt(L, narg, LUA_TNUMBER); + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } else { + int32_t i = lj_num2int(numV(o)); + if (LJ_DUALNUM) setintV(o, i); + return i; + } +} + +int32_t lj_lib_optint(lua_State *L, int narg, int32_t def) +{ + TValue *o = L->base + narg-1; + return (o < L->top && !tvisnil(o)) ? 
lj_lib_checkint(L, narg) : def; +} + +int32_t lj_lib_checkbit(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && + (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o))))) + lj_err_argt(L, narg, LUA_TNUMBER); + if (LJ_LIKELY(tvisint(o))) { + return intV(o); + } else { + int32_t i = lj_num2bit(numV(o)); + if (LJ_DUALNUM) setintV(o, i); + return i; + } +} + +GCfunc *lj_lib_checkfunc(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && tvisfunc(o))) + lj_err_argt(L, narg, LUA_TFUNCTION); + return funcV(o); +} + +GCtab *lj_lib_checktab(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (!(o < L->top && tvistab(o))) + lj_err_argt(L, narg, LUA_TTABLE); + return tabV(o); +} + +GCtab *lj_lib_checktabornil(lua_State *L, int narg) +{ + TValue *o = L->base + narg-1; + if (o < L->top) { + if (tvistab(o)) + return tabV(o); + else if (tvisnil(o)) + return NULL; + } + lj_err_arg(L, narg, LJ_ERR_NOTABN); + return NULL; /* unreachable */ +} + +int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst) +{ + GCstr *s = def >= 0 ? lj_lib_optstr(L, narg) : lj_lib_checkstr(L, narg); + if (s) { + const char *opt = strdata(s); + MSize len = s->len; + int i; + for (i = 0; *(const uint8_t *)lst; i++) { + if (*(const uint8_t *)lst == len && memcmp(opt, lst+1, len) == 0) + return i; + lst += 1+*(const uint8_t *)lst; + } + lj_err_argv(L, narg, LJ_ERR_INVOPTM, opt); + } + return def; +} + diff --git a/src/luajit2/src/lj_lib.h b/src/luajit2/src/lj_lib.h new file mode 100644 index 0000000000..2c30b68dc3 --- /dev/null +++ b/src/luajit2/src/lj_lib.h @@ -0,0 +1,112 @@ +/* +** Library function support. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_LIB_H +#define _LJ_LIB_H + +#include "lj_obj.h" + +/* +** A fallback handler is called by the assembler VM if the fast path fails: +** +** - too few arguments: unrecoverable. +** - wrong argument type: recoverable, if coercion succeeds. +** - bad argument value: unrecoverable. +** - stack overflow: recoverable, if stack reallocation succeeds. +** - extra handling: recoverable. +** +** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(), +** lj_err_caller() or lj_err_callermsg(). +** The recoverable cases return 0 or the number of results + 1. +** The assembler VM retries the fast path only if 0 is returned. +** This time the fallback must not be called again or it gets stuck in a loop. +*/ + +/* Return values from fallback handler. */ +#define FFH_RETRY 0 +#define FFH_UNREACHABLE FFH_RETRY +#define FFH_RES(n) ((n)+1) +#define FFH_TAILCALL (-1) + +LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg); +LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg); +LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg); +#if LJ_DUALNUM +LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg); +#else +#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg)) +#endif +LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg); +LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg); +LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def); +LJ_FUNC int32_t lj_lib_checkbit(lua_State *L, int narg); +LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg); +LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg); +LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg); +LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst); + +/* Avoid including lj_frame.h. 
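*/

/*
** Editor's note: hypothetical fallback handler added in review to illustrate
** the return-value contract documented above; it is not part of the upstream
** LuaJIT sources.  It assumes the fast path bails out only when argument 1 is
** not a number.  lj_str_tonumber() and lj_err_argt() live in lj_str.h and
** lj_err.h, which this header does not include, hence the sketch is not
** compiled.
*/
#if 0	/* Sketch only -- not compiled. */
static int lj_ffh_sketch(lua_State *L)
{
  TValue *o = lj_lib_checkany(L, 1);	/* Missing argument: throws (unrecoverable). */
  if (tvisstr(o) && lj_str_tonumber(strV(o), o))
    return FFH_RETRY;	/* Coerced the string in place: the VM retries the fast path once. */
  lj_err_argt(L, 1, LUA_TNUMBER);	/* Wrong type, no coercion: unrecoverable. */
  return FFH_UNREACHABLE;
}
#endif

/* Avoid including lj_frame.h.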
*/ +#define lj_lib_upvalue(L, n) \ + (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1]) + +#if LJ_TARGET_WINDOWS +#define lj_lib_checkfpu(L) \ + do { setnumV(L->top++, (lua_Number)1437217655); \ + if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \ + L->top--; } while (0) +#else +#define lj_lib_checkfpu(L) UNUSED(L) +#endif + +/* Push internal function on the stack. */ +static LJ_AINLINE void lj_lib_pushcc(lua_State *L, lua_CFunction f, + int id, int n) +{ + GCfunc *fn; + lua_pushcclosure(L, f, n); + fn = funcV(L->top-1); + fn->c.ffid = (uint8_t)id; + setmref(fn->c.pc, &G(L)->bc_cfunc_int); +} + +#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0)) + +/* Library function declarations. Scanned by buildvm. */ +#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L) +#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L) +#define LJLIB_ASM_(name) +#define LJLIB_SET(name) +#define LJLIB_PUSH(arg) +#define LJLIB_REC(handler) +#define LJLIB_NOREGUV +#define LJLIB_NOREG + +#define LJ_LIB_REG(L, regname, name) \ + lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name) + +LJ_FUNC void lj_lib_register(lua_State *L, const char *libname, + const uint8_t *init, const lua_CFunction *cf); + +/* Library init data tags. */ +#define LIBINIT_LENMASK 0x3f +#define LIBINIT_TAGMASK 0xc0 +#define LIBINIT_CF 0x00 +#define LIBINIT_ASM 0x40 +#define LIBINIT_ASM_ 0x80 +#define LIBINIT_STRING 0xc0 +#define LIBINIT_MAXSTR 0x39 +#define LIBINIT_SET 0xfa +#define LIBINIT_NUMBER 0xfb +#define LIBINIT_COPY 0xfc +#define LIBINIT_LASTCL 0xfd +#define LIBINIT_FFID 0xfe +#define LIBINIT_END 0xff + +/* Exported library functions. */ + +typedef struct RandomState RandomState; +LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs); + +#endif diff --git a/src/luajit2/src/lj_mcode.c b/src/luajit2/src/lj_mcode.c new file mode 100644 index 0000000000..279854f87b --- /dev/null +++ b/src/luajit2/src/lj_mcode.c @@ -0,0 +1,317 @@ +/* +** Machine code management. +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_mcode_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_jit.h" +#include "lj_mcode.h" +#include "lj_trace.h" +#include "lj_dispatch.h" +#include "lj_vm.h" + +/* -- OS-specific functions ----------------------------------------------- */ + +#if LJ_TARGET_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#include <windows.h> + +#define MCPROT_RW PAGE_READWRITE +#define MCPROT_RX PAGE_EXECUTE_READ +#define MCPROT_RWX PAGE_EXECUTE_READWRITE + +static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot) +{ + void *p = VirtualAlloc((void *)hint, sz, + MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot); + if (!p && !hint) + lj_trace_err(J, LJ_TRERR_MCODEAL); + return p; +} + +static void mcode_free(jit_State *J, void *p, size_t sz) +{ + UNUSED(J); UNUSED(sz); + VirtualFree(p, 0, MEM_RELEASE); +} + +static void mcode_setprot(void *p, size_t sz, DWORD prot) +{ + DWORD oprot; + VirtualProtect(p, sz, prot, &oprot); +} + +#elif LJ_TARGET_POSIX + +#include <sys/mman.h> + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +#define MCPROT_RW (PROT_READ|PROT_WRITE) +#define MCPROT_RX (PROT_READ|PROT_EXEC) +#define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC) + +static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) +{ + void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (p == MAP_FAILED && !hint) + lj_trace_err(J, LJ_TRERR_MCODEAL); + return p; +} + +static void mcode_free(jit_State *J, void *p, size_t sz) +{ + UNUSED(J); + munmap(p, sz); +} + +static void mcode_setprot(void *p, size_t sz, int prot) +{ + mprotect(p, sz, prot); +} + +#elif LJ_64 + +#error "Missing OS support for explicit placement of executable memory" + +#else + +/* Fallback allocator. This will fail if memory is not executable by default. */ +#define LUAJIT_UNPROTECT_MCODE +#define MCPROT_RW 0 +#define MCPROT_RX 0 +#define MCPROT_RWX 0 + +static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) +{ + UNUSED(hint); UNUSED(prot); + return lj_mem_new(J->L, sz); +} + +static void mcode_free(jit_State *J, void *p, size_t sz) +{ + lj_mem_free(J2G(J), p, sz); +} + +#define mcode_setprot(p, sz, prot) UNUSED(p) + +#endif + +/* -- MCode area protection ----------------------------------------------- */ + +/* Define this ONLY if the page protection twiddling becomes a bottleneck. */ +#ifdef LUAJIT_UNPROTECT_MCODE + +/* It's generally considered to be a potential security risk to have +** pages with simultaneous write *and* execute access in a process. +** +** Do not even think about using this mode for server processes or +** apps handling untrusted external data (such as a browser). +** +** The security risk is not in LuaJIT itself -- but if an adversary finds +** any *other* flaw in your C application logic, then any RWX memory page +** simplifies writing an exploit considerably. +*/ +#define MCPROT_GEN MCPROT_RWX +#define MCPROT_RUN MCPROT_RWX + +static void mcode_protect(jit_State *J, int prot) +{ + UNUSED(J); UNUSED(prot); +} + +#else + +/* This is the default behaviour and much safer: +** +** Most of the time the memory pages holding machine code are executable, +** but NONE of them is writable. +** +** The current memory area is marked read-write (but NOT executable) only +** during the short time window while the assembler generates machine code. +*/ +#define MCPROT_GEN MCPROT_RW +#define MCPROT_RUN MCPROT_RX + +/* Change protection of MCode area. 
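*/

/*
** Editor's note: illustrative lifecycle sketch added in review, not part of
** the upstream LuaJIT sources.  It ties together the OS wrappers defined
** above: allocate an area writable for code generation, flip it to executable
** before running it, and release it when it is retired.  The emission step
** itself is elided.
*/
#if 0	/* Sketch only -- not compiled. */
static void mcode_lifecycle_sketch(jit_State *J, size_t sz)
{
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_GEN);	/* Writable (or RWX if unprotected). */
  /* ... emit machine code into p ... */
  mcode_setprot(p, sz, MCPROT_RUN);	/* Executable, and not writable in the default mode. */
  /* ... run the code, then eventually: */
  mcode_free(J, p, sz);
}
#endif

/* Change protection of MCode area.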
*/ +static void mcode_protect(jit_State *J, int prot) +{ + if (J->mcprot != prot) { + mcode_setprot(J->mcarea, J->szmcarea, prot); + J->mcprot = prot; + } +} + +#endif + +/* -- MCode area allocation ----------------------------------------------- */ + +#if LJ_TARGET_X64 +#define mcode_validptr(p) ((p) && (uintptr_t)(p) < (uintptr_t)1<<47) +#else +#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000) +#endif + +#ifdef LJ_TARGET_JUMPRANGE + +/* Get memory within relative jump distance of our code in 64 bit mode. */ +static void *mcode_alloc(jit_State *J, size_t sz) +{ + /* Target an address in the static assembler code (64K aligned). + ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB. + */ + uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff; + const uintptr_t range = (1u << LJ_TARGET_JUMPRANGE) - (1u << 21); + /* First try a contiguous area below the last one. */ + uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0; + int i; + for (i = 0; i < 32; i++) { /* 32 attempts ought to be enough ... */ + if (mcode_validptr(hint)) { + void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN); + + if (mcode_validptr(p)) { + if ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range) + return p; + mcode_free(J, p, sz); /* Free badly placed area. */ + } + } + /* Next try probing pseudo-random addresses. */ + do { + hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16; /* 64K aligned. */ + } while (!(hint + sz < range)); + hint = target + hint - (range>>1); + } + lj_trace_err(J, LJ_TRERR_MCODEAL); /* Give up. OS probably ignores hints? */ + return NULL; +} + +#else + +/* All memory addresses are reachable by relative jumps. */ +#define mcode_alloc(J, sz) mcode_alloc_at((J), 0, (sz), MCPROT_GEN) + +#endif + +/* -- MCode area management ----------------------------------------------- */ + +/* Linked list of MCode areas. */ +typedef struct MCLink { + MCode *next; /* Next area. */ + size_t size; /* Size of current area. */ +} MCLink; + +/* Allocate a new MCode area. */ +static void mcode_allocarea(jit_State *J) +{ + MCode *oldarea = J->mcarea; + size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10; + sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); + J->mcarea = (MCode *)mcode_alloc(J, sz); + J->szmcarea = sz; + J->mcprot = MCPROT_GEN; + J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea); + J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink)); + ((MCLink *)J->mcarea)->next = oldarea; + ((MCLink *)J->mcarea)->size = sz; + J->szallmcarea += sz; +} + +/* Free all MCode areas. */ +void lj_mcode_free(jit_State *J) +{ + MCode *mc = J->mcarea; + J->mcarea = NULL; + J->szallmcarea = 0; + while (mc) { + MCode *next = ((MCLink *)mc)->next; + mcode_free(J, mc, ((MCLink *)mc)->size); + mc = next; + } +} + +/* -- MCode transactions -------------------------------------------------- */ + +/* Reserve the remainder of the current MCode area. */ +MCode *lj_mcode_reserve(jit_State *J, MCode **lim) +{ + if (!J->mcarea) + mcode_allocarea(J); + else + mcode_protect(J, MCPROT_GEN); + *lim = J->mcbot; + return J->mctop; +} + +/* Commit the top part of the current MCode area. */ +void lj_mcode_commit(jit_State *J, MCode *top) +{ + J->mctop = top; + mcode_protect(J, MCPROT_RUN); +} + +/* Abort the reservation. */ +void lj_mcode_abort(jit_State *J) +{ + mcode_protect(J, MCPROT_RUN); +} + +/* Set/reset protection to allow patching of MCode areas. 
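*/

/*
** Editor's note: usage sketch added in review, not part of the upstream
** LuaJIT sources.  The real consumer of the transaction API above is the
** trace assembler; this only shows the reserve/commit pairing.  Machine code
** is emitted downwards from the returned top towards the limit, and the
** emission loop itself is elided.
*/
#if 0	/* Sketch only -- not compiled. */
static void mcode_transaction_sketch(jit_State *J)
{
  MCode *lim, *p;
  MCode *top = lj_mcode_reserve(J, &lim);	/* Area becomes writable (MCPROT_GEN). */
  p = top;
  /* ... emit code by decrementing p; if p would drop below lim, call
  ** lj_mcode_limiterr(J, need) to switch to a fresh area and retry ... */
  lj_mcode_commit(J, p);	/* Publish [p, top) and re-protect (MCPROT_RUN). */
  /* On an aborted trace, lj_mcode_abort(J) re-protects without moving mctop. */
}
#endif

/* Set/reset protection to allow patching of MCode areas.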
*/ +MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish) +{ +#ifdef LUAJIT_UNPROTECT_MCODE + UNUSED(J); UNUSED(ptr); UNUSED(finish); + return NULL; +#else + if (finish) { + if (J->mcarea == ptr) + mcode_protect(J, MCPROT_RUN); + else + mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN); + return NULL; + } else { + MCode *mc = J->mcarea; + /* Try current area first to use the protection cache. */ + if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) { + mcode_protect(J, MCPROT_GEN); + return mc; + } + /* Otherwise search through the list of MCode areas. */ + for (;;) { + mc = ((MCLink *)mc)->next; + lua_assert(mc != NULL); + if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) { + mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN); + return mc; + } + } + } +#endif +} + +/* Limit of MCode reservation reached. */ +void lj_mcode_limiterr(jit_State *J, size_t need) +{ + size_t sizemcode, maxmcode; + lj_mcode_abort(J); + sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10; + sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); + maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10; + if ((size_t)need > sizemcode) + lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */ + if (J->szallmcarea + sizemcode > maxmcode) + lj_trace_err(J, LJ_TRERR_MCODEAL); + mcode_allocarea(J); + lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. */ +} + +#endif diff --git a/src/luajit2/src/lj_mcode.h b/src/luajit2/src/lj_mcode.h new file mode 100644 index 0000000000..be1bdb5aa3 --- /dev/null +++ b/src/luajit2/src/lj_mcode.h @@ -0,0 +1,23 @@ +/* +** Machine code management. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_MCODE_H +#define _LJ_MCODE_H + +#include "lj_jit.h" + +#if LJ_HASJIT +LJ_FUNC void lj_mcode_free(jit_State *J); +LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim); +LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m); +LJ_FUNC void lj_mcode_abort(jit_State *J); +LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish); +LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need); + +#define lj_mcode_commitbot(J, m) (J->mcbot = (m)) + +#endif + +#endif diff --git a/src/luajit2/src/lj_meta.c b/src/luajit2/src/lj_meta.c new file mode 100644 index 0000000000..278d2d34b9 --- /dev/null +++ b/src/luajit2/src/lj_meta.c @@ -0,0 +1,463 @@ +/* +** Metamethod handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_meta_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_vm.h" + +/* -- Metamethod handling ------------------------------------------------- */ + +/* String interning of metamethod names for fast indexing. */ +void lj_meta_init(lua_State *L) +{ +#define MMNAME(name) "__" #name + const char *metanames = MMDEF(MMNAME); +#undef MMNAME + global_State *g = G(L); + const char *p, *q; + uint32_t mm; + for (mm = 0, p = metanames; *p; mm++, p = q) { + GCstr *s; + for (q = p+2; *q && *q != '_'; q++) ; + s = lj_str_new(L, p, (size_t)(q-p)); + /* NOBARRIER: g->gcroot[] is a GC root. */ + setgcref(g->gcroot[GCROOT_MMNAME+mm], obj2gco(s)); + } +} + +/* Negative caching of a few fast metamethods. See the lj_meta_fast() macro. 
*/ +cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name) +{ + cTValue *mo = lj_tab_getstr(mt, name); + lua_assert(mm <= MM_FAST); + if (!mo || tvisnil(mo)) { /* No metamethod? */ + mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */ + return NULL; + } + return mo; +} + +/* Lookup metamethod for object. */ +cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm) +{ + GCtab *mt; + if (tvistab(o)) + mt = tabref(tabV(o)->metatable); + else if (tvisudata(o)) + mt = tabref(udataV(o)->metatable); + else + mt = tabref(basemt_obj(G(L), o)); + if (mt) { + cTValue *mo = lj_tab_getstr(mt, mmname_str(G(L), mm)); + if (mo) + return mo; + } + return niltv(L); +} + +#if LJ_HASFFI +/* Tailcall from C function. */ +int lj_meta_tailcall(lua_State *L, cTValue *tv) +{ + TValue *base = L->base; + TValue *top = L->top; + const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */ + copyTV(L, base-1, tv); /* Replace frame with new object. */ + top->u64 = 0; + setframe_pc(top, pc); + setframe_gc(top+1, obj2gco(L)); /* Dummy frame object. */ + setframe_ftsz(top+1, (int)((char *)(top+2) - (char *)base) + FRAME_CONT); + L->base = L->top = top+2; + /* + ** before: [old_mo|PC] [... ...] + ** ^base ^top + ** after: [new_mo|itype] [... ...] [NULL|PC] [dummy|delta] + ** ^base/top + ** tailcall: [new_mo|PC] [... ...] + ** ^base ^top + */ + return 0; +} +#endif + +/* Setup call to metamethod to be run by Assembler VM. */ +static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo, + cTValue *a, cTValue *b) +{ + /* + ** |-- framesize -> top top+1 top+2 top+3 + ** before: [func slots ...] + ** mm setup: [func slots ...] [cont|?] [mo|tmtype] [a] [b] + ** in asm: [func slots ...] [cont|PC] [mo|delta] [a] [b] + ** ^-- func base ^-- mm base + ** after mm: [func slots ...] [result] + ** ^-- copy to base[PC_RA] --/ for lj_cont_ra + ** istruecond + branch for lj_cont_cond* + ** ignore for lj_cont_nop + ** next PC: [func slots ...] + */ + TValue *top = L->top; + if (curr_funcisL(L)) top = curr_topL(L); + setcont(top, cont); /* Assembler VM stores PC in upper word. */ + copyTV(L, top+1, mo); /* Store metamethod and two arguments. */ + copyTV(L, top+2, a); + copyTV(L, top+3, b); + return top+2; /* Return new base. */ +} + +/* -- C helpers for some instructions, called from assembler VM ----------- */ + +/* Helper for TGET*. __index chain and metamethod. */ +cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k) +{ + int loop; + for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) { + cTValue *mo; + if (LJ_LIKELY(tvistab(o))) { + GCtab *t = tabV(o); + cTValue *tv = lj_tab_get(L, t, k); + if (!tvisnil(tv) || + !(mo = lj_meta_fast(L, tabref(t->metatable), MM_index))) + return tv; + } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_index))) { + lj_err_optype(L, o, LJ_ERR_OPINDEX); + return NULL; /* unreachable */ + } + if (tvisfunc(mo)) { + L->top = mmcall(L, lj_cont_ra, mo, o, k); + return NULL; /* Trigger metamethod call. */ + } + o = mo; + } + lj_err_msg(L, LJ_ERR_GETLOOP); + return NULL; /* unreachable */ +} + +/* Helper for TSET*. __newindex chain and metamethod. */ +TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k) +{ + TValue tmp; + int loop; + for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) { + cTValue *mo; + if (LJ_LIKELY(tvistab(o))) { + GCtab *t = tabV(o); + cTValue *tv = lj_tab_get(L, t, k); + if (LJ_LIKELY(!tvisnil(tv))) { + t->nomm = 0; /* Invalidate negative metamethod cache. 
*/ + lj_gc_anybarriert(L, t); + return (TValue *)tv; + } else if (!(mo = lj_meta_fast(L, tabref(t->metatable), MM_newindex))) { + t->nomm = 0; /* Invalidate negative metamethod cache. */ + lj_gc_anybarriert(L, t); + if (tv != niltv(L)) + return (TValue *)tv; + if (tvisnil(k)) lj_err_msg(L, LJ_ERR_NILIDX); + else if (tvisint(k)) { setnumV(&tmp, (lua_Number)intV(k)); k = &tmp; } + else if (tvisnum(k) && tvisnan(k)) lj_err_msg(L, LJ_ERR_NANIDX); + return lj_tab_newkey(L, t, k); + } + } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_newindex))) { + lj_err_optype(L, o, LJ_ERR_OPINDEX); + return NULL; /* unreachable */ + } + if (tvisfunc(mo)) { + L->top = mmcall(L, lj_cont_nop, mo, o, k); + /* L->top+2 = v filled in by caller. */ + return NULL; /* Trigger metamethod call. */ + } + copyTV(L, &tmp, mo); + o = &tmp; + } + lj_err_msg(L, LJ_ERR_SETLOOP); + return NULL; /* unreachable */ +} + +static cTValue *str2num(cTValue *o, TValue *n) +{ + if (tvisnum(o)) + return o; + else if (tvisint(o)) + return (setnumV(n, (lua_Number)intV(o)), n); + else if (tvisstr(o) && lj_str_tonum(strV(o), n)) + return n; + else + return NULL; +} + +/* Helper for arithmetic instructions. Coercion, metamethod. */ +TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc, + BCReg op) +{ + MMS mm = bcmode_mm(op); + TValue tempb, tempc; + cTValue *b, *c; + if ((b = str2num(rb, &tempb)) != NULL && + (c = str2num(rc, &tempc)) != NULL) { /* Try coercion first. */ + setnumV(ra, lj_vm_foldarith(numV(b), numV(c), (int)mm-MM_add)); + return NULL; + } else { + cTValue *mo = lj_meta_lookup(L, rb, mm); + if (tvisnil(mo)) { + mo = lj_meta_lookup(L, rc, mm); + if (tvisnil(mo)) { + if (str2num(rb, &tempb) == NULL) rc = rb; + lj_err_optype(L, rc, LJ_ERR_OPARITH); + return NULL; /* unreachable */ + } + } + return mmcall(L, lj_cont_ra, mo, rb, rc); + } +} + +/* In-place coercion of a number to a string. */ +static LJ_AINLINE int tostring(lua_State *L, TValue *o) +{ + if (tvisstr(o)) { + return 1; + } else if (tvisnumber(o)) { + setstrV(L, o, lj_str_fromnumber(L, o)); + return 1; + } else { + return 0; + } +} + +/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */ +TValue *lj_meta_cat(lua_State *L, TValue *top, int left) +{ + do { + int n = 1; + if (!(tvisstr(top-1) || tvisnumber(top-1)) || !tostring(L, top)) { + cTValue *mo = lj_meta_lookup(L, top-1, MM_concat); + if (tvisnil(mo)) { + mo = lj_meta_lookup(L, top, MM_concat); + if (tvisnil(mo)) { + if (tvisstr(top-1) || tvisnumber(top-1)) top++; + lj_err_optype(L, top-1, LJ_ERR_OPCAT); + return NULL; /* unreachable */ + } + } + /* One of the top two elements is not a string, call __cat metamethod: + ** + ** before: [...][CAT stack .........................] + ** top-1 top top+1 top+2 + ** pick two: [...][CAT stack ...] [o1] [o2] + ** setup mm: [...][CAT stack ...] [cont|?] [mo|tmtype] [o1] [o2] + ** in asm: [...][CAT stack ...] [cont|PC] [mo|delta] [o1] [o2] + ** ^-- func base ^-- mm base + ** after mm: [...][CAT stack ...] <--push-- [result] + ** next step: [...][CAT stack .............] + */ + copyTV(L, top+2, top); /* Careful with the order of stack copies! */ + copyTV(L, top+1, top-1); + copyTV(L, top, mo); + setcont(top-1, lj_cont_cat); + return top+1; /* Trigger metamethod call. */ + } else if (strV(top)->len == 0) { /* Shortcut. */ + (void)tostring(L, top-1); + } else { + /* Pick as many strings as possible from the top and concatenate them: + ** + ** before: [...][CAT stack ...........................] + ** pick str: [...][CAT stack ...] [...... 
strings ......] + ** concat: [...][CAT stack ...] [result] + ** next step: [...][CAT stack ............] + */ + MSize tlen = strV(top)->len; + char *buffer; + int i; + for (n = 1; n <= left && tostring(L, top-n); n++) { + MSize len = strV(top-n)->len; + if (len >= LJ_MAX_STR - tlen) + lj_err_msg(L, LJ_ERR_STROV); + tlen += len; + } + buffer = lj_str_needbuf(L, &G(L)->tmpbuf, tlen); + n--; + tlen = 0; + for (i = n; i >= 0; i--) { + MSize len = strV(top-i)->len; + memcpy(buffer + tlen, strVdata(top-i), len); + tlen += len; + } + setstrV(L, top-n, lj_str_new(L, buffer, tlen)); + } + left -= n; + top -= n; + } while (left >= 1); + lj_gc_check_fixtop(L); + return NULL; +} + +/* Helper for LEN. __len metamethod. */ +TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o) +{ + cTValue *mo = lj_meta_lookup(L, o, MM_len); + if (tvisnil(mo)) { +#ifdef LUAJIT_ENABLE_LUA52COMPAT + if (tvistab(o)) + tabref(tabV(o)->metatable)->nomm |= (uint8_t)(1u<<MM_len); + else +#endif + lj_err_optype(L, o, LJ_ERR_OPLEN); + return NULL; + } +#ifdef LUAJIT_ENABLE_LUA52COMPAT + return mmcall(L, lj_cont_ra, mo, o, o); +#else + return mmcall(L, lj_cont_ra, mo, o, niltv(L)); +#endif +} + +/* Helper for equality comparisons. __eq metamethod. */ +TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne) +{ + /* Field metatable must be at same offset for GCtab and GCudata! */ + cTValue *mo = lj_meta_fast(L, tabref(o1->gch.metatable), MM_eq); + if (mo) { + TValue *top; + uint32_t it; + if (tabref(o1->gch.metatable) != tabref(o2->gch.metatable)) { + cTValue *mo2 = lj_meta_fast(L, tabref(o2->gch.metatable), MM_eq); + if (mo2 == NULL || !lj_obj_equal(mo, mo2)) + return (TValue *)(intptr_t)ne; + } + top = curr_top(L); + setcont(top, ne ? lj_cont_condf : lj_cont_condt); + copyTV(L, top+1, mo); + it = ~(uint32_t)o1->gch.gct; + setgcV(L, top+2, o1, it); + setgcV(L, top+3, o2, it); + return top+2; /* Trigger metamethod call. */ + } + return (TValue *)(intptr_t)ne; +} + +#if LJ_HASFFI +TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins) +{ + ASMFunction cont = (bc_op(ins) & 1) ? lj_cont_condf : lj_cont_condt; + int op = (int)bc_op(ins) & ~1; + TValue tv; + cTValue *mo, *o2, *o1 = &L->base[bc_a(ins)]; + cTValue *o1mm = o1; + if (op == BC_ISEQV) { + o2 = &L->base[bc_d(ins)]; + if (!tviscdata(o1mm)) o1mm = o2; + } else if (op == BC_ISEQS) { + setstrV(L, &tv, gco2str(proto_kgc(curr_proto(L), ~(ptrdiff_t)bc_d(ins)))); + o2 = &tv; + } else if (op == BC_ISEQN) { + o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)]; + } else { + lua_assert(op == BC_ISEQP); + setitype(&tv, ~bc_d(ins)); + o2 = &tv; + } + mo = lj_meta_lookup(L, o1mm, MM_eq); + if (LJ_LIKELY(!tvisnil(mo))) + return mmcall(L, cont, mo, o1, o2); + else + return (TValue *)(intptr_t)(bc_op(ins) & 1); +} +#endif + +/* Helper for ordered comparisons. String compare, __lt/__le metamethods. */ +TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op) +{ + if (LJ_HASFFI && (tviscdata(o1) || tviscdata(o2))) { + ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt; + MMS mm = (op & 2) ? MM_le : MM_lt; + cTValue *mo = lj_meta_lookup(L, tviscdata(o1) ? o1 : o2, mm); + if (LJ_UNLIKELY(tvisnil(mo))) goto err; + return mmcall(L, cont, mo, o1, o2); + } else if (itype(o1) == itype(o2)) { /* Never called with two numbers. */ + if (tvisstr(o1) && tvisstr(o2)) { + int32_t res = lj_str_cmp(strV(o1), strV(o2)); + return (TValue *)(intptr_t)(((op&2) ? res <= 0 : res < 0) ^ (op&1)); + } else { + trymt: + while (1) { + ASMFunction cont = (op & 1) ? 
lj_cont_condf : lj_cont_condt; + MMS mm = (op & 2) ? MM_le : MM_lt; + cTValue *mo = lj_meta_lookup(L, o1, mm); + cTValue *mo2 = lj_meta_lookup(L, o2, mm); + if (tvisnil(mo) || !lj_obj_equal(mo, mo2)) { + if (op & 2) { /* MM_le not found: retry with MM_lt. */ + cTValue *ot = o1; o1 = o2; o2 = ot; /* Swap operands. */ + op ^= 3; /* Use LT and flip condition. */ + continue; + } + goto err; + } + return mmcall(L, cont, mo, o1, o2); + } + } + } else if (tvisbool(o1) && tvisbool(o2)) { + goto trymt; + } else { + err: + lj_err_comp(L, o1, o2); + return NULL; + } +} + +/* Helper for calls. __call metamethod. */ +void lj_meta_call(lua_State *L, TValue *func, TValue *top) +{ + cTValue *mo = lj_meta_lookup(L, func, MM_call); + TValue *p; + if (!tvisfunc(mo)) + lj_err_optype_call(L, func); + for (p = top; p > func; p--) copyTV(L, p, p-1); + copyTV(L, func, mo); +} + +/* Helper for FORI. Coercion. */ +void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o) +{ + if (!(tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o)))) + lj_err_msg(L, LJ_ERR_FORINIT); + if (!(tvisnumber(o+1) || (tvisstr(o+1) && lj_str_tonumber(strV(o+1), o+1)))) + lj_err_msg(L, LJ_ERR_FORLIM); + if (!(tvisnumber(o+2) || (tvisstr(o+2) && lj_str_tonumber(strV(o+2), o+2)))) + lj_err_msg(L, LJ_ERR_FORSTEP); + if (LJ_DUALNUM) { + /* Ensure all slots are integers or all slots are numbers. */ + int32_t k[3]; + int nint = 0; + ptrdiff_t i; + for (i = 0; i <= 2; i++) { + if (tvisint(o+i)) { + k[i] = intV(o+i); nint++; + } else { + k[i] = lj_num2int(numV(o+i)); nint += ((lua_Number)k[i] == numV(o+i)); + } + } + if (nint == 3) { /* Narrow to integers. */ + setintV(o, k[0]); + setintV(o+1, k[1]); + setintV(o+2, k[2]); + } else if (nint != 0) { /* Widen to numbers. */ + if (tvisint(o)) setnumV(o, (lua_Number)intV(o)); + if (tvisint(o+1)) setnumV(o+1, (lua_Number)intV(o+1)); + if (tvisint(o+2)) setnumV(o+2, (lua_Number)intV(o+2)); + } + } +} + diff --git a/src/luajit2/src/lj_meta.h b/src/luajit2/src/lj_meta.h new file mode 100644 index 0000000000..3fec5b2b03 --- /dev/null +++ b/src/luajit2/src/lj_meta.h @@ -0,0 +1,37 @@ +/* +** Metamethod handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_META_H +#define _LJ_META_H + +#include "lj_obj.h" + +/* Metamethod handling */ +LJ_FUNC void lj_meta_init(lua_State *L); +LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name); +LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm); +#if LJ_HASFFI +LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv); +#endif + +#define lj_meta_fastg(g, mt, mm) \ + ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \ + lj_meta_cache(mt, mm, mmname_str(g, mm))) +#define lj_meta_fast(L, mt, mm) lj_meta_fastg(G(L), mt, mm) + +/* C helpers for some instructions, called from assembler VM. 
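*/

/*
** Editor's note: usage sketch added in review, not part of the upstream
** LuaJIT sources.  It shows the typical fast-metamethod probe built from the
** macros above, as used e.g. by lj_meta_tget() in lj_meta.c: for the first
** few metamethods (up to MM_FAST) a miss sets a bit in mt->nomm, so repeated
** probes return NULL without another hash lookup.
*/
#if 0	/* Sketch only -- not compiled. */
static cTValue *probe_index_mm_sketch(lua_State *L, GCtab *t)
{
  /* NULL means: no metatable, negative-cached miss, or __index set to nil. */
  return lj_meta_fast(L, tabref(t->metatable), MM_index);
}
#endif

/* C helpers for some instructions, called from assembler VM.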
*/ +LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k); +LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k); +LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, + cTValue *rc, BCReg op); +LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left); +LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o); +LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne); +LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins); +LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op); +LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top); +LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o); + +#endif diff --git a/src/luajit2/src/lj_obj.c b/src/luajit2/src/lj_obj.c new file mode 100644 index 0000000000..1476f0b2c2 --- /dev/null +++ b/src/luajit2/src/lj_obj.c @@ -0,0 +1,35 @@ +/* +** Miscellaneous object handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_obj_c +#define LUA_CORE + +#include "lj_obj.h" + +/* Object type names. */ +LJ_DATADEF const char *const lj_obj_typename[] = { /* ORDER LUA_T */ + "no value", "nil", "boolean", "userdata", "number", "string", + "table", "function", "userdata", "thread", "proto", "cdata" +}; + +LJ_DATADEF const char *const lj_obj_itypename[] = { /* ORDER LJ_T */ + "nil", "boolean", "boolean", "userdata", "string", "upval", "thread", + "proto", "function", "trace", "cdata", "table", "userdata", "number" +}; + +/* Compare two objects without calling metamethods. */ +int lj_obj_equal(cTValue *o1, cTValue *o2) +{ + if (itype(o1) == itype(o2)) { + if (tvispri(o1)) + return 1; + if (!tvisnum(o1)) + return gcrefeq(o1->gcr, o2->gcr); + } else if (!tvisnumber(o1) || !tvisnumber(o2)) { + return 0; + } + return numberVnum(o1) == numberVnum(o2); +} + diff --git a/src/luajit2/src/lj_obj.h b/src/luajit2/src/lj_obj.h new file mode 100644 index 0000000000..afb29d0f7b --- /dev/null +++ b/src/luajit2/src/lj_obj.h @@ -0,0 +1,841 @@ +/* +** LuaJIT VM tags, values and objects. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#ifndef _LJ_OBJ_H +#define _LJ_OBJ_H + +#include "lua.h" +#include "lj_def.h" +#include "lj_arch.h" + +/* -- Memory references (32 bit address space) ---------------------------- */ + +/* Memory size. */ +typedef uint32_t MSize; + +/* Memory reference */ +typedef struct MRef { + uint32_t ptr32; /* Pseudo 32 bit pointer. */ +} MRef; + +#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32) + +#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p)) +#define setmrefr(r, v) ((r).ptr32 = (v).ptr32) + +/* -- GC object references (32 bit address space) ------------------------- */ + +/* GCobj reference */ +typedef struct GCRef { + uint32_t gcptr32; /* Pseudo 32 bit pointer. */ +} GCRef; + +/* Common GC header for all collectable objects. */ +#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct +/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. 
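*/

/*
** Editor's note: illustrative sketch added in review, not part of the
** upstream LuaJIT sources.  MRef and GCRef (defined above) store native
** pointers as 32 bit values, which relies on the allocator keeping the
** referenced objects within a 32 bit address range on 64 bit targets.
** Round-tripping a pointer through an MRef therefore looks like this:
*/
#if 0	/* Sketch only -- not compiled. */
static void *mref_roundtrip_sketch(void *p)
{
  MRef r;
  setmref(r, p);	/* Store only the low 32 bits of the pointer. */
  return mref(r, void);	/* Widen back to a native pointer. */
}
#endif

/* GCRef accessors and setters.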
*/ + +#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32) +#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32) +#define gcrefu(r) ((r).gcptr32) +#define gcrefi(r) ((int32_t)(r).gcptr32) +#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32) +#define gcnext(gc) (gcref((gc)->gch.nextgc)) + +#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch) +#define setgcrefi(r, i) ((r).gcptr32 = (uint32_t)(i)) +#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p)) +#define setgcrefnull(r) ((r).gcptr32 = 0) +#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32) + +/* IMPORTANT NOTE: +** +** All uses of the setgcref* macros MUST be accompanied with a write barrier. +** +** This is to ensure the integrity of the incremental GC. The invariant +** to preserve is that a black object never points to a white object. +** I.e. never store a white object into a field of a black object. +** +** It's ok to LEAVE OUT the write barrier ONLY in the following cases: +** - The source is not a GC object (NULL). +** - The target is a GC root. I.e. everything in global_State. +** - The target is a lua_State field (threads are never black). +** - The target is a stack slot, see setgcV et al. +** - The target is an open upvalue, i.e. pointing to a stack slot. +** - The target is a newly created object (i.e. marked white). But make +** sure nothing invokes the GC inbetween. +** - The target and the source are the same object (self-reference). +** - The target already contains the object (e.g. moving elements around). +** +** The most common case is a store to a stack slot. All other cases where +** a barrier has been omitted are annotated with a NOBARRIER comment. +** +** The same logic applies for stores to table slots (array part or hash +** part). ALL uses of lj_tab_set* require a barrier for the stored value +** *and* the stored key, based on the above rules. In practice this means +** a barrier is needed if *either* of the key or value are a GC object. +** +** It's ok to LEAVE OUT the write barrier in the following special cases: +** - The stored value is nil. The key doesn't matter because it's either +** not resurrected or lj_tab_newkey() will take care of the key barrier. +** - The key doesn't matter if the *previously* stored value is guaranteed +** to be non-nil (because the key is kept alive in the table). +** - The key doesn't matter if it's guaranteed not to be part of the table, +** since lj_tab_newkey() takes care of the key barrier. This applies +** trivially to new tables, but watch out for resurrected keys. Storing +** a nil value leaves the key in the table! +** +** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used +** by the interpreter for all table stores. +** +** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark +** dead keys in tables. The reference is left in, but it's guaranteed to +** be never dereferenced as long as the value is nil. It's ok if the key is +** freed or if any object subsequently gets the same address. +** +** Not destroying dead keys helps to keep key hash slots stable. This avoids +** specialization back-off for HREFK when a value flips between nil and +** non-nil and the GC gets in the way. It also allows safely hoisting +** HREF/HREFK across GC steps. Dead keys are only removed if a table is +** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize. +** +** The trade-off is that a write barrier for tables must take the key into +** account, too. 
Implicitly resurrecting the key by storing a non-nil value +** may invalidate the incremental GC invariant. +*/ + +/* -- Common type definitions --------------------------------------------- */ + +/* Types for handling bytecodes. Need this here, details in lj_bc.h. */ +typedef uint32_t BCIns; /* Bytecode instruction. */ +typedef uint32_t BCPos; /* Bytecode position. */ +typedef uint32_t BCReg; /* Bytecode register. */ +typedef int32_t BCLine; /* Bytecode line number. */ + +/* Internal assembler functions. Never call these directly from C. */ +typedef void (*ASMFunction)(void); + +/* Resizable string buffer. Need this here, details in lj_str.h. */ +typedef struct SBuf { + char *buf; /* String buffer base. */ + MSize n; /* String buffer length. */ + MSize sz; /* String buffer size. */ +} SBuf; + +/* -- Tags and values ----------------------------------------------------- */ + +/* Frame link. */ +typedef union { + int32_t ftsz; /* Frame type and size of previous frame. */ + MRef pcr; /* Overlaps PC for Lua frames. */ +} FrameLink; + +/* Tagged value. */ +typedef LJ_ALIGN(8) union TValue { + uint64_t u64; /* 64 bit pattern overlaps number. */ + lua_Number n; /* Number object overlaps split tag/value object. */ + struct { + LJ_ENDIAN_LOHI( + union { + GCRef gcr; /* GCobj reference (if any). */ + int32_t i; /* Integer value. */ + }; + , uint32_t it; /* Internal object tag. Must overlap MSW of number. */ + ) + }; + struct { + LJ_ENDIAN_LOHI( + GCRef func; /* Function for next frame (or dummy L). */ + , FrameLink tp; /* Link to previous frame. */ + ) + } fr; + struct { + LJ_ENDIAN_LOHI( + uint32_t lo; /* Lower 32 bits of number. */ + , uint32_t hi; /* Upper 32 bits of number. */ + ) + } u32; +} TValue; + +typedef const TValue cTValue; + +#define tvref(r) (mref(r, TValue)) + +/* More external and GCobj tags for internal objects. */ +#define LAST_TT LUA_TTHREAD +#define LUA_TPROTO (LAST_TT+1) +#define LUA_TCDATA (LAST_TT+2) + +/* Internal object tags. +** +** Internal tags overlap the MSW of a number object (must be a double). +** Interpreted as a double these are special NaNs. The FPU only generates +** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available +** for use as internal tags. Small negative numbers are used to shorten the +** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate). +** +** ---MSW---.---LSW--- +** primitive types | itype | | +** lightuserdata | itype | void * | (32 bit platforms) +** lightuserdata |ffff| void * | (64 bit platforms, 47 bit pointers) +** GC objects | itype | GCRef | +** int (LJ_DUALNUM)| itype | int | +** number -------double------ +** +** ORDER LJ_T +** Primitive types nil/false/true must be first, lightuserdata next. +** GC objects are at the end, table/userdata must be lowest. +** Also check lj_ir.h for similar ordering constraints. +*/ +#define LJ_TNIL (~0u) +#define LJ_TFALSE (~1u) +#define LJ_TTRUE (~2u) +#define LJ_TLIGHTUD (~3u) +#define LJ_TSTR (~4u) +#define LJ_TUPVAL (~5u) +#define LJ_TTHREAD (~6u) +#define LJ_TPROTO (~7u) +#define LJ_TFUNC (~8u) +#define LJ_TTRACE (~9u) +#define LJ_TCDATA (~10u) +#define LJ_TTAB (~11u) +#define LJ_TUDATA (~12u) +/* This is just the canonical number type used in some places. 
*/ +#define LJ_TNUMX (~13u) + +/* Integers have itype == LJ_TISNUM doubles have itype < LJ_TISNUM */ +#if LJ_64 +#define LJ_TISNUM 0xfffeffffu +#else +#define LJ_TISNUM LJ_TNUMX +#endif +#define LJ_TISTRUECOND LJ_TFALSE +#define LJ_TISPRI LJ_TTRUE +#define LJ_TISGCV (LJ_TSTR+1) +#define LJ_TISTABUD LJ_TTAB + +/* -- String object ------------------------------------------------------- */ + +/* String object header. String payload follows. */ +typedef struct GCstr { + GCHeader; + uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */ + uint8_t unused; + MSize hash; /* Hash of string. */ + MSize len; /* Size of string. */ +} GCstr; + +#define strref(r) (&gcref((r))->str) +#define strdata(s) ((const char *)((s)+1)) +#define strdatawr(s) ((char *)((s)+1)) +#define strVdata(o) strdata(strV(o)) +#define sizestring(s) (sizeof(struct GCstr)+(s)->len+1) + +/* -- Userdata object ----------------------------------------------------- */ + +/* Userdata object. Payload follows. */ +typedef struct GCudata { + GCHeader; + uint8_t udtype; /* Userdata type. */ + uint8_t unused2; + GCRef env; /* Should be at same offset in GCfunc. */ + MSize len; /* Size of payload. */ + GCRef metatable; /* Must be at same offset in GCtab. */ + uint32_t align1; /* To force 8 byte alignment of the payload. */ +} GCudata; + +/* Userdata types. */ +enum { + UDTYPE_USERDATA, /* Regular userdata. */ + UDTYPE_IO_FILE, /* I/O library FILE. */ + UDTYPE_FFI_CLIB, /* FFI C library namespace. */ + UDTYPE__MAX +}; + +#define uddata(u) ((void *)((u)+1)) +#define sizeudata(u) (sizeof(struct GCudata)+(u)->len) + +/* -- C data object ------------------------------------------------------- */ + +/* C data object. Payload follows. */ +typedef struct GCcdata { + GCHeader; + uint16_t typeid; /* C type ID. */ +} GCcdata; + +/* Prepended to variable-sized or realigned C data objects. */ +typedef struct GCcdataVar { + uint16_t offset; /* Offset to allocated memory (relative to GCcdata). */ + uint16_t extra; /* Extra space allocated (incl. GCcdata + GCcdatav). */ + MSize len; /* Size of payload. */ +} GCcdataVar; + +#define cdataptr(cd) ((void *)((cd)+1)) +#define cdataisv(cd) ((cd)->marked & 0x80) +#define cdatav(cd) ((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar))) +#define cdatavlen(cd) check_exp(cdataisv(cd), cdatav(cd)->len) +#define sizecdatav(cd) (cdatavlen(cd) + cdatav(cd)->extra) +#define memcdatav(cd) ((void *)((char *)(cd) - cdatav(cd)->offset)) + +/* -- Prototype object ---------------------------------------------------- */ + +#define SCALE_NUM_GCO ((int32_t)sizeof(lua_Number)/sizeof(GCRef)) +#define round_nkgc(n) (((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1)) + +typedef struct GCproto { + GCHeader; + uint8_t numparams; /* Number of parameters. */ + uint8_t framesize; /* Fixed frame size. */ + MSize sizebc; /* Number of bytecode instructions. */ + GCRef gclist; + MRef k; /* Split constant array (points to the middle). */ + MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */ + MSize sizekgc; /* Number of collectable constants. */ + MSize sizekn; /* Number of lua_Number constants. */ + MSize sizept; /* Total size including colocated arrays. */ + uint8_t sizeuv; /* Number of upvalues. */ + uint8_t flags; /* Miscellaneous flags (see below). */ + uint16_t trace; /* Anchor for chain of root traces. */ + /* ------ The following fields are for debugging/tracebacks only ------ */ + GCRef chunkname; /* Name of the chunk this function was defined in. */ + BCLine firstline; /* First line of the function definition. 
*/ + BCLine numline; /* Number of lines for the function definition. */ + MRef lineinfo; /* Compressed map from bytecode ins. to source line. */ + MRef uvinfo; /* Upvalue names. */ + MRef varinfo; /* Names and compressed extents of local variables. */ +} GCproto; + +/* Flags for prototype. */ +#define PROTO_CHILD 0x01 /* Has child prototypes. */ +#define PROTO_VARARG 0x02 /* Vararg function. */ +#define PROTO_FFI 0x04 /* Uses BC_KCDATA for FFI datatypes. */ +#define PROTO_NOJIT 0x08 /* JIT disabled for this function. */ +#define PROTO_ILOOP 0x10 /* Patched bytecode with ILOOP etc. */ +/* Only used during parsing. */ +#define PROTO_HAS_RETURN 0x20 /* Already emitted a return. */ +#define PROTO_FIXUP_RETURN 0x40 /* Need to fixup emitted returns. */ + +#define proto_kgc(pt, idx) \ + check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \ + gcref(mref((pt)->k, GCRef)[(idx)])) +#define proto_knumtv(pt, idx) \ + check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)]) +#define proto_bc(pt) ((BCIns *)((char *)(pt) + sizeof(GCproto))) +#define proto_bcpos(pt, pc) ((BCPos)((pc) - proto_bc(pt))) +#define proto_uv(pt) (mref((pt)->uv, uint16_t)) + +#define proto_chunkname(pt) (strref((pt)->chunkname)) +#define proto_chunknamestr(pt) (strdata(proto_chunkname((pt)))) +#define proto_lineinfo(pt) (mref((pt)->lineinfo, const void)) +#define proto_uvinfo(pt) (mref((pt)->uvinfo, const uint8_t)) +#define proto_varinfo(pt) (mref((pt)->varinfo, const uint8_t)) + +/* -- Upvalue object ------------------------------------------------------ */ + +typedef struct GCupval { + GCHeader; + uint8_t closed; /* Set if closed (i.e. uv->v == &uv->u.value). */ + uint8_t unused; + union { + TValue tv; /* If closed: the value itself. */ + struct { /* If open: double linked list, anchored at thread. */ + GCRef prev; + GCRef next; + }; + }; + MRef v; /* Points to stack slot (open) or above (closed). */ + uint32_t dhash; /* Disambiguation hash: dh1 != dh2 => cannot alias. */ +} GCupval; + +#define uvprev(uv_) (&gcref((uv_)->prev)->uv) +#define uvnext(uv_) (&gcref((uv_)->next)->uv) +#define uvval(uv_) (mref((uv_)->v, TValue)) + +/* -- Function object (closures) ------------------------------------------ */ + +/* Common header for functions. env should be at same offset in GCudata. */ +#define GCfuncHeader \ + GCHeader; uint8_t ffid; uint8_t nupvalues; \ + GCRef env; GCRef gclist; MRef pc + +typedef struct GCfuncC { + GCfuncHeader; + lua_CFunction f; /* C function to be called. */ + TValue upvalue[1]; /* Array of upvalues (TValue). */ +} GCfuncC; + +typedef struct GCfuncL { + GCfuncHeader; + GCRef uvptr[1]; /* Array of _pointers_ to upvalue objects (GCupval). */ +} GCfuncL; + +typedef union GCfunc { + GCfuncC c; + GCfuncL l; +} GCfunc; + +#define FF_LUA 0 +#define FF_C 1 +#define isluafunc(fn) ((fn)->c.ffid == FF_LUA) +#define iscfunc(fn) ((fn)->c.ffid == FF_C) +#define isffunc(fn) ((fn)->c.ffid > FF_C) +#define funcproto(fn) \ + check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto))) +#define sizeCfunc(n) (sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n)) +#define sizeLfunc(n) (sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n)) + +/* -- Table object -------------------------------------------------------- */ + +/* Hash node. */ +typedef struct Node { + TValue val; /* Value object. Must be first field. */ + TValue key; /* Key object. */ + MRef next; /* Hash chain. */ + MRef freetop; /* Top of free elements (stored in t->node[0]). 
*/ +} Node; + +LJ_STATIC_ASSERT(offsetof(Node, val) == 0); + +typedef struct GCtab { + GCHeader; + uint8_t nomm; /* Negative cache for fast metamethods. */ + int8_t colo; /* Array colocation. */ + MRef array; /* Array part. */ + GCRef gclist; + GCRef metatable; /* Must be at same offset in GCudata. */ + MRef node; /* Hash part. */ + uint32_t asize; /* Size of array part (keys [0, asize-1]). */ + uint32_t hmask; /* Hash part mask (size of hash part - 1). */ +} GCtab; + +#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab)) +#define tabref(r) (&gcref((r))->tab) +#define noderef(r) (mref((r), Node)) +#define nextnode(n) (mref((n)->next, Node)) + +/* -- State objects ------------------------------------------------------- */ + +/* VM states. */ +enum { + LJ_VMST_INTERP, /* Interpreter. */ + LJ_VMST_C, /* C function. */ + LJ_VMST_GC, /* Garbage collector. */ + LJ_VMST_EXIT, /* Trace exit handler. */ + LJ_VMST_RECORD, /* Trace recorder. */ + LJ_VMST_OPT, /* Optimizer. */ + LJ_VMST_ASM, /* Assembler. */ + LJ_VMST__MAX +}; + +#define setvmstate(g, st) ((g)->vmstate = ~LJ_VMST_##st) + +/* Metamethods. ORDER MM */ +#ifdef LUAJIT_ENABLE_LUA52COMPAT +#define MMDEF_52(_) _(pairs) _(ipairs) +#else +#define MMDEF_52(_) +#endif + +#define MMDEF(_) \ + _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \ + /* Only the above (fast) metamethods are negative cached (max. 8). */ \ + _(lt) _(le) _(concat) _(call) \ + /* The following must be in ORDER ARITH. */ \ + _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \ + /* The following are used in the standard libraries. */ \ + _(metatable) _(tostring) MMDEF_52(_) + +typedef enum { +#define MMENUM(name) MM_##name, +MMDEF(MMENUM) +#undef MMENUM + MM__MAX, + MM____ = MM__MAX, + MM_FAST = MM_len +} MMS; + +/* GC root IDs. */ +typedef enum { + GCROOT_MMNAME, /* Metamethod names. */ + GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1, + GCROOT_BASEMT, /* Metatables for base types. */ + GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX, + GCROOT_IO_INPUT, /* Userdata for default I/O input file. */ + GCROOT_IO_OUTPUT, /* Userdata for default I/O output file. */ + GCROOT_MAX +} GCRootID; + +#define basemt_it(g, it) ((g)->gcroot[GCROOT_BASEMT+~(it)]) +#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)]) +#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)])) + +typedef struct GCState { + MSize total; /* Memory currently allocated. */ + MSize threshold; /* Memory threshold. */ + uint8_t currentwhite; /* Current white color. */ + uint8_t state; /* GC state. */ + uint8_t unused1; + uint8_t unused2; + MSize sweepstr; /* Sweep position in string table. */ + GCRef root; /* List of all collectable objects. */ + MRef sweep; /* Sweep position in root list. */ + GCRef gray; /* List of gray objects. */ + GCRef grayagain; /* List of objects for atomic traversal. */ + GCRef weak; /* List of weak tables (to be cleared). */ + GCRef mmudata; /* List of userdata (to be finalized). */ + MSize stepmul; /* Incremental GC step granularity. */ + MSize debt; /* Debt (how much GC is behind schedule). */ + MSize estimate; /* Estimate of memory actually in use. */ + MSize pause; /* Pause between successive GC cycles. */ +} GCState; + +/* Global state, shared by all threads of a Lua universe. */ +typedef struct global_State { + GCRef *strhash; /* String hash table (hash chain anchors). */ + MSize strmask; /* String hash mask (size of hash table - 1). */ + MSize strnum; /* Number of strings in hash table. */ + lua_Alloc allocf; /* Memory allocator. 
*/ + void *allocd; /* Memory allocator data. */ + GCState gc; /* Garbage collector. */ + SBuf tmpbuf; /* Temporary buffer for string concatenation. */ + Node nilnode; /* Fallback 1-element hash part (nil key and value). */ + GCstr strempty; /* Empty string. */ + uint8_t stremptyz; /* Zero terminator of empty string. */ + uint8_t hookmask; /* Hook mask. */ + uint8_t dispatchmode; /* Dispatch mode. */ + uint8_t vmevmask; /* VM event mask. */ + GCRef mainthref; /* Link to main thread. */ + TValue registrytv; /* Anchor for registry. */ + TValue tmptv, tmptv2; /* Temporary TValues. */ + GCupval uvhead; /* Head of double-linked list of all open upvalues. */ + int32_t hookcount; /* Instruction hook countdown. */ + int32_t hookcstart; /* Start count for instruction hook counter. */ + lua_Hook hookf; /* Hook function. */ + lua_CFunction wrapf; /* Wrapper for C function calls. */ + lua_CFunction panic; /* Called as a last resort for errors. */ + volatile int32_t vmstate; /* VM state or current JIT code trace number. */ + BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */ + BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */ + GCRef jit_L; /* Current JIT code lua_State or NULL. */ + MRef jit_base; /* Current JIT code L->base. */ + MRef ctype_state; /* Pointer to C type state. */ + GCRef gcroot[GCROOT_MAX]; /* GC roots. */ +} global_State; + +#define mainthread(g) (&gcref(g->mainthref)->th) +#define niltv(L) \ + check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val) +#define niltvg(g) \ + check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val) + +/* Hook management. Hook event masks are defined in lua.h. */ +#define HOOK_EVENTMASK 0x0f +#define HOOK_ACTIVE 0x10 +#define HOOK_ACTIVE_SHIFT 4 +#define HOOK_VMEVENT 0x20 +#define HOOK_GC 0x40 +#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE) +#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE) +#define hook_entergc(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC)) +#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT)) +#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE) +#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK) +#define hook_restore(g, h) \ + ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h)) + +/* Per-thread state object. */ +struct lua_State { + GCHeader; + uint8_t dummy_ffid; /* Fake FF_C for curr_funcisL() on dummy frames. */ + uint8_t status; /* Thread status. */ + MRef glref; /* Link to global state. */ + GCRef gclist; /* GC chain. */ + TValue *base; /* Base of currently executing function. */ + TValue *top; /* First free slot in the stack. */ + MRef maxstack; /* Last free slot in the stack. */ + MRef stack; /* Stack base. */ + GCRef openupval; /* List of open upvalues in the stack. */ + GCRef env; /* Thread environment (table of globals). */ + void *cframe; /* End of C stack frame chain. */ + MSize stacksize; /* True stack size (incl. LJ_STACK_EXTRA). */ +}; + +#define G(L) (mref(L->glref, global_State)) +#define registry(L) (&G(L)->registrytv) + +/* Macros to access the currently executing (Lua) function. */ +#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn) +#define curr_funcisL(L) (isluafunc(curr_func(L))) +#define curr_proto(L) (funcproto(curr_func(L))) +#define curr_topL(L) (L->base + curr_proto(L)->framesize) +#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top) + +/* -- GC object definition and conversions -------------------------------- */ + +/* GC header for generic access to common fields of GC objects. 
*/ +typedef struct GChead { + GCHeader; + uint8_t unused1; + uint8_t unused2; + GCRef env; + GCRef gclist; + GCRef metatable; +} GChead; + +/* The env field SHOULD be at the same offset for all GC objects. */ +LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env)); +LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env)); + +/* The metatable field MUST be at the same offset for all GC objects. */ +LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable)); +LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable)); + +/* The gclist field MUST be at the same offset for all GC objects. */ +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist)); +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist)); +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist)); +LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist)); + +typedef union GCobj { + GChead gch; + GCstr str; + GCupval uv; + lua_State th; + GCproto pt; + GCfunc fn; + GCcdata cd; + GCtab tab; + GCudata ud; +} GCobj; + +/* Macros to convert a GCobj pointer into a specific value. */ +#define gco2str(o) check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str) +#define gco2uv(o) check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv) +#define gco2th(o) check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th) +#define gco2pt(o) check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt) +#define gco2func(o) check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn) +#define gco2cd(o) check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd) +#define gco2tab(o) check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab) +#define gco2ud(o) check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud) + +/* Macro to convert any collectable object into a GCobj pointer. */ +#define obj2gco(v) ((GCobj *)(v)) + +/* -- TValue getters/setters ---------------------------------------------- */ + +#ifdef LUA_USE_ASSERT +#include "lj_gc.h" +#endif + +/* Macros to test types. */ +#define itype(o) ((o)->it) +#define tvisnil(o) (itype(o) == LJ_TNIL) +#define tvisfalse(o) (itype(o) == LJ_TFALSE) +#define tvistrue(o) (itype(o) == LJ_TTRUE) +#define tvisbool(o) (tvisfalse(o) || tvistrue(o)) +#if LJ_64 +#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2) +#else +#define tvislightud(o) (itype(o) == LJ_TLIGHTUD) +#endif +#define tvisstr(o) (itype(o) == LJ_TSTR) +#define tvisfunc(o) (itype(o) == LJ_TFUNC) +#define tvisthread(o) (itype(o) == LJ_TTHREAD) +#define tvisproto(o) (itype(o) == LJ_TPROTO) +#define tviscdata(o) (itype(o) == LJ_TCDATA) +#define tvistab(o) (itype(o) == LJ_TTAB) +#define tvisudata(o) (itype(o) == LJ_TUDATA) +#define tvisnumber(o) (itype(o) <= LJ_TISNUM) +#define tvisint(o) (LJ_DUALNUM && itype(o) == LJ_TISNUM) +#define tvisnum(o) (itype(o) < LJ_TISNUM) + +#define tvistruecond(o) (itype(o) < LJ_TISTRUECOND) +#define tvispri(o) (itype(o) >= LJ_TISPRI) +#define tvistabud(o) (itype(o) <= LJ_TISTABUD) /* && !tvisnum() */ +#define tvisgcv(o) ((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV)) + +/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */ +#define tvisnan(o) ((o)->n != (o)->n) +#if LJ_64 +#define tviszero(o) (((o)->u64 << 1) == 0) +#else +#define tviszero(o) (((o)->u32.lo | ((o)->u32.hi << 1)) == 0) +#endif +#define tvispzero(o) ((o)->u64 == 0) +#define tvismzero(o) ((o)->u64 == U64x(80000000,00000000)) +#define tvispone(o) ((o)->u64 == U64x(3ff00000,00000000)) +#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64) + +/* Macros to convert type ids. 
*/ +#if LJ_64 +#define itypemap(o) \ + (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o)) +#else +#define itypemap(o) (tvisnumber(o) ? ~LJ_TNUMX : ~itype(o)) +#endif + +/* Macros to get tagged values. */ +#define gcval(o) (gcref((o)->gcr)) +#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - (o)->it)) +#if LJ_64 +#define lightudV(o) \ + check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff))) +#else +#define lightudV(o) check_exp(tvislightud(o), gcrefp((o)->gcr, void)) +#endif +#define gcV(o) check_exp(tvisgcv(o), gcval(o)) +#define strV(o) check_exp(tvisstr(o), &gcval(o)->str) +#define funcV(o) check_exp(tvisfunc(o), &gcval(o)->fn) +#define threadV(o) check_exp(tvisthread(o), &gcval(o)->th) +#define protoV(o) check_exp(tvisproto(o), &gcval(o)->pt) +#define cdataV(o) check_exp(tviscdata(o), &gcval(o)->cd) +#define tabV(o) check_exp(tvistab(o), &gcval(o)->tab) +#define udataV(o) check_exp(tvisudata(o), &gcval(o)->ud) +#define numV(o) check_exp(tvisnum(o), (o)->n) +#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i) + +/* Macros to set tagged values. */ +#define setitype(o, i) ((o)->it = (i)) +#define setnilV(o) ((o)->it = LJ_TNIL) +#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x)) + +static LJ_AINLINE void setlightudV(TValue *o, void *p) +{ +#if LJ_64 + o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48); +#else + setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD); +#endif +} + +#if LJ_64 +#define checklightudptr(L, p) \ + (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p)) +#define setcont(o, f) \ + ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin) +#else +#define checklightudptr(L, p) (p) +#define setcont(o, f) setlightudV((o), (void *)(f)) +#endif + +#define tvchecklive(L, o) \ + UNUSED(L), lua_assert(!tvisgcv(o) || \ + ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o)))) + +static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t itype) +{ + setgcref(o->gcr, v); setitype(o, itype); tvchecklive(L, o); +} + +#define define_setV(name, type, tag) \ +static LJ_AINLINE void name(lua_State *L, TValue *o, type *v) \ +{ \ + setgcV(L, o, obj2gco(v), tag); \ +} +define_setV(setstrV, GCstr, LJ_TSTR) +define_setV(setthreadV, lua_State, LJ_TTHREAD) +define_setV(setprotoV, GCproto, LJ_TPROTO) +define_setV(setfuncV, GCfunc, LJ_TFUNC) +define_setV(setcdataV, GCcdata, LJ_TCDATA) +define_setV(settabV, GCtab, LJ_TTAB) +define_setV(setudataV, GCudata, LJ_TUDATA) + +#define setnumV(o, x) ((o)->n = (x)) +#define setnanV(o) ((o)->u64 = U64x(fff80000,00000000)) +#define setpinfV(o) ((o)->u64 = U64x(7ff00000,00000000)) +#define setminfV(o) ((o)->u64 = U64x(fff00000,00000000)) + +static LJ_AINLINE void setintV(TValue *o, int32_t i) +{ +#if LJ_DUALNUM + o->i = (uint32_t)i; setitype(o, LJ_TISNUM); +#else + o->n = (lua_Number)i; +#endif +} + +static LJ_AINLINE void setint64V(TValue *o, int64_t i) +{ + if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i)) + setintV(o, (int32_t)i); + else + setnumV(o, (lua_Number)i); +} + +#if LJ_64 +#define setintptrV(o, i) setint64V((o), (i)) +#else +#define setintptrV(o, i) setintV((o), (i)) +#endif + +/* Copy tagged values. 
*/ +static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2) +{ + *o1 = *o2; tvchecklive(L, o1); +} + +/* -- Number to integer conversion ---------------------------------------- */ + +#if LJ_SOFTFP +LJ_ASMF int32_t lj_vm_tobit(double x); +#endif + +static LJ_AINLINE int32_t lj_num2bit(lua_Number n) +{ +#if LJ_SOFTFP + return lj_vm_tobit(n); +#else + TValue o; + o.n = n + 6755399441055744.0; /* 2^52 + 2^51 */ + return (int32_t)o.u32.lo; +#endif +} + +#if LJ_TARGET_X86 && !defined(__SSE2__) +#define lj_num2int(n) lj_num2bit((n)) +#else +#define lj_num2int(n) ((int32_t)(n)) +#endif + +static LJ_AINLINE uint64_t lj_num2u64(lua_Number n) +{ +#ifdef _MSC_VER + if (n >= 9223372036854775808.0) /* They think it's a feature. */ + return (uint64_t)(int64_t)(n - 18446744073709551616.0); + else +#endif + return (uint64_t)n; +} + +static LJ_AINLINE int32_t numberVint(cTValue *o) +{ + if (LJ_LIKELY(tvisint(o))) + return intV(o); + else + return lj_num2int(numV(o)); +} + +static LJ_AINLINE lua_Number numberVnum(cTValue *o) +{ + if (LJ_UNLIKELY(tvisint(o))) + return (lua_Number)intV(o); + else + return numV(o); +} + +/* -- Miscellaneous object handling --------------------------------------- */ + +/* Names and maps for internal and external object tags. */ +LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1]; +LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1]; + +#define typename(o) (lj_obj_itypename[itypemap(o)]) + +/* Compare two objects without calling metamethods. */ +LJ_FUNC int lj_obj_equal(cTValue *o1, cTValue *o2); + +#endif diff --git a/src/luajit2/src/lj_opt_dce.c b/src/luajit2/src/lj_opt_dce.c new file mode 100644 index 0000000000..253fe3d213 --- /dev/null +++ b/src/luajit2/src/lj_opt_dce.c @@ -0,0 +1,79 @@ +/* +** DCE: Dead Code Elimination. Pre-LOOP only -- ASM already performs DCE. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_dce_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Scan through all snapshots and mark all referenced instructions. */ +static void dce_marksnap(jit_State *J) +{ + SnapNo i, nsnap = J->cur.nsnap; + for (i = 0; i < nsnap; i++) { + SnapShot *snap = &J->cur.snap[i]; + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + IRRef ref = snap_ref(map[n]); + if (ref >= REF_FIRST) + irt_setmark(IR(ref)->t); + } + } +} + +/* Backwards propagate marks. Replace unused instructions with NOPs. */ +static void dce_propagate(jit_State *J) +{ + IRRef1 *pchain[IR__MAX]; + IRRef ins; + uint32_t i; + for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i]; + for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) { + IRIns *ir = IR(ins); + if (irt_ismarked(ir->t)) { + irt_clearmark(ir->t); + pchain[ir->o] = &ir->prev; + } else if (!ir_sideeff(ir)) { + *pchain[ir->o] = ir->prev; /* Reroute original instruction chain. */ + *pchain[IR_NOP] = (IRRef1)ins; + ir->t.irt = IRT_NIL; + ir->o = IR_NOP; /* Replace instruction with NOP. */ + ir->op1 = ir->op2 = 0; + pchain[IR_NOP] = &ir->prev; + continue; + } + if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t); + if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t); + } + *pchain[IR_NOP] = 0; /* Terminate NOP chain. */ +} + +/* Dead Code Elimination. +** +** First backpropagate marks for all used instructions. 
Then replace +** the unused ones with a NOP. Note that compressing the IR to eliminate +** the NOPs does not pay off. +*/ +void lj_opt_dce(jit_State *J) +{ + if ((J->flags & JIT_F_OPT_DCE)) { + dce_marksnap(J); + dce_propagate(J); + } +} + +#undef IR + +#endif diff --git a/src/luajit2/src/lj_opt_fold.c b/src/luajit2/src/lj_opt_fold.c new file mode 100644 index 0000000000..2ecac2d95c --- /dev/null +++ b/src/luajit2/src/lj_opt_fold.c @@ -0,0 +1,2186 @@ +/* +** FOLD: Constant Folding, Algebraic Simplifications and Reassociation. +** ABCelim: Array Bounds Check Elimination. +** CSE: Common-Subexpression Elimination. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_fold_c +#define LUA_CORE + +#include <math.h> + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_carith.h" +#include "lj_vm.h" + +/* Here's a short description how the FOLD engine processes instructions: +** +** The FOLD engine receives a single instruction stored in fins (J->fold.ins). +** The instruction and its operands are used to select matching fold rules. +** These are applied iteratively until a fixed point is reached. +** +** The 8 bit opcode of the instruction itself plus the opcodes of the +** two instructions referenced by its operands form a 24 bit key +** 'ins left right' (unused operands -> 0, literals -> lowest 8 bits). +** +** This key is used for partial matching against the fold rules. The +** left/right operand fields of the key are successively masked with +** the 'any' wildcard, from most specific to least specific: +** +** ins left right +** ins any right +** ins left any +** ins any any +** +** The masked key is used to lookup a matching fold rule in a semi-perfect +** hash table. If a matching rule is found, the related fold function is run. +** Multiple rules can share the same fold function. A fold rule may return +** one of several special values: +** +** - NEXTFOLD means no folding was applied, because an additional test +** inside the fold function failed. Matching continues against less +** specific fold rules. Finally the instruction is passed on to CSE. +** +** - RETRYFOLD means the instruction was modified in-place. Folding is +** retried as if this instruction had just been received. +** +** All other return values are terminal actions -- no further folding is +** applied: +** +** - INTFOLD(i) returns a reference to the integer constant i. +** +** - LEFTFOLD and RIGHTFOLD return the left/right operand reference +** without emitting an instruction. +** +** - CSEFOLD and EMITFOLD pass the instruction directly to CSE or emit +** it without passing through any further optimizations. +** +** - FAILFOLD, DROPFOLD and CONDFOLD only apply to instructions which have +** no result (e.g. guarded assertions): FAILFOLD means the guard would +** always fail, i.e. the current trace is pointless. DROPFOLD means +** the guard is always true and has been eliminated. CONDFOLD is a +** shortcut for FAILFOLD + cond (i.e. drop if true, otherwise fail). +** +** - Any other return value is interpreted as an IRRef or TRef. This +** can be a reference to an existing or a newly created instruction. +** Only the least-significant 16 bits (IRRef1) are used to form a TRef +** which is finally returned to the caller. +** +** The FOLD engine receives instructions both from the trace recorder and +** substituted instructions from LOOP unrolling. 
This means all types +** of instructions may end up here, even though the recorder bypasses +** FOLD in some cases. Thus all loads, stores and allocations must have +** an any/any rule to avoid being passed on to CSE. +** +** Carefully read the following requirements before adding or modifying +** any fold rules: +** +** Requirement #1: All fold rules must preserve their destination type. +** +** Consistently use INTFOLD() (KINT result) or lj_ir_knum() (KNUM result). +** Never use lj_ir_knumint() which can have either a KINT or KNUM result. +** +** Requirement #2: Fold rules should not create *new* instructions which +** reference operands *across* PHIs. +** +** E.g. a RETRYFOLD with 'fins->op1 = fleft->op1' is invalid if the +** left operand is a PHI. Then fleft->op1 would point across the PHI +** frontier to an invariant instruction. Adding a PHI for this instruction +** would be counterproductive. The solution is to add a barrier which +** prevents folding across PHIs, i.e. 'PHIBARRIER(fleft)' in this case. +** The only exception is for recurrences with high latencies like +** repeated int->num->int conversions. +** +** One could relax this condition a bit if the referenced instruction is +** a PHI, too. But this often leads to worse code due to excessive +** register shuffling. +** +** Note: returning *existing* instructions (e.g. LEFTFOLD) is ok, though. +** Even returning fleft->op1 would be ok, because a new PHI will added, +** if needed. But again, this leads to excessive register shuffling and +** should be avoided. +** +** Requirement #3: The set of all fold rules must be monotonic to guarantee +** termination. +** +** The goal is optimization, so one primarily wants to add strength-reducing +** rules. This means eliminating an instruction or replacing an instruction +** with one or more simpler instructions. Don't add fold rules which point +** into the other direction. +** +** Some rules (like commutativity) do not directly reduce the strength of +** an instruction, but enable other fold rules (e.g. by moving constants +** to the right operand). These rules must be made unidirectional to avoid +** cycles. +** +** Rule of thumb: the trace recorder expands the IR and FOLD shrinks it. +*/ + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) +#define fins (&J->fold.ins) +#define fleft (&J->fold.left) +#define fright (&J->fold.right) +#define knumleft (ir_knum(fleft)->n) +#define knumright (ir_knum(fright)->n) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* Fold function type. Fastcall on x86 significantly reduces their size. */ +typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J); + +/* Macros for the fold specs, so buildvm can recognize them. */ +#define LJFOLD(x) +#define LJFOLDX(x) +#define LJFOLDF(name) static TRef LJ_FASTCALL fold_##name(jit_State *J) +/* Note: They must be at the start of a line or buildvm ignores them! */ + +/* Barrier to prevent using operands across PHIs. */ +#define PHIBARRIER(ir) if (irt_isphi((ir)->t)) return NEXTFOLD + +/* Barrier to prevent folding across a GC step. +** GC steps can only happen at the head of a trace and at LOOP. +** And the GC is only driven forward if there is at least one allocation. 
+*/ +#define gcstep_barrier(J, ref) \ + ((ref) < J->chain[IR_LOOP] && \ + (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \ + J->chain[IR_TNEW] || J->chain[IR_TDUP] || \ + J->chain[IR_CNEW] || J->chain[IR_CNEWI] || J->chain[IR_TOSTR])) + +/* -- Constant folding for FP numbers ------------------------------------- */ + +LJFOLD(ADD KNUM KNUM) +LJFOLD(SUB KNUM KNUM) +LJFOLD(MUL KNUM KNUM) +LJFOLD(DIV KNUM KNUM) +LJFOLD(NEG KNUM KNUM) +LJFOLD(ABS KNUM KNUM) +LJFOLD(ATAN2 KNUM KNUM) +LJFOLD(LDEXP KNUM KNUM) +LJFOLD(MIN KNUM KNUM) +LJFOLD(MAX KNUM KNUM) +LJFOLDF(kfold_numarith) +{ + lua_Number a = knumleft; + lua_Number b = knumright; + lua_Number y = lj_vm_foldarith(a, b, fins->o - IR_ADD); + return lj_ir_knum(J, y); +} + +LJFOLD(LDEXP KNUM KINT) +LJFOLDF(kfold_ldexp) +{ +#if LJ_TARGET_X86ORX64 + UNUSED(J); + return NEXTFOLD; +#else + return lj_ir_knum(J, ldexp(knumleft, fright->i)); +#endif +} + +LJFOLD(FPMATH KNUM any) +LJFOLDF(kfold_fpmath) +{ + lua_Number a = knumleft; + lua_Number y = lj_vm_foldfpm(a, fins->op2); + return lj_ir_knum(J, y); +} + +LJFOLD(POW KNUM KINT) +LJFOLDF(kfold_numpow) +{ + lua_Number a = knumleft; + lua_Number b = (lua_Number)fright->i; + lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD); + return lj_ir_knum(J, y); +} + +/* Must not use kfold_kref for numbers (could be NaN). */ +LJFOLD(EQ KNUM KNUM) +LJFOLD(NE KNUM KNUM) +LJFOLD(LT KNUM KNUM) +LJFOLD(GE KNUM KNUM) +LJFOLD(LE KNUM KNUM) +LJFOLD(GT KNUM KNUM) +LJFOLD(ULT KNUM KNUM) +LJFOLD(UGE KNUM KNUM) +LJFOLD(ULE KNUM KNUM) +LJFOLD(UGT KNUM KNUM) +LJFOLDF(kfold_numcomp) +{ + return CONDFOLD(lj_ir_numcmp(knumleft, knumright, (IROp)fins->o)); +} + +/* -- Constant folding for 32 bit integers -------------------------------- */ + +static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op) +{ + switch (op) { + case IR_ADD: k1 += k2; break; + case IR_SUB: k1 -= k2; break; + case IR_MUL: k1 *= k2; break; + case IR_MOD: k1 = lj_vm_modi(k1, k2); break; + case IR_BAND: k1 &= k2; break; + case IR_BOR: k1 |= k2; break; + case IR_BXOR: k1 ^= k2; break; + case IR_BSHL: k1 <<= (k2 & 31); break; + case IR_BSHR: k1 = (int32_t)((uint32_t)k1 >> (k2 & 31)); break; + case IR_BSAR: k1 >>= (k2 & 31); break; + case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 31)); break; + case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break; + case IR_MIN: k1 = k1 < k2 ? k1 : k2; break; + case IR_MAX: k1 = k1 > k2 ? 
k1 : k2; break; + default: lua_assert(0); break; + } + return k1; +} + +LJFOLD(ADD KINT KINT) +LJFOLD(SUB KINT KINT) +LJFOLD(MUL KINT KINT) +LJFOLD(MOD KINT KINT) +LJFOLD(BAND KINT KINT) +LJFOLD(BOR KINT KINT) +LJFOLD(BXOR KINT KINT) +LJFOLD(BSHL KINT KINT) +LJFOLD(BSHR KINT KINT) +LJFOLD(BSAR KINT KINT) +LJFOLD(BROL KINT KINT) +LJFOLD(BROR KINT KINT) +LJFOLD(MIN KINT KINT) +LJFOLD(MAX KINT KINT) +LJFOLDF(kfold_intarith) +{ + return INTFOLD(kfold_intop(fleft->i, fright->i, (IROp)fins->o)); +} + +LJFOLD(ADDOV KINT KINT) +LJFOLD(SUBOV KINT KINT) +LJFOLD(MULOV KINT KINT) +LJFOLDF(kfold_intovarith) +{ + lua_Number n = lj_vm_foldarith((lua_Number)fleft->i, (lua_Number)fright->i, + fins->o - IR_ADDOV); + int32_t k = lj_num2int(n); + if (n != (lua_Number)k) + return FAILFOLD; + return INTFOLD(k); +} + +LJFOLD(BNOT KINT) +LJFOLDF(kfold_bnot) +{ + return INTFOLD(~fleft->i); +} + +LJFOLD(BSWAP KINT) +LJFOLDF(kfold_bswap) +{ + return INTFOLD((int32_t)lj_bswap((uint32_t)fleft->i)); +} + +LJFOLD(LT KINT KINT) +LJFOLD(GE KINT KINT) +LJFOLD(LE KINT KINT) +LJFOLD(GT KINT KINT) +LJFOLD(ULT KINT KINT) +LJFOLD(UGE KINT KINT) +LJFOLD(ULE KINT KINT) +LJFOLD(UGT KINT KINT) +LJFOLD(ABC KINT KINT) +LJFOLDF(kfold_intcomp) +{ + int32_t a = fleft->i, b = fright->i; + switch ((IROp)fins->o) { + case IR_LT: return CONDFOLD(a < b); + case IR_GE: return CONDFOLD(a >= b); + case IR_LE: return CONDFOLD(a <= b); + case IR_GT: return CONDFOLD(a > b); + case IR_ULT: return CONDFOLD((uint32_t)a < (uint32_t)b); + case IR_UGE: return CONDFOLD((uint32_t)a >= (uint32_t)b); + case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b); + case IR_ABC: + case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b); + default: lua_assert(0); return FAILFOLD; + } +} + +LJFOLD(UGE any KINT) +LJFOLDF(kfold_intcomp0) +{ + if (fright->i == 0) + return DROPFOLD; + return NEXTFOLD; +} + +/* -- Constant folding for 64 bit integers -------------------------------- */ + +static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op) +{ + switch (op) { +#if LJ_64 || LJ_HASFFI + case IR_ADD: k1 += k2; break; + case IR_SUB: k1 -= k2; break; +#endif +#if LJ_HASFFI + case IR_MUL: k1 *= k2; break; + case IR_BAND: k1 &= k2; break; + case IR_BOR: k1 |= k2; break; + case IR_BXOR: k1 ^= k2; break; +#endif + default: UNUSED(k2); lua_assert(0); break; + } + return k1; +} + +LJFOLD(ADD KINT64 KINT64) +LJFOLD(SUB KINT64 KINT64) +LJFOLD(MUL KINT64 KINT64) +LJFOLD(BAND KINT64 KINT64) +LJFOLD(BOR KINT64 KINT64) +LJFOLD(BXOR KINT64 KINT64) +LJFOLDF(kfold_int64arith) +{ + return INT64FOLD(kfold_int64arith(ir_k64(fleft)->u64, + ir_k64(fright)->u64, (IROp)fins->o)); +} + +LJFOLD(DIV KINT64 KINT64) +LJFOLD(MOD KINT64 KINT64) +LJFOLD(POW KINT64 KINT64) +LJFOLDF(kfold_int64arith2) +{ +#if LJ_HASFFI + uint64_t k1 = ir_k64(fleft)->u64, k2 = ir_k64(fright)->u64; + if (irt_isi64(fins->t)) { + k1 = fins->o == IR_DIV ? lj_carith_divi64((int64_t)k1, (int64_t)k2) : + fins->o == IR_MOD ? lj_carith_modi64((int64_t)k1, (int64_t)k2) : + lj_carith_powi64((int64_t)k1, (int64_t)k2); + } else { + k1 = fins->o == IR_DIV ? lj_carith_divu64(k1, k2) : + fins->o == IR_MOD ? 
lj_carith_modu64(k1, k2) : + lj_carith_powu64(k1, k2); + } + return INT64FOLD(k1); +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(BSHL KINT64 KINT) +LJFOLD(BSHR KINT64 KINT) +LJFOLD(BSAR KINT64 KINT) +LJFOLD(BROL KINT64 KINT) +LJFOLD(BROR KINT64 KINT) +LJFOLDF(kfold_int64shift) +{ +#if LJ_HASFFI || LJ_64 + uint64_t k = ir_k64(fleft)->u64; + int32_t sh = (fright->i & 63); + switch ((IROp)fins->o) { + case IR_BSHL: k <<= sh; break; +#if LJ_HASFFI + case IR_BSHR: k >>= sh; break; + case IR_BSAR: k = (uint64_t)((int64_t)k >> sh); break; + case IR_BROL: k = lj_rol(k, sh); break; + case IR_BROR: k = lj_ror(k, sh); break; +#endif + default: lua_assert(0); break; + } + return INT64FOLD(k); +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(BNOT KINT64) +LJFOLDF(kfold_bnot64) +{ +#if LJ_HASFFI + return INT64FOLD(~ir_k64(fleft)->u64); +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(BSWAP KINT64) +LJFOLDF(kfold_bswap64) +{ +#if LJ_HASFFI + return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64)); +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(LT KINT64 KINT) +LJFOLD(GE KINT64 KINT) +LJFOLD(LE KINT64 KINT) +LJFOLD(GT KINT64 KINT) +LJFOLD(ULT KINT64 KINT) +LJFOLD(UGE KINT64 KINT) +LJFOLD(ULE KINT64 KINT) +LJFOLD(UGT KINT64 KINT) +LJFOLDF(kfold_int64comp) +{ +#if LJ_HASFFI + uint64_t a = ir_k64(fleft)->u64, b = ir_k64(fright)->u64; + switch ((IROp)fins->o) { + case IR_LT: return CONDFOLD(a < b); + case IR_GE: return CONDFOLD(a >= b); + case IR_LE: return CONDFOLD(a <= b); + case IR_GT: return CONDFOLD(a > b); + case IR_ULT: return CONDFOLD((uint64_t)a < (uint64_t)b); + case IR_UGE: return CONDFOLD((uint64_t)a >= (uint64_t)b); + case IR_ULE: return CONDFOLD((uint64_t)a <= (uint64_t)b); + case IR_UGT: return CONDFOLD((uint64_t)a > (uint64_t)b); + default: lua_assert(0); return FAILFOLD; + } +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(UGE any KINT64) +LJFOLDF(kfold_int64comp0) +{ +#if LJ_HASFFI + if (ir_k64(fright)->u64 == 0) + return DROPFOLD; + return NEXTFOLD; +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +/* -- Constant folding for strings ---------------------------------------- */ + +LJFOLD(SNEW KKPTR KINT) +LJFOLDF(kfold_snew_kptr) +{ + GCstr *s = lj_str_new(J->L, (const char *)ir_kptr(fleft), (size_t)fright->i); + return lj_ir_kstr(J, s); +} + +LJFOLD(SNEW any KINT) +LJFOLDF(kfold_snew_empty) +{ + if (fright->i == 0) + return lj_ir_kstr(J, &J2G(J)->strempty); + return NEXTFOLD; +} + +LJFOLD(STRREF KGC KINT) +LJFOLDF(kfold_strref) +{ + GCstr *str = ir_kstr(fleft); + lua_assert((MSize)fright->i < str->len); + return lj_ir_kkptr(J, (char *)strdata(str) + fright->i); +} + +LJFOLD(STRREF SNEW any) +LJFOLDF(kfold_strref_snew) +{ + PHIBARRIER(fleft); + if (irref_isk(fins->op2) && fright->i == 0) { + return fleft->op1; /* strref(snew(ptr, len), 0) ==> ptr */ + } else { + /* Reassociate: strref(snew(strref(str, a), len), b) ==> strref(str, a+b) */ + IRIns *ir = IR(fleft->op1); + IRRef1 str = ir->op1; /* IRIns * is not valid across emitir. */ + lua_assert(ir->o == IR_STRREF); + PHIBARRIER(ir); + fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! 
*/ + fins->op1 = str; + fins->ot = IRT(IR_STRREF, IRT_P32); + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(CALLN CARG IRCALL_lj_str_cmp) +LJFOLDF(kfold_strcmp) +{ + if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) { + GCstr *a = ir_kstr(IR(fleft->op1)); + GCstr *b = ir_kstr(IR(fleft->op2)); + return INTFOLD(lj_str_cmp(a, b)); + } + return NEXTFOLD; +} + +/* -- Constant folding of pointer arithmetic ------------------------------ */ + +LJFOLD(ADD KGC KINT) +LJFOLD(ADD KGC KINT64) +LJFOLDF(kfold_add_kgc) +{ + GCobj *o = ir_kgc(fleft); +#if LJ_64 + ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64; +#else + ptrdiff_t ofs = fright->i; +#endif + return lj_ir_kkptr(J, (char *)o + ofs); +} + +LJFOLD(ADD KPTR KINT) +LJFOLD(ADD KPTR KINT64) +LJFOLD(ADD KKPTR KINT) +LJFOLD(ADD KKPTR KINT64) +LJFOLDF(kfold_add_kptr) +{ + void *p = ir_kptr(fleft); +#if LJ_64 + ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64; +#else + ptrdiff_t ofs = fright->i; +#endif + return lj_ir_kptr_(J, fleft->o, (char *)p + ofs); +} + +/* -- Constant folding of conversions ------------------------------------- */ + +LJFOLD(TOBIT KNUM KNUM) +LJFOLDF(kfold_tobit) +{ + return INTFOLD(lj_num2bit(knumleft)); +} + +LJFOLD(CONV KINT IRCONV_NUM_INT) +LJFOLDF(kfold_conv_kint_num) +{ + return lj_ir_knum(J, (lua_Number)fleft->i); +} + +LJFOLD(CONV KINT IRCONV_NUM_U32) +LJFOLDF(kfold_conv_kintu32_num) +{ + return lj_ir_knum(J, (lua_Number)(uint32_t)fleft->i); +} + +LJFOLD(CONV KINT IRCONV_I64_INT) +LJFOLD(CONV KINT IRCONV_U64_INT) +LJFOLDF(kfold_conv_kint_i64) +{ + if ((fins->op2 & IRCONV_SEXT)) + return INT64FOLD((uint64_t)(int64_t)fleft->i); + else + return INT64FOLD((uint64_t)(int64_t)(uint32_t)fleft->i); +} + +LJFOLD(CONV KINT64 IRCONV_NUM_I64) +LJFOLDF(kfold_conv_kint64_num_i64) +{ + return lj_ir_knum(J, (lua_Number)(int64_t)ir_kint64(fleft)->u64); +} + +LJFOLD(CONV KINT64 IRCONV_NUM_U64) +LJFOLDF(kfold_conv_kint64_num_u64) +{ + return lj_ir_knum(J, (lua_Number)ir_kint64(fleft)->u64); +} + +LJFOLD(CONV KINT64 IRCONV_INT_I64) +LJFOLD(CONV KINT64 IRCONV_U32_I64) +LJFOLDF(kfold_conv_kint64_int_i64) +{ + return INTFOLD((int32_t)ir_kint64(fleft)->u64); +} + +LJFOLD(CONV KNUM IRCONV_INT_NUM) +LJFOLDF(kfold_conv_knum_int_num) +{ + lua_Number n = knumleft; + if (!(fins->op2 & IRCONV_TRUNC)) { + int32_t k = lj_num2int(n); + if (irt_isguard(fins->t) && n != (lua_Number)k) { + /* We're about to create a guard which always fails, like CONV +1.5. 
+ ** Some pathological loops cause this during LICM, e.g.: + ** local x,k,t = 0,1.5,{1,[1.5]=2} + ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end + ** assert(x == 300) + */ + return FAILFOLD; + } + return INTFOLD(k); + } else { + return INTFOLD((int32_t)n); + } +} + +LJFOLD(CONV KNUM IRCONV_U32_NUM) +LJFOLDF(kfold_conv_knum_u32_num) +{ + lua_assert((fins->op2 & IRCONV_TRUNC)); + return INTFOLD((int32_t)(uint32_t)knumleft); +} + +LJFOLD(CONV KNUM IRCONV_I64_NUM) +LJFOLDF(kfold_conv_knum_i64_num) +{ + lua_assert((fins->op2 & IRCONV_TRUNC)); + return INT64FOLD((uint64_t)(int64_t)knumleft); +} + +LJFOLD(CONV KNUM IRCONV_U64_NUM) +LJFOLDF(kfold_conv_knum_u64_num) +{ + lua_assert((fins->op2 & IRCONV_TRUNC)); + return INT64FOLD(lj_num2u64(knumleft)); +} + +LJFOLD(TOSTR KNUM) +LJFOLDF(kfold_tostr_knum) +{ + return lj_ir_kstr(J, lj_str_fromnum(J->L, &knumleft)); +} + +LJFOLD(TOSTR KINT) +LJFOLDF(kfold_tostr_kint) +{ + return lj_ir_kstr(J, lj_str_fromint(J->L, fleft->i)); +} + +LJFOLD(STRTO KGC) +LJFOLDF(kfold_strto) +{ + TValue n; + if (lj_str_tonum(ir_kstr(fleft), &n)) + return lj_ir_knum(J, numV(&n)); + return FAILFOLD; +} + +/* -- Constant folding of equality checks --------------------------------- */ + +/* Don't constant-fold away FLOAD checks against KNULL. */ +LJFOLD(EQ FLOAD KNULL) +LJFOLD(NE FLOAD KNULL) +LJFOLDX(lj_opt_cse) + +/* But fold all other KNULL compares, since only KNULL is equal to KNULL. */ +LJFOLD(EQ any KNULL) +LJFOLD(NE any KNULL) +LJFOLD(EQ KNULL any) +LJFOLD(NE KNULL any) +LJFOLD(EQ KINT KINT) /* Constants are unique, so same refs <==> same value. */ +LJFOLD(NE KINT KINT) +LJFOLD(EQ KINT64 KINT64) +LJFOLD(NE KINT64 KINT64) +LJFOLD(EQ KGC KGC) +LJFOLD(NE KGC KGC) +LJFOLDF(kfold_kref) +{ + return CONDFOLD((fins->op1 == fins->op2) ^ (fins->o == IR_NE)); +} + +/* -- Algebraic shortcuts ------------------------------------------------- */ + +LJFOLD(FPMATH FPMATH IRFPM_FLOOR) +LJFOLD(FPMATH FPMATH IRFPM_CEIL) +LJFOLD(FPMATH FPMATH IRFPM_TRUNC) +LJFOLDF(shortcut_round) +{ + IRFPMathOp op = (IRFPMathOp)fleft->op2; + if (op == IRFPM_FLOOR || op == IRFPM_CEIL || op == IRFPM_TRUNC) + return LEFTFOLD; /* round(round_left(x)) = round_left(x) */ + return NEXTFOLD; +} + +LJFOLD(ABS ABS KNUM) +LJFOLDF(shortcut_left) +{ + return LEFTFOLD; /* f(g(x)) ==> g(x) */ +} + +LJFOLD(ABS NEG KNUM) +LJFOLDF(shortcut_dropleft) +{ + PHIBARRIER(fleft); + fins->op1 = fleft->op1; /* abs(neg(x)) ==> abs(x) */ + return RETRYFOLD; +} + +/* Note: no safe shortcuts with STRTO and TOSTR ("1e2" ==> +100 ==> "100"). */ +LJFOLD(NEG NEG any) +LJFOLD(BNOT BNOT) +LJFOLD(BSWAP BSWAP) +LJFOLDF(shortcut_leftleft) +{ + PHIBARRIER(fleft); /* See above. Fold would be ok, but not beneficial. */ + return fleft->op1; /* f(g(x)) ==> x */ +} + +/* -- FP algebraic simplifications ---------------------------------------- */ + +/* FP arithmetic is tricky -- there's not much to simplify. 
+** Please note the following common pitfalls before sending "improvements": +** x+0 ==> x is INVALID for x=-0 +** 0-x ==> -x is INVALID for x=+0 +** x*0 ==> 0 is INVALID for x=-0, x=+-Inf or x=NaN +*/ + +LJFOLD(ADD NEG any) +LJFOLDF(simplify_numadd_negx) +{ + PHIBARRIER(fleft); + fins->o = IR_SUB; /* (-a) + b ==> b - a */ + fins->op1 = fins->op2; + fins->op2 = fleft->op1; + return RETRYFOLD; +} + +LJFOLD(ADD any NEG) +LJFOLDF(simplify_numadd_xneg) +{ + PHIBARRIER(fright); + fins->o = IR_SUB; /* a + (-b) ==> a - b */ + fins->op2 = fright->op1; + return RETRYFOLD; +} + +LJFOLD(SUB any KNUM) +LJFOLDF(simplify_numsub_k) +{ + lua_Number n = knumright; + if (n == 0.0) /* x - (+-0) ==> x */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(SUB NEG KNUM) +LJFOLDF(simplify_numsub_negk) +{ + PHIBARRIER(fleft); + fins->op2 = fleft->op1; /* (-x) - k ==> (-k) - x */ + fins->op1 = (IRRef1)lj_ir_knum(J, -knumright); + return RETRYFOLD; +} + +LJFOLD(SUB any NEG) +LJFOLDF(simplify_numsub_xneg) +{ + PHIBARRIER(fright); + fins->o = IR_ADD; /* a - (-b) ==> a + b */ + fins->op2 = fright->op1; + return RETRYFOLD; +} + +LJFOLD(MUL any KNUM) +LJFOLD(DIV any KNUM) +LJFOLDF(simplify_nummuldiv_k) +{ + lua_Number n = knumright; + if (n == 1.0) { /* x o 1 ==> x */ + return LEFTFOLD; + } else if (n == -1.0) { /* x o -1 ==> -x */ + fins->o = IR_NEG; + fins->op2 = (IRRef1)lj_ir_knum_neg(J); + return RETRYFOLD; + } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */ + fins->o = IR_ADD; + fins->op2 = fins->op1; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(MUL NEG KNUM) +LJFOLD(DIV NEG KNUM) +LJFOLDF(simplify_nummuldiv_negk) +{ + PHIBARRIER(fleft); + fins->op1 = fleft->op1; /* (-a) o k ==> a o (-k) */ + fins->op2 = (IRRef1)lj_ir_knum(J, -knumright); + return RETRYFOLD; +} + +LJFOLD(MUL NEG NEG) +LJFOLD(DIV NEG NEG) +LJFOLDF(simplify_nummuldiv_negneg) +{ + PHIBARRIER(fleft); + PHIBARRIER(fright); + fins->op1 = fleft->op1; /* (-a) o (-b) ==> a o b */ + fins->op2 = fright->op1; + return RETRYFOLD; +} + +LJFOLD(POW any KINT) +LJFOLDF(simplify_numpow_xk) +{ + int32_t k = fright->i; + TRef ref = fins->op1; + if (k == 0) /* x ^ 0 ==> 1 */ + return lj_ir_knum_one(J); /* Result must be a number, not an int. */ + if (k == 1) /* x ^ 1 ==> x */ + return LEFTFOLD; + if ((uint32_t)(k+65536) > 2*65536u) /* Limit code explosion. */ + return NEXTFOLD; + if (k < 0) { /* x ^ (-k) ==> (1/x) ^ k. */ + ref = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), ref); + k = -k; + } + /* Unroll x^k for 1 <= k <= 65536. */ + for (; (k & 1) == 0; k >>= 1) /* Handle leading zeros. */ + ref = emitir(IRTN(IR_MUL), ref, ref); + if ((k >>= 1) != 0) { /* Handle trailing bits. */ + TRef tmp = emitir(IRTN(IR_MUL), ref, ref); + for (; k != 1; k >>= 1) { + if (k & 1) + ref = emitir(IRTN(IR_MUL), ref, tmp); + tmp = emitir(IRTN(IR_MUL), tmp, tmp); + } + ref = emitir(IRTN(IR_MUL), ref, tmp); + } + return ref; +} + +LJFOLD(POW KNUM any) +LJFOLDF(simplify_numpow_kx) +{ + lua_Number n = knumleft; + if (n == 2.0) { /* 2.0 ^ i ==> ldexp(1.0, tonum(i)) */ + fins->o = IR_CONV; +#if LJ_TARGET_X86ORX64 + fins->op1 = fins->op2; + fins->op2 = IRCONV_NUM_INT; + fins->op2 = (IRRef1)lj_opt_fold(J); +#endif + fins->op1 = (IRRef1)lj_ir_knum_one(J); + fins->o = IR_LDEXP; + return RETRYFOLD; + } + return NEXTFOLD; +} + +/* -- Simplify conversions ------------------------------------------------ */ + +LJFOLD(CONV CONV IRCONV_NUM_INT) /* _NUM */ +LJFOLDF(shortcut_conv_num_int) +{ + PHIBARRIER(fleft); + /* Only safe with a guarded conversion to int. 
*/ + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_NUM && irt_isguard(fleft->t)) + return fleft->op1; /* f(g(x)) ==> x */ + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_INT_NUM) /* _INT */ +LJFOLDF(simplify_conv_int_num) +{ + /* Fold even across PHI to avoid expensive num->int conversions in loop. */ + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) + return fleft->op1; + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_U32_NUM) /* _U32*/ +LJFOLDF(simplify_conv_u32_num) +{ + /* Fold even across PHI to avoid expensive num->int conversions in loop. */ + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) + return fleft->op1; + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_I64_NUM) /* _INT or _U32*/ +LJFOLD(CONV CONV IRCONV_U64_NUM) /* _INT or _U32*/ +LJFOLDF(simplify_conv_i64_num) +{ + PHIBARRIER(fleft); + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) { + /* Reduce to a sign-extension. */ + fins->op1 = fleft->op1; + fins->op2 = ((IRT_I64<<5)|IRT_INT|IRCONV_SEXT); + return RETRYFOLD; + } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { +#if LJ_TARGET_X64 + return fleft->op1; +#else + /* Reduce to a zero-extension. */ + fins->op1 = fleft->op1; + fins->op2 = (IRT_I64<<5)|IRT_U32; + return RETRYFOLD; +#endif + } + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_INT_I64) /* _INT */ +LJFOLD(CONV CONV IRCONV_INT_U64) /* _INT */ +LJFOLDF(simplify_conv_int_i64) +{ + PHIBARRIER(fleft); + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) + return fleft->op1; + return NEXTFOLD; +} + +LJFOLD(CONV CONV IRCONV_NUM_FLOAT) /* _NUM */ +LJFOLDF(simplify_conv_flt_num) +{ + PHIBARRIER(fleft); + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_NUM) + return fleft->op1; + return NEXTFOLD; +} + +/* Shortcut TOBIT + IRT_NUM <- IRT_INT/IRT_U32 conversion. */ +LJFOLD(TOBIT CONV KNUM) +LJFOLDF(simplify_tobit_conv) +{ + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT || + (fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { + /* Fold even across PHI to avoid expensive num->int conversions in loop. */ + lua_assert(irt_isnum(fleft->t)); + return fleft->op1; + } + return NEXTFOLD; +} + +/* Shortcut floor/ceil/round + IRT_NUM <- IRT_INT/IRT_U32 conversion. */ +LJFOLD(FPMATH CONV IRFPM_FLOOR) +LJFOLD(FPMATH CONV IRFPM_CEIL) +LJFOLD(FPMATH CONV IRFPM_TRUNC) +LJFOLDF(simplify_floor_conv) +{ + if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT || + (fleft->op2 & IRCONV_SRCMASK) == IRT_U32) + return LEFTFOLD; + return NEXTFOLD; +} + +/* Strength reduction of widening. */ +LJFOLD(CONV any IRCONV_I64_INT) +LJFOLDF(simplify_conv_sext) +{ + IRRef ref = fins->op1; + int64_t ofs = 0; + if (!(fins->op2 & IRCONV_SEXT)) + return NEXTFOLD; + PHIBARRIER(fleft); + if (fleft->o == IR_XLOAD && (irt_isu8(fleft->t) || irt_isu16(fleft->t))) + goto ok_reduce; + if (fleft->o == IR_ADD && irref_isk(fleft->op2)) { + ofs = (int64_t)IR(fleft->op2)->i; + ref = fleft->op1; + } + /* Use scalar evolution analysis results to strength-reduce sign-extension. */ + if (ref == J->scev.idx) { + IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop; + lua_assert(irt_isint(J->scev.t)); + if (lo && IR(lo)->i + ofs >= 0) { + ok_reduce: +#if LJ_TARGET_X64 + /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */ + return LEFTFOLD; +#else + /* Reduce to a (cheaper) zero-extension. */ + fins->op2 &= ~IRCONV_SEXT; + return RETRYFOLD; +#endif + } + } + return NEXTFOLD; +} + +/* Strength reduction of narrowing. 
*/ +LJFOLD(CONV ADD IRCONV_INT_I64) +LJFOLD(CONV SUB IRCONV_INT_I64) +LJFOLD(CONV MUL IRCONV_INT_I64) +LJFOLD(CONV ADD IRCONV_INT_U64) +LJFOLD(CONV SUB IRCONV_INT_U64) +LJFOLD(CONV MUL IRCONV_INT_U64) +LJFOLDF(simplify_conv_narrow) +{ + IROp op = (IROp)fleft->o; + IRRef op1 = fleft->op1, op2 = fleft->op2, mode = fins->op2; + PHIBARRIER(fleft); + op1 = emitir(IRTI(IR_CONV), op1, mode); + op2 = emitir(IRTI(IR_CONV), op2, mode); + fins->ot = IRTI(op); + fins->op1 = op1; + fins->op2 = op2; + return RETRYFOLD; +} + +/* Special CSE rule for CONV. */ +LJFOLD(CONV any any) +LJFOLDF(cse_conv) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { + IRRef op1 = fins->op1, op2 = (fins->op2 & IRCONV_MODEMASK); + uint8_t guard = irt_isguard(fins->t); + IRRef ref = J->chain[IR_CONV]; + while (ref > op1) { + IRIns *ir = IR(ref); + /* Commoning with stronger checks is ok. */ + if (ir->op1 == op1 && (ir->op2 & IRCONV_MODEMASK) == op2 && + irt_isguard(ir->t) >= guard) + return ref; + ref = ir->prev; + } + } + return EMITFOLD; /* No fallthrough to regular CSE. */ +} + +/* FP conversion narrowing. */ +LJFOLD(TOBIT ADD KNUM) +LJFOLD(TOBIT SUB KNUM) +LJFOLD(CONV ADD IRCONV_INT_NUM) +LJFOLD(CONV SUB IRCONV_INT_NUM) +LJFOLD(CONV ADD IRCONV_I64_NUM) +LJFOLD(CONV SUB IRCONV_I64_NUM) +LJFOLDF(narrow_convert) +{ + PHIBARRIER(fleft); + /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */ + if (J->chain[IR_LOOP]) + return NEXTFOLD; + lua_assert(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT); + return lj_opt_narrow_convert(J); +} + +/* -- Integer algebraic simplifications ----------------------------------- */ + +LJFOLD(ADD any KINT) +LJFOLD(ADDOV any KINT) +LJFOLD(SUBOV any KINT) +LJFOLDF(simplify_intadd_k) +{ + if (fright->i == 0) /* i o 0 ==> i */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(MULOV any KINT) +LJFOLDF(simplify_intmul_k) +{ + if (fright->i == 0) /* i * 0 ==> 0 */ + return RIGHTFOLD; + if (fright->i == 1) /* i * 1 ==> i */ + return LEFTFOLD; + if (fright->i == 2) { /* i * 2 ==> i + i */ + fins->o = IR_ADDOV; + fins->op2 = fins->op1; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(SUB any KINT) +LJFOLDF(simplify_intsub_k) +{ + if (fright->i == 0) /* i - 0 ==> i */ + return LEFTFOLD; + fins->o = IR_ADD; /* i - k ==> i + (-k) */ + fins->op2 = (IRRef1)lj_ir_kint(J, -fright->i); /* Overflow for -2^31 ok. */ + return RETRYFOLD; +} + +LJFOLD(SUB KINT any) +LJFOLD(SUB KINT64 any) +LJFOLDF(simplify_intsub_kleft) +{ + if (fleft->o == IR_KINT ? (fleft->i == 0) : (ir_kint64(fleft)->u64 == 0)) { + fins->o = IR_NEG; /* 0 - i ==> -i */ + fins->op1 = fins->op2; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(ADD any KINT64) +LJFOLDF(simplify_intadd_k64) +{ + if (ir_kint64(fright)->u64 == 0) /* i + 0 ==> i */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(SUB any KINT64) +LJFOLDF(simplify_intsub_k64) +{ + uint64_t k = ir_kint64(fright)->u64; + if (k == 0) /* i - 0 ==> i */ + return LEFTFOLD; + fins->o = IR_ADD; /* i - k ==> i + (-k) */ + fins->op2 = (IRRef1)lj_ir_kint64(J, (uint64_t)-(int64_t)k); + return RETRYFOLD; +} + +static TRef simplify_intmul_k(jit_State *J, int32_t k) +{ + /* Note: many more simplifications are possible, e.g. 2^k1 +- 2^k2. + ** But this is mainly intended for simple address arithmetic. + ** Also it's easier for the backend to optimize the original multiplies. 
+ */ + if (k == 1) { /* i * 1 ==> i */ + return LEFTFOLD; + } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */ + fins->o = IR_BSHL; + fins->op2 = lj_ir_kint(J, lj_fls((uint32_t)k)); + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(MUL any KINT) +LJFOLDF(simplify_intmul_k32) +{ + if (fright->i == 0) /* i * 0 ==> 0 */ + return INTFOLD(0); + else if (fright->i > 0) + return simplify_intmul_k(J, fright->i); + return NEXTFOLD; +} + +LJFOLD(MUL any KINT64) +LJFOLDF(simplify_intmul_k64) +{ + if (ir_kint64(fright)->u64 == 0) /* i * 0 ==> 0 */ + return INT64FOLD(0); +#if LJ_64 + /* NYI: SPLIT for BSHL and 32 bit backend support. */ + else if (ir_kint64(fright)->u64 < 0x80000000u) + return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64); +#endif + return NEXTFOLD; +} + +LJFOLD(MOD any KINT) +LJFOLDF(simplify_intmod_k) +{ + int32_t k = fright->i; + lua_assert(k != 0); + if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */ + fins->o = IR_BAND; + fins->op2 = lj_ir_kint(J, k-1); + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(MOD KINT any) +LJFOLDF(simplify_intmod_kleft) +{ + if (fleft->i == 0) + return INTFOLD(0); + return NEXTFOLD; +} + +LJFOLD(SUB any any) +LJFOLD(SUBOV any any) +LJFOLDF(simplify_intsub) +{ + if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) /* i - i ==> 0 */ + return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0); + return NEXTFOLD; +} + +LJFOLD(SUB ADD any) +LJFOLDF(simplify_intsubadd_leftcancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fleft); + if (fins->op2 == fleft->op1) /* (i + j) - i ==> j */ + return fleft->op2; + if (fins->op2 == fleft->op2) /* (i + j) - j ==> i */ + return fleft->op1; + } + return NEXTFOLD; +} + +LJFOLD(SUB SUB any) +LJFOLDF(simplify_intsubsub_leftcancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fleft); + if (fins->op1 == fleft->op1) { /* (i - j) - i ==> 0 - j */ + fins->op1 = (IRRef1)lj_ir_kint(J, 0); + fins->op2 = fleft->op2; + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(SUB any SUB) +LJFOLDF(simplify_intsubsub_rightcancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fright); + if (fins->op1 == fright->op1) /* i - (i - j) ==> j */ + return fright->op2; + } + return NEXTFOLD; +} + +LJFOLD(SUB any ADD) +LJFOLDF(simplify_intsubadd_rightcancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fright); + if (fins->op1 == fright->op1) { /* i - (i + j) ==> 0 - j */ + fins->op2 = fright->op2; + fins->op1 = (IRRef1)lj_ir_kint(J, 0); + return RETRYFOLD; + } + if (fins->op1 == fright->op2) { /* i - (j + i) ==> 0 - j */ + fins->op2 = fright->op1; + fins->op1 = (IRRef1)lj_ir_kint(J, 0); + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(SUB ADD ADD) +LJFOLDF(simplify_intsubaddadd_cancel) +{ + if (!irt_isnum(fins->t)) { + PHIBARRIER(fleft); + PHIBARRIER(fright); + if (fleft->op1 == fright->op1) { /* (i + j1) - (i + j2) ==> j1 - j2 */ + fins->op1 = fleft->op2; + fins->op2 = fright->op2; + return RETRYFOLD; + } + if (fleft->op1 == fright->op2) { /* (i + j1) - (j2 + i) ==> j1 - j2 */ + fins->op1 = fleft->op2; + fins->op2 = fright->op1; + return RETRYFOLD; + } + if (fleft->op2 == fright->op1) { /* (j1 + i) - (i + j2) ==> j1 - j2 */ + fins->op1 = fleft->op1; + fins->op2 = fright->op2; + return RETRYFOLD; + } + if (fleft->op2 == fright->op2) { /* (j1 + i) - (j2 + i) ==> j1 - j2 */ + fins->op1 = fleft->op1; + fins->op2 = fright->op1; + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(BAND any KINT) +LJFOLD(BAND any KINT64) +LJFOLDF(simplify_band_k) +{ + int64_t k = fright->o == IR_KINT ? 
(int64_t)fright->i : + (int64_t)ir_k64(fright)->u64; + if (k == 0) /* i & 0 ==> 0 */ + return RIGHTFOLD; + if (k == -1) /* i & -1 ==> i */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(BOR any KINT) +LJFOLD(BOR any KINT64) +LJFOLDF(simplify_bor_k) +{ + int64_t k = fright->o == IR_KINT ? (int64_t)fright->i : + (int64_t)ir_k64(fright)->u64; + if (k == 0) /* i | 0 ==> i */ + return LEFTFOLD; + if (k == -1) /* i | -1 ==> -1 */ + return RIGHTFOLD; + return NEXTFOLD; +} + +LJFOLD(BXOR any KINT) +LJFOLD(BXOR any KINT64) +LJFOLDF(simplify_bxor_k) +{ + int64_t k = fright->o == IR_KINT ? (int64_t)fright->i : + (int64_t)ir_k64(fright)->u64; + if (k == 0) /* i xor 0 ==> i */ + return LEFTFOLD; + if (k == -1) { /* i xor -1 ==> ~i */ + fins->o = IR_BNOT; + fins->op2 = 0; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(BSHL any KINT) +LJFOLD(BSHR any KINT) +LJFOLD(BSAR any KINT) +LJFOLD(BROL any KINT) +LJFOLD(BROR any KINT) +LJFOLDF(simplify_shift_ik) +{ + int32_t mask = irt_is64(fins->t) ? 63 : 31; + int32_t k = (fright->i & mask); + if (k == 0) /* i o 0 ==> i */ + return LEFTFOLD; + if (k == 1 && fins->o == IR_BSHL) { /* i << 1 ==> i + i */ + fins->o = IR_ADD; + fins->op2 = fins->op1; + return RETRYFOLD; + } + if (k != fright->i) { /* i o k ==> i o (k & mask) */ + fins->op2 = (IRRef1)lj_ir_kint(J, k); + return RETRYFOLD; + } +#ifndef LJ_TARGET_UNIFYROT + if (fins->o == IR_BROR) { /* bror(i, k) ==> brol(i, (-k)&mask) */ + fins->o = IR_BROL; + fins->op2 = (IRRef1)lj_ir_kint(J, (-k)&mask); + return RETRYFOLD; + } +#endif + return NEXTFOLD; +} + +LJFOLD(BSHL any BAND) +LJFOLD(BSHR any BAND) +LJFOLD(BSAR any BAND) +LJFOLD(BROL any BAND) +LJFOLD(BROR any BAND) +LJFOLDF(simplify_shift_andk) +{ + IRIns *irk = IR(fright->op2); + PHIBARRIER(fright); + if ((fins->o < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && + irk->o == IR_KINT) { /* i o (j & mask) ==> i o j */ + int32_t mask = irt_is64(fins->t) ? 63 : 31; + int32_t k = irk->i & mask; + if (k == mask) { + fins->op2 = fright->op1; + return RETRYFOLD; + } + } + return NEXTFOLD; +} + +LJFOLD(BSHL KINT any) +LJFOLD(BSHR KINT any) +LJFOLD(BSHL KINT64 any) +LJFOLD(BSHR KINT64 any) +LJFOLDF(simplify_shift1_ki) +{ + int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i : + (int64_t)ir_k64(fleft)->u64; + if (k == 0) /* 0 o i ==> 0 */ + return LEFTFOLD; + return NEXTFOLD; +} + +LJFOLD(BSAR KINT any) +LJFOLD(BROL KINT any) +LJFOLD(BROR KINT any) +LJFOLD(BSAR KINT64 any) +LJFOLD(BROL KINT64 any) +LJFOLD(BROR KINT64 any) +LJFOLDF(simplify_shift2_ki) +{ + int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i : + (int64_t)ir_k64(fleft)->u64; + if (k == 0 || k == -1) /* 0 o i ==> 0; -1 o i ==> -1 */ + return LEFTFOLD; + return NEXTFOLD; +} + +/* -- Reassociation ------------------------------------------------------- */ + +LJFOLD(ADD ADD KINT) +LJFOLD(MUL MUL KINT) +LJFOLD(BAND BAND KINT) +LJFOLD(BOR BOR KINT) +LJFOLD(BXOR BXOR KINT) +LJFOLDF(reassoc_intarith_k) +{ + IRIns *irk = IR(fleft->op2); + if (irk->o == IR_KINT) { + int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o); + if (k == irk->i) /* (i o k1) o k2 ==> i o k1, if (k1 o k2) == k1. 
*/ + return LEFTFOLD; + PHIBARRIER(fleft); + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_kint(J, k); + return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */ + } + return NEXTFOLD; +} + +LJFOLD(ADD ADD KINT64) +LJFOLD(MUL MUL KINT64) +LJFOLD(BAND BAND KINT64) +LJFOLD(BOR BOR KINT64) +LJFOLD(BXOR BXOR KINT64) +LJFOLDF(reassoc_intarith_k64) +{ +#if LJ_HASFFI || LJ_64 + IRIns *irk = IR(fleft->op2); + if (irk->o == IR_KINT64) { + uint64_t k = kfold_int64arith(ir_k64(irk)->u64, + ir_k64(fright)->u64, (IROp)fins->o); + PHIBARRIER(fleft); + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_kint64(J, k); + return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */ + } + return NEXTFOLD; +#else + UNUSED(J); lua_assert(0); return FAILFOLD; +#endif +} + +LJFOLD(MIN MIN any) +LJFOLD(MAX MAX any) +LJFOLD(BAND BAND any) +LJFOLD(BOR BOR any) +LJFOLDF(reassoc_dup) +{ + PHIBARRIER(fleft); + if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2) + return LEFTFOLD; /* (a o b) o a ==> a o b; (a o b) o b ==> a o b */ + return NEXTFOLD; +} + +LJFOLD(BXOR BXOR any) +LJFOLDF(reassoc_bxor) +{ + PHIBARRIER(fleft); + if (fins->op2 == fleft->op1) /* (a xor b) xor a ==> b */ + return fleft->op2; + if (fins->op2 == fleft->op2) /* (a xor b) xor b ==> a */ + return fleft->op1; + return NEXTFOLD; +} + +LJFOLD(BSHL BSHL KINT) +LJFOLD(BSHR BSHR KINT) +LJFOLD(BSAR BSAR KINT) +LJFOLD(BROL BROL KINT) +LJFOLD(BROR BROR KINT) +LJFOLDF(reassoc_shift) +{ + IRIns *irk = IR(fleft->op2); + PHIBARRIER(fleft); /* The (shift any KINT) rule covers k2 == 0 and more. */ + if (irk->o == IR_KINT) { /* (i o k1) o k2 ==> i o (k1 + k2) */ + int32_t mask = irt_is64(fins->t) ? 63 : 31; + int32_t k = (irk->i & mask) + (fright->i & mask); + if (k > mask) { /* Combined shift too wide? */ + if (fins->o == IR_BSHL || fins->o == IR_BSHR) + return mask == 31 ? INTFOLD(0) : INT64FOLD(0); + else if (fins->o == IR_BSAR) + k = mask; + else + k &= mask; + } + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_kint(J, k); + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(MIN MIN KNUM) +LJFOLD(MAX MAX KNUM) +LJFOLD(MIN MIN KINT) +LJFOLD(MAX MAX KINT) +LJFOLDF(reassoc_minmax_k) +{ + IRIns *irk = IR(fleft->op2); + if (irk->o == IR_KNUM) { + lua_Number a = ir_knum(irk)->n; + lua_Number y = lj_vm_foldarith(a, knumright, fins->o - IR_ADD); + if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */ + return LEFTFOLD; + PHIBARRIER(fleft); + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_knum(J, y); + return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */ + } else if (irk->o == IR_KINT) { + int32_t a = irk->i; + int32_t y = kfold_intop(a, fright->i, fins->o); + if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */ + return LEFTFOLD; + PHIBARRIER(fleft); + fins->op1 = fleft->op1; + fins->op2 = (IRRef1)lj_ir_kint(J, y); + return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */ + } + return NEXTFOLD; +} + +LJFOLD(MIN MAX any) +LJFOLD(MAX MIN any) +LJFOLDF(reassoc_minmax_left) +{ + if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2) + return RIGHTFOLD; /* (b o1 a) o2 b ==> b; (a o1 b) o2 b ==> b */ + return NEXTFOLD; +} + +LJFOLD(MIN any MAX) +LJFOLD(MAX any MIN) +LJFOLDF(reassoc_minmax_right) +{ + if (fins->op1 == fright->op1 || fins->op1 == fright->op2) + return LEFTFOLD; /* a o2 (a o1 b) ==> a; a o2 (b o1 a) ==> a */ + return NEXTFOLD; +} + +/* -- Array bounds check elimination -------------------------------------- */ + +/* Eliminate ABC across PHIs to handle t[i-1] forwarding case. 
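
As an aside on the strength-reduction and reassociation rules above (simplify_intmul_k, simplify_intmod_k, simplify_shift_ik, reassoc_intarith_k): they rest on ordinary machine-integer identities. The snippet below is an illustrative, standalone check of those identities for unsigned 32-bit operands; it is editorial material, not part of LuaJIT or of this patch.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
      uint32_t i = 12345u, k1 = 7u, k2 = 9u;
      /* i * 2^n ==> i << n (simplify_intmul_k) */
      assert(i * 8u == i << 3);
      /* i % 2^n ==> i & (2^n - 1), for non-negative i (simplify_intmod_k) */
      assert(i % 16u == (i & 15u));
      /* Shift counts are reduced modulo the width (simplify_shift_ik). */
      assert((i << (35 & 31)) == (i << 3));
      /* i << 1 ==> i + i (same rule). */
      assert((i << 1) == i + i);
      /* (i + k1) + k2 ==> i + (k1 + k2) (reassoc_intarith_k). */
      assert((i + k1) + k2 == i + (k1 + k2));
      return 0;
    }

Note that the fold rules only apply the MUL rewrite for positive constants and the MOD rewrite for positive power-of-two constants, as the guards in the code above show.
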
+** ABC(asize, (i+k)+(-k)) ==> ABC(asize, i), but only if it already exists. +** Could be generalized to (i+k1)+k2 ==> i+(k1+k2), but needs better disambig. +*/ +LJFOLD(ABC any ADD) +LJFOLDF(abc_fwd) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) { + if (irref_isk(fright->op2)) { + IRIns *add2 = IR(fright->op1); + if (add2->o == IR_ADD && irref_isk(add2->op2) && + IR(fright->op2)->i == -IR(add2->op2)->i) { + IRRef ref = J->chain[IR_ABC]; + IRRef lim = add2->op1; + if (fins->op1 > lim) lim = fins->op1; + while (ref > lim) { + IRIns *ir = IR(ref); + if (ir->op1 == fins->op1 && ir->op2 == add2->op1) + return DROPFOLD; + ref = ir->prev; + } + } + } + } + return NEXTFOLD; +} + +/* Eliminate ABC for constants. +** ABC(asize, k1), ABC(asize k2) ==> ABC(asize, max(k1, k2)) +** Drop second ABC if k2 is lower. Otherwise patch first ABC with k2. +*/ +LJFOLD(ABC any KINT) +LJFOLDF(abc_k) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) { + IRRef ref = J->chain[IR_ABC]; + IRRef asize = fins->op1; + while (ref > asize) { + IRIns *ir = IR(ref); + if (ir->op1 == asize && irref_isk(ir->op2)) { + int32_t k = IR(ir->op2)->i; + if (fright->i > k) + ir->op2 = fins->op2; + return DROPFOLD; + } + ref = ir->prev; + } + return EMITFOLD; /* Already performed CSE. */ + } + return NEXTFOLD; +} + +/* Eliminate invariant ABC inside loop. */ +LJFOLD(ABC any any) +LJFOLDF(abc_invar) +{ + if (!irt_isint(fins->t) && J->chain[IR_LOOP]) /* Currently marked as PTR. */ + return DROPFOLD; + return NEXTFOLD; +} + +/* -- Commutativity ------------------------------------------------------- */ + +/* The refs of commutative ops are canonicalized. Lower refs go to the right. +** Rationale behind this: +** - It (also) moves constants to the right. +** - It reduces the number of FOLD rules (e.g. (BOR any KINT) suffices). +** - It helps CSE to find more matches. +** - The assembler generates better code with constants at the right. +*/ + +LJFOLD(ADD any any) +LJFOLD(MUL any any) +LJFOLD(ADDOV any any) +LJFOLD(MULOV any any) +LJFOLDF(comm_swap) +{ + if (fins->op1 < fins->op2) { /* Move lower ref to the right. */ + IRRef1 tmp = fins->op1; + fins->op1 = fins->op2; + fins->op2 = tmp; + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(EQ any any) +LJFOLD(NE any any) +LJFOLDF(comm_equal) +{ + /* For non-numbers only: x == x ==> drop; x ~= x ==> fail */ + if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) + return CONDFOLD(fins->o == IR_EQ); + return fold_comm_swap(J); +} + +LJFOLD(LT any any) +LJFOLD(GE any any) +LJFOLD(LE any any) +LJFOLD(GT any any) +LJFOLD(ULT any any) +LJFOLD(UGE any any) +LJFOLD(ULE any any) +LJFOLD(UGT any any) +LJFOLDF(comm_comp) +{ + /* For non-numbers only: x <=> x ==> drop; x <> x ==> fail */ + if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) + return CONDFOLD((fins->o ^ (fins->o >> 1)) & 1); + if (fins->op1 < fins->op2) { /* Move lower ref to the right. */ + IRRef1 tmp = fins->op1; + fins->op1 = fins->op2; + fins->op2 = tmp; + fins->o ^= 3; /* GT <-> LT, GE <-> LE, does not affect U */ + return RETRYFOLD; + } + return NEXTFOLD; +} + +LJFOLD(BAND any any) +LJFOLD(BOR any any) +LJFOLD(MIN any any) +LJFOLD(MAX any any) +LJFOLDF(comm_dup) +{ + if (fins->op1 == fins->op2) /* x o x ==> x */ + return LEFTFOLD; + return fold_comm_swap(J); +} + +LJFOLD(BXOR any any) +LJFOLDF(comm_bxor) +{ + if (fins->op1 == fins->op2) /* i xor i ==> 0 */ + return irt_is64(fins->t) ? 
INT64FOLD(0) : INTFOLD(0); + return fold_comm_swap(J); +} + +/* -- Simplification of compound expressions ------------------------------ */ + +static TRef kfold_xload(jit_State *J, IRIns *ir, const void *p) +{ + int32_t k; + switch (irt_type(ir->t)) { + case IRT_NUM: return lj_ir_knum_u64(J, *(uint64_t *)p); + case IRT_I8: k = (int32_t)*(int8_t *)p; break; + case IRT_U8: k = (int32_t)*(uint8_t *)p; break; + case IRT_I16: k = (int32_t)(int16_t)lj_getu16(p); break; + case IRT_U16: k = (int32_t)(uint16_t)lj_getu16(p); break; + case IRT_INT: case IRT_U32: k = (int32_t)lj_getu32(p); break; + case IRT_I64: case IRT_U64: return lj_ir_kint64(J, *(uint64_t *)p); + default: return 0; + } + return lj_ir_kint(J, k); +} + +/* Turn: string.sub(str, a, b) == kstr +** into: string.byte(str, a) == string.byte(kstr, 1) etc. +** Note: this creates unaligned XLOADs on x86/x64. +*/ +LJFOLD(EQ SNEW KGC) +LJFOLD(NE SNEW KGC) +LJFOLDF(merge_eqne_snew_kgc) +{ + GCstr *kstr = ir_kstr(fright); + int32_t len = (int32_t)kstr->len; + lua_assert(irt_isstr(fins->t)); + +#if LJ_TARGET_X86ORX64 +#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */ +#define FOLD_SNEW_TYPE8 IRT_I8 /* Creates shorter immediates. */ +#else +#define FOLD_SNEW_MAX_LEN 1 /* Handle string lengths 0 or 1. */ +#define FOLD_SNEW_TYPE8 IRT_U8 /* Prefer unsigned loads. */ +#endif + + if (len <= FOLD_SNEW_MAX_LEN) { + IROp op = (IROp)fins->o; + IRRef strref = fleft->op1; + lua_assert(IR(strref)->o == IR_STRREF); + if (op == IR_EQ) { + emitir(IRTGI(IR_EQ), fleft->op2, lj_ir_kint(J, len)); + /* Caveat: fins/fleft/fright is no longer valid after emitir. */ + } else { + /* NE is not expanded since this would need an OR of two conds. */ + if (!irref_isk(fleft->op2)) /* Only handle the constant length case. */ + return NEXTFOLD; + if (IR(fleft->op2)->i != len) + return DROPFOLD; + } + if (len > 0) { + /* A 4 byte load for length 3 is ok -- all strings have an extra NUL. */ + uint16_t ot = (uint16_t)(len == 1 ? IRT(IR_XLOAD, FOLD_SNEW_TYPE8) : + len == 2 ? IRT(IR_XLOAD, IRT_U16) : + IRTI(IR_XLOAD)); + TRef tmp = emitir(ot, strref, + IRXLOAD_READONLY | (len > 1 ? IRXLOAD_UNALIGNED : 0)); + TRef val = kfold_xload(J, IR(tref_ref(tmp)), strdata(kstr)); + if (len == 3) + tmp = emitir(IRTI(IR_BAND), tmp, + lj_ir_kint(J, LJ_ENDIAN_SELECT(0x00ffffff, 0xffffff00))); + fins->op1 = (IRRef1)tmp; + fins->op2 = (IRRef1)val; + fins->ot = (IROpT)IRTGI(op); + return RETRYFOLD; + } else { + return DROPFOLD; + } + } + return NEXTFOLD; +} + +/* -- Loads --------------------------------------------------------------- */ + +/* Loads cannot be folded or passed on to CSE in general. +** Alias analysis is needed to check for forwarding opportunities. +** +** Caveat: *all* loads must be listed here or they end up at CSE! +*/ + +LJFOLD(ALOAD any) +LJFOLDX(lj_opt_fwd_aload) + +/* From HREF fwd (see below). Must eliminate, not supported by fwd/backend. */ +LJFOLD(HLOAD KKPTR) +LJFOLDF(kfold_hload_kkptr) +{ + UNUSED(J); + lua_assert(ir_kptr(fleft) == niltvg(J2G(J))); + return TREF_NIL; +} + +LJFOLD(HLOAD any) +LJFOLDX(lj_opt_fwd_hload) + +LJFOLD(ULOAD any) +LJFOLDX(lj_opt_fwd_uload) + +LJFOLD(CALLL any IRCALL_lj_tab_len) +LJFOLDX(lj_opt_fwd_tab_len) + +/* Upvalue refs are really loads, but there are no corresponding stores. +** So CSE is ok for them, except for UREFO across a GC step (see below). +** If the referenced function is const, its upvalue addresses are const, too. 
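
The merge_eqne_snew_kgc rule above rewrites string.sub(str, a, b) == kstr into a short, possibly unaligned load plus a compare, masking the fourth byte when the constant is three characters long. The sketch below is an illustrative little-endian rendering of that comparison only; the separate length check the rule also emits is omitted, memcpy stands in for the unaligned XLOAD, the helper name prefix3_is_abc is made up for the example, and it relies on the buffer having a trailing NUL, as LuaJIT string data does.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Compare a 3-byte prefix against "abc" with one 4-byte load and a
    ** mask, mirroring the EQ (SNEW KGC) fold. Little-endian only.
    ** The argument must have at least 4 readable bytes. */
    static int prefix3_is_abc(const char *s)
    {
      uint32_t w, k;
      memcpy(&w, s, 4);        /* 4-byte load; for a length-3 string, byte 3 is the NUL */
      memcpy(&k, "abc", 4);    /* constant side, same layout */
      return (w & 0x00ffffffu) == (k & 0x00ffffffu);
    }

    int main(void)
    {
      assert(prefix3_is_abc("abc"));
      assert(prefix3_is_abc("abcd"));   /* only the first three bytes matter */
      assert(!prefix3_is_abc("abx"));
      return 0;
    }
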
+** This can be used to improve CSE by looking for the same address, +** even if the upvalues originate from a different function. +*/ +LJFOLD(UREFO KGC any) +LJFOLD(UREFC KGC any) +LJFOLDF(cse_uref) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { + IRRef ref = J->chain[fins->o]; + GCfunc *fn = ir_kfunc(fleft); + GCupval *uv = gco2uv(gcref(fn->l.uvptr[(fins->op2 >> 8)])); + while (ref > 0) { + IRIns *ir = IR(ref); + if (irref_isk(ir->op1)) { + GCfunc *fn2 = ir_kfunc(IR(ir->op1)); + if (gco2uv(gcref(fn2->l.uvptr[(ir->op2 >> 8)])) == uv) { + if (fins->o == IR_UREFO && gcstep_barrier(J, ref)) + break; + return ref; + } + } + ref = ir->prev; + } + } + return EMITFOLD; +} + +LJFOLD(HREF TNEW any) +LJFOLDF(fwd_href_tnew) +{ + if (lj_opt_fwd_href_nokey(J)) + return lj_ir_kkptr(J, niltvg(J2G(J))); + return NEXTFOLD; +} + +LJFOLD(HREF TDUP KPRI) +LJFOLD(HREF TDUP KGC) +LJFOLD(HREF TDUP KNUM) +LJFOLDF(fwd_href_tdup) +{ + TValue keyv; + lj_ir_kvalue(J->L, &keyv, fright); + if (lj_tab_get(J->L, ir_ktab(IR(fleft->op1)), &keyv) == niltvg(J2G(J)) && + lj_opt_fwd_href_nokey(J)) + return lj_ir_kkptr(J, niltvg(J2G(J))); + return NEXTFOLD; +} + +/* We can safely FOLD/CSE array/hash refs and field loads, since there +** are no corresponding stores. But we need to check for any NEWREF with +** an aliased table, as it may invalidate all of the pointers and fields. +** Only HREF needs the NEWREF check -- AREF and HREFK already depend on +** FLOADs. And NEWREF itself is treated like a store (see below). +*/ +LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE) +LJFOLDF(fload_tab_tnew_asize) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) + return INTFOLD(fleft->op1); + return NEXTFOLD; +} + +LJFOLD(FLOAD TNEW IRFL_TAB_HMASK) +LJFOLDF(fload_tab_tnew_hmask) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) + return INTFOLD((1 << fleft->op2)-1); + return NEXTFOLD; +} + +LJFOLD(FLOAD TDUP IRFL_TAB_ASIZE) +LJFOLDF(fload_tab_tdup_asize) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) + return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->asize); + return NEXTFOLD; +} + +LJFOLD(FLOAD TDUP IRFL_TAB_HMASK) +LJFOLDF(fload_tab_tdup_hmask) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1)) + return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->hmask); + return NEXTFOLD; +} + +LJFOLD(HREF any any) +LJFOLD(FLOAD any IRFL_TAB_ARRAY) +LJFOLD(FLOAD any IRFL_TAB_NODE) +LJFOLD(FLOAD any IRFL_TAB_ASIZE) +LJFOLD(FLOAD any IRFL_TAB_HMASK) +LJFOLDF(fload_tab_ah) +{ + TRef tr = lj_opt_cse(J); + return lj_opt_fwd_tptr(J, tref_ref(tr)) ? tr : EMITFOLD; +} + +/* Strings are immutable, so we can safely FOLD/CSE the related FLOAD. */ +LJFOLD(FLOAD KGC IRFL_STR_LEN) +LJFOLDF(fload_str_len_kgc) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) + return INTFOLD((int32_t)ir_kstr(fleft)->len); + return NEXTFOLD; +} + +LJFOLD(FLOAD SNEW IRFL_STR_LEN) +LJFOLDF(fload_str_len_snew) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { + PHIBARRIER(fleft); + return fleft->op2; + } + return NEXTFOLD; +} + +/* The C type ID of cdata objects is immutable. */ +LJFOLD(FLOAD KGC IRFL_CDATA_TYPEID) +LJFOLDF(fload_cdata_typeid_kgc) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) + return INTFOLD((int32_t)ir_kcdata(fleft)->typeid); + return NEXTFOLD; +} + +/* Get the contents of immutable cdata objects. 
*/ +LJFOLD(FLOAD KGC IRFL_CDATA_PTR) +LJFOLD(FLOAD KGC IRFL_CDATA_INT64) +LJFOLDF(fload_cdata_int64_kgc) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { + void *p = cdataptr(ir_kcdata(fleft)); + if (irt_is64(fins->t)) + return INT64FOLD(*(uint64_t *)p); + else + return INTFOLD(*(int32_t *)p); + } + return NEXTFOLD; +} + +LJFOLD(FLOAD CNEW IRFL_CDATA_TYPEID) +LJFOLD(FLOAD CNEWI IRFL_CDATA_TYPEID) +LJFOLDF(fload_cdata_typeid_cnew) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) + return fleft->op1; /* No PHI barrier needed. CNEW/CNEWI op1 is const. */ + return NEXTFOLD; +} + +/* Pointer and int64 cdata objects are immutable. */ +LJFOLD(FLOAD CNEWI IRFL_CDATA_PTR) +LJFOLD(FLOAD CNEWI IRFL_CDATA_INT64) +LJFOLDF(fload_cdata_ptr_int64_cnew) +{ + if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) + return fleft->op2; /* Fold even across PHI to avoid allocations. */ + return NEXTFOLD; +} + +LJFOLD(FLOAD any IRFL_STR_LEN) +LJFOLD(FLOAD any IRFL_CDATA_TYPEID) +LJFOLD(FLOAD any IRFL_CDATA_PTR) +LJFOLD(FLOAD any IRFL_CDATA_INT64) +LJFOLD(VLOAD any any) /* Vararg loads have no corresponding stores. */ +LJFOLDX(lj_opt_cse) + +/* All other field loads need alias analysis. */ +LJFOLD(FLOAD any any) +LJFOLDX(lj_opt_fwd_fload) + +/* This is for LOOP only. Recording handles SLOADs internally. */ +LJFOLD(SLOAD any any) +LJFOLDF(fwd_sload) +{ + if ((fins->op2 & IRSLOAD_FRAME)) { + TRef tr = lj_opt_cse(J); + return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr; + } else { + lua_assert(J->slot[fins->op1] != 0); + return J->slot[fins->op1]; + } +} + +/* Only fold for KKPTR. The pointer _and_ the contents must be const. */ +LJFOLD(XLOAD KKPTR any) +LJFOLDF(xload_kptr) +{ + TRef tr = kfold_xload(J, fins, ir_kptr(fleft)); + return tr ? tr : NEXTFOLD; +} + +LJFOLD(XLOAD any any) +LJFOLDX(lj_opt_fwd_xload) + +/* -- Write barriers ------------------------------------------------------ */ + +/* Write barriers are amenable to CSE, but not across any incremental +** GC steps. +** +** The same logic applies to open upvalue references, because a stack +** may be resized during a GC step (not the current stack, but maybe that +** of a coroutine). +*/ +LJFOLD(TBAR any) +LJFOLD(OBAR any any) +LJFOLD(UREFO any any) +LJFOLDF(barrier_tab) +{ + TRef tr = lj_opt_cse(J); + if (gcstep_barrier(J, tref_ref(tr))) /* CSE across GC step? */ + return EMITFOLD; /* Raw emit. Assumes fins is left intact by CSE. */ + return tr; +} + +LJFOLD(TBAR TNEW) +LJFOLD(TBAR TDUP) +LJFOLDF(barrier_tnew_tdup) +{ + /* New tables are always white and never need a barrier. */ + if (fins->op1 < J->chain[IR_LOOP]) /* Except across a GC step. */ + return NEXTFOLD; + return DROPFOLD; +} + +/* -- Stores and allocations ---------------------------------------------- */ + +/* Stores and allocations cannot be folded or passed on to CSE in general. +** But some stores can be eliminated with dead-store elimination (DSE). +** +** Caveat: *all* stores and allocs must be listed here or they end up at CSE! +*/ + +LJFOLD(ASTORE any any) +LJFOLD(HSTORE any any) +LJFOLDX(lj_opt_dse_ahstore) + +LJFOLD(USTORE any any) +LJFOLDX(lj_opt_dse_ustore) + +LJFOLD(FSTORE any any) +LJFOLDX(lj_opt_dse_fstore) + +LJFOLD(XSTORE any any) +LJFOLDX(lj_opt_dse_xstore) + +LJFOLD(NEWREF any any) /* Treated like a store. */ +LJFOLD(CALLS any any) +LJFOLD(CALLL any any) /* Safeguard fallback. */ +LJFOLD(CALLXS any any) +LJFOLD(XBAR) +LJFOLD(RETF any any) /* Modifies BASE. 
*/ +LJFOLD(TNEW any any) +LJFOLD(TDUP any) +LJFOLD(CNEW any any) +LJFOLD(XSNEW any any) +LJFOLDX(lj_ir_emit) + +/* ------------------------------------------------------------------------ */ + +/* Every entry in the generated hash table is a 32 bit pattern: +** +** xxxxxxxx iiiiiii lllllll rrrrrrrrrr +** +** xxxxxxxx = 8 bit index into fold function table +** iiiiiii = 7 bit folded instruction opcode +** lllllll = 7 bit left instruction opcode +** rrrrrrrrrr = 8 bit right instruction opcode or 10 bits from literal field +*/ + +#include "lj_folddef.h" + +/* ------------------------------------------------------------------------ */ + +/* Fold IR instruction. */ +TRef LJ_FASTCALL lj_opt_fold(jit_State *J) +{ + uint32_t key, any; + IRRef ref; + + if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) { + lua_assert(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) | + JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT); + /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */ + if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N) + return lj_opt_cse(J); + + /* Forwarding or CSE disabled? Emit raw IR for loads, except for SLOAD. */ + if ((J->flags & (JIT_F_OPT_FWD|JIT_F_OPT_CSE)) != + (JIT_F_OPT_FWD|JIT_F_OPT_CSE) && + irm_kind(lj_ir_mode[fins->o]) == IRM_L && fins->o != IR_SLOAD) + return lj_ir_emit(J); + + /* DSE disabled? Emit raw IR for stores. */ + if (!(J->flags & JIT_F_OPT_DSE) && irm_kind(lj_ir_mode[fins->o]) == IRM_S) + return lj_ir_emit(J); + } + + /* Fold engine start/retry point. */ +retry: + /* Construct key from opcode and operand opcodes (unless literal/none). */ + key = ((uint32_t)fins->o << 17); + if (fins->op1 >= J->cur.nk) { + key += (uint32_t)IR(fins->op1)->o << 10; + *fleft = *IR(fins->op1); + } + if (fins->op2 >= J->cur.nk) { + key += (uint32_t)IR(fins->op2)->o; + *fright = *IR(fins->op2); + } else { + key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */ + } + + /* Check for a match in order from most specific to least specific. */ + any = 0; + for (;;) { + uint32_t k = key | (any & 0x1ffff); + uint32_t h = fold_hashkey(k); + uint32_t fh = fold_hash[h]; /* Lookup key in semi-perfect hash table. */ + if ((fh & 0xffffff) == k || (fh = fold_hash[h+1], (fh & 0xffffff) == k)) { + ref = (IRRef)tref_ref(fold_func[fh >> 24](J)); + if (ref != NEXTFOLD) + break; + } + if (any == 0xfffff) /* Exhausted folding. Pass on to CSE. */ + return lj_opt_cse(J); + any = (any | (any >> 10)) ^ 0xffc00; + } + + /* Return value processing, ordered by frequency. */ + if (LJ_LIKELY(ref >= MAX_FOLD)) + return TREF(ref, irt_t(IR(ref)->t)); + if (ref == RETRYFOLD) + goto retry; + if (ref == KINTFOLD) + return lj_ir_kint(J, fins->i); + if (ref == FAILFOLD) + lj_trace_err(J, LJ_TRERR_GFAIL); + lua_assert(ref == DROPFOLD); + return REF_DROP; +} + +/* -- Common-Subexpression Elimination ------------------------------------ */ + +/* CSE an IR instruction. This is very fast due to the skip-list chains. */ +TRef LJ_FASTCALL lj_opt_cse(jit_State *J) +{ + /* Avoid narrow to wide store-to-load forwarding stall */ + IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16); + IROp op = fins->o; + if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) { + /* Limited search for same operands in per-opcode chain. */ + IRRef ref = J->chain[op]; + IRRef lim = fins->op1; + if (fins->op2 > lim) lim = fins->op2; /* Relies on lit < REF_BIAS. 
*/ + while (ref > lim) { + if (IR(ref)->op12 == op12) + return TREF(ref, irt_t(IR(ref)->t)); /* Common subexpression found. */ + ref = IR(ref)->prev; + } + } + /* Otherwise emit IR (inlined for speed). */ + { + IRRef ref = lj_ir_nextins(J); + IRIns *ir = IR(ref); + ir->prev = J->chain[op]; + ir->op12 = op12; + J->chain[op] = (IRRef1)ref; + ir->o = fins->o; + J->guardemit.irt |= fins->t.irt; + return TREF(ref, irt_t((ir->t = fins->t))); + } +} + +/* CSE with explicit search limit. */ +TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim) +{ + IRRef ref = J->chain[fins->o]; + IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16); + while (ref > lim) { + if (IR(ref)->op12 == op12) + return ref; + ref = IR(ref)->prev; + } + return lj_ir_emit(J); +} + +/* ------------------------------------------------------------------------ */ + +#undef IR +#undef fins +#undef fleft +#undef fright +#undef knumleft +#undef knumright +#undef emitir + +#endif diff --git a/src/luajit2/src/lj_opt_loop.c b/src/luajit2/src/lj_opt_loop.c new file mode 100644 index 0000000000..6dd06636c6 --- /dev/null +++ b/src/luajit2/src/lj_opt_loop.c @@ -0,0 +1,385 @@ +/* +** LOOP: Loop Optimizations. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_loop_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_err.h" +#include "lj_str.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_snap.h" +#include "lj_vm.h" + +/* Loop optimization: +** +** Traditional Loop-Invariant Code Motion (LICM) splits the instructions +** of a loop into invariant and variant instructions. The invariant +** instructions are hoisted out of the loop and only the variant +** instructions remain inside the loop body. +** +** Unfortunately LICM is mostly useless for compiling dynamic languages. +** The IR has many guards and most of the subsequent instructions are +** control-dependent on them. The first non-hoistable guard would +** effectively prevent hoisting of all subsequent instructions. +** +** That's why we use a special form of unrolling using copy-substitution, +** combined with redundancy elimination: +** +** The recorded instruction stream is re-emitted to the compiler pipeline +** with substituted operands. The substitution table is filled with the +** refs returned by re-emitting each instruction. This can be done +** on-the-fly, because the IR is in strict SSA form, where every ref is +** defined before its use. +** +** This aproach generates two code sections, separated by the LOOP +** instruction: +** +** 1. The recorded instructions form a kind of pre-roll for the loop. It +** contains a mix of invariant and variant instructions and performs +** exactly one loop iteration (but not necessarily the 1st iteration). +** +** 2. The loop body contains only the variant instructions and performs +** all remaining loop iterations. +** +** On first sight that looks like a waste of space, because the variant +** instructions are present twice. But the key insight is that the +** pre-roll honors the control-dependencies for *both* the pre-roll itself +** *and* the loop body! +** +** It also means one doesn't have to explicitly model control-dependencies +** (which, BTW, wouldn't help LICM much). And it's much easier to +** integrate sparse snapshotting with this approach. +** +** One of the nicest aspects of this approach is that all of the +** optimizations of the compiler pipeline (FOLD, CSE, FWD, etc.) 
can be +** reused with only minor restrictions (e.g. one should not fold +** instructions across loop-carried dependencies). +** +** But in general all optimizations can be applied which only need to look +** backwards into the generated instruction stream. At any point in time +** during the copy-substitution process this contains both a static loop +** iteration (the pre-roll) and a dynamic one (from the to-be-copied +** instruction up to the end of the partial loop body). +** +** Since control-dependencies are implicitly kept, CSE also applies to all +** kinds of guards. The major advantage is that all invariant guards can +** be hoisted, too. +** +** Load/store forwarding works across loop iterations, too. This is +** important if loop-carried dependencies are kept in upvalues or tables. +** E.g. 'self.idx = self.idx + 1' deep down in some OO-style method may +** become a forwarded loop-recurrence after inlining. +** +** Since the IR is in SSA form, loop-carried dependencies have to be +** modeled with PHI instructions. The potential candidates for PHIs are +** collected on-the-fly during copy-substitution. After eliminating the +** redundant ones, PHI instructions are emitted *below* the loop body. +** +** Note that this departure from traditional SSA form doesn't change the +** semantics of the PHI instructions themselves. But it greatly simplifies +** on-the-fly generation of the IR and the machine code. +*/ + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* Emit raw IR without passing through optimizations. */ +#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) + +/* -- PHI elimination ----------------------------------------------------- */ + +/* Emit or eliminate collected PHIs. */ +static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi, + SnapNo onsnap) +{ + int pass2 = 0; + IRRef i, nslots; + IRRef invar = J->chain[IR_LOOP]; + /* Pass #1: mark redundant and potentially redundant PHIs. */ + for (i = 0; i < nphi; i++) { + IRRef lref = phi[i]; + IRRef rref = subst[lref]; + if (lref == rref || rref == REF_DROP) { /* Invariants are redundant. */ + irt_setmark(IR(lref)->t); + } else if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) { + /* Quick check for simple recurrences failed, need pass2. */ + irt_setmark(IR(lref)->t); + pass2 = 1; + } + } + /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */ + if (pass2) { + SnapNo s; + for (i = J->cur.nins-1; i > invar; i--) { + IRIns *ir = IR(i); + if (!irref_isk(ir->op1)) irt_clearmark(IR(ir->op1)->t); + if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t); + } + for (s = J->cur.nsnap-1; s >= onsnap; s--) { + SnapShot *snap = &J->cur.snap[s]; + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + for (n = 0; n < nent; n++) { + IRRef ref = snap_ref(map[n]); + if (!irref_isk(ref)) irt_clearmark(IR(ref)->t); + } + } + } + /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */ + nslots = J->baseslot+J->maxslot; + for (i = 1; i < nslots; i++) { + IRRef ref = tref_ref(J->slot[i]); + while (!irref_isk(ref) && ref != subst[ref]) { + IRIns *ir = IR(ref); + irt_clearmark(ir->t); /* Unmark potential uses, too. 
*/ + if (irt_isphi(ir->t) || irt_ispri(ir->t)) + break; + irt_setphi(ir->t); + if (nphi >= LJ_MAX_PHI) + lj_trace_err(J, LJ_TRERR_PHIOV); + phi[nphi++] = (IRRef1)ref; + ref = subst[ref]; + if (ref > invar) + break; + } + } + /* Pass #4: emit PHI instructions or eliminate PHIs. */ + for (i = 0; i < nphi; i++) { + IRRef lref = phi[i]; + IRIns *ir = IR(lref); + if (!irt_ismarked(ir->t)) { /* Emit PHI if not marked. */ + IRRef rref = subst[lref]; + if (rref > invar) + irt_setphi(IR(rref)->t); + emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref); + } else { /* Otherwise eliminate PHI. */ + irt_clearmark(ir->t); + irt_clearphi(ir->t); + } + } +} + +/* -- Loop unrolling using copy-substitution ------------------------------ */ + +/* Copy-substitute snapshot. */ +static void loop_subst_snap(jit_State *J, SnapShot *osnap, + SnapEntry *loopmap, IRRef1 *subst) +{ + SnapEntry *nmap, *omap = &J->cur.snapmap[osnap->mapofs]; + MSize nmapofs, depth; + MSize on, ln, nn, onent = osnap->nent; + BCReg nslots = osnap->nslots; + SnapShot *snap = &J->cur.snap[J->cur.nsnap]; + if (irt_isguard(J->guardemit)) { /* Guard inbetween? */ + nmapofs = J->cur.nsnapmap; + J->cur.nsnap++; /* Add new snapshot. */ + } else { /* Otherwise overwrite previous snapshot. */ + snap--; + nmapofs = snap->mapofs; + } + J->guardemit.irt = 0; + depth = osnap->depth; + /* Setup new snapshot. */ + snap->mapofs = (uint16_t)nmapofs; + snap->ref = (IRRef1)J->cur.nins; + snap->depth = (uint8_t)depth; + snap->nslots = nslots; + snap->count = 0; + nmap = &J->cur.snapmap[nmapofs]; + /* Substitute snapshot slots. */ + on = ln = nn = 0; + while (on < onent) { + SnapEntry osn = omap[on], lsn = loopmap[ln]; + if (snap_slot(lsn) < snap_slot(osn)) { /* Copy slot from loop map. */ + nmap[nn++] = lsn; + ln++; + } else { /* Copy substituted slot from snapshot map. */ + if (snap_slot(lsn) == snap_slot(osn)) ln++; /* Shadowed loop slot. */ + if (!irref_isk(snap_ref(osn))) + osn = snap_setref(osn, subst[snap_ref(osn)]); + nmap[nn++] = osn; + on++; + } + } + while (snap_slot(loopmap[ln]) < nslots) /* Copy remaining loop slots. */ + nmap[nn++] = loopmap[ln++]; + snap->nent = (uint8_t)nn; + J->cur.nsnapmap = (uint16_t)(nmapofs + nn + 1 + depth); + omap += onent; + nmap += nn; + for (nn = 0; nn <= depth; nn++) /* Copy PC + frame links. */ + nmap[nn] = omap[nn]; +} + +/* Unroll loop. */ +static void loop_unroll(jit_State *J) +{ + IRRef1 phi[LJ_MAX_PHI]; + uint32_t nphi = 0; + IRRef1 *subst; + SnapNo onsnap; + SnapShot *osnap, *loopsnap; + SnapEntry *loopmap, *psentinel; + IRRef ins, invar; + + /* Use temp buffer for substitution table. + ** Only non-constant refs in [REF_BIAS,invar) are valid indexes. + ** Caveat: don't call into the VM or run the GC or the buffer may be gone. + */ + invar = J->cur.nins; + subst = (IRRef1 *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, + (invar-REF_BIAS)*sizeof(IRRef1)) - REF_BIAS; + subst[REF_BASE] = REF_BASE; + + /* LOOP separates the pre-roll from the loop body. */ + emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0); + + /* Grow snapshot buffer and map for copy-substituted snapshots. + ** Need up to twice the number of snapshots minus #0 and loop snapshot. + ** Need up to twice the number of entries plus fallback substitutions + ** from the loop snapshot entries for each new snapshot. + ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap! 
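
Stepping back from the mechanics in loop_emit_phi and loop_unroll: the observable result of copy-substitution, as the rationale comment above describes, has the same shape as peeling one iteration at the source level. The fragment below is an editorial illustration in plain C, not LuaJIT code; the struct and all names are invented for the example.

    #include <assert.h>

    struct S { int scale; };

    /* Shape produced by copy-substitution: the pre-roll runs one complete
    ** iteration (invariant and variant parts, all guards included); the
    ** loop body that follows keeps only the variant instructions. */
    static int dot_peeled(const struct S *p, const int *a, int n)
    {
      int i = 0, s = 0;
      if (i < n) {
        int scale = p->scale;    /* invariant load: lives in the pre-roll */
        s = s + scale * a[i];    /* variant part, first iteration */
        i = i + 1;
        while (i < n) {          /* loop body: variant part only */
          s = s + scale * a[i];
          i = i + 1;
        }
      }
      return s;                  /* s and i are the loop-carried values (PHIs) */
    }

    int main(void)
    {
      struct S sc = { 2 };
      int a[4] = { 1, 2, 3, 4 };
      assert(dot_peeled(&sc, a, 4) == 20);
      return 0;
    }
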
+ */ + onsnap = J->cur.nsnap; + lj_snap_grow_buf(J, 2*onsnap-2); + lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent); + + /* The loop snapshot is used for fallback substitutions. */ + loopsnap = &J->cur.snap[onsnap-1]; + loopmap = &J->cur.snapmap[loopsnap->mapofs]; + /* The PC of snapshot #0 and the loop snapshot must match. */ + psentinel = &loopmap[loopsnap->nent]; + lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]); + *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */ + + /* Start substitution with snapshot #1 (#0 is empty for root traces). */ + osnap = &J->cur.snap[1]; + + /* Copy and substitute all recorded instructions and snapshots. */ + for (ins = REF_FIRST; ins < invar; ins++) { + IRIns *ir; + IRRef op1, op2; + + if (ins >= osnap->ref) /* Instruction belongs to next snapshot? */ + loop_subst_snap(J, osnap++, loopmap, subst); /* Copy-substitute it. */ + + /* Substitute instruction operands. */ + ir = IR(ins); + op1 = ir->op1; + if (!irref_isk(op1)) op1 = subst[op1]; + op2 = ir->op2; + if (!irref_isk(op2)) op2 = subst[op2]; + if (irm_kind(lj_ir_mode[ir->o]) == IRM_N && + op1 == ir->op1 && op2 == ir->op2) { /* Regular invariant ins? */ + subst[ins] = (IRRef1)ins; /* Shortcut. */ + } else { + /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */ + IRType1 t = ir->t; /* Get this first, since emitir may invalidate ir. */ + IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2)); + subst[ins] = (IRRef1)ref; + if (ref != ins && ref < invar) { /* Loop-carried dependency? */ + IRIns *irr = IR(ref); + /* Potential PHI? */ + if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) { + irt_setphi(irr->t); + if (nphi >= LJ_MAX_PHI) + lj_trace_err(J, LJ_TRERR_PHIOV); + phi[nphi++] = (IRRef1)ref; + } + /* Check all loop-carried dependencies for type instability. */ + if (!irt_sametype(t, irr->t)) { + if (irt_isnum(t) && irt_isinteger(irr->t)) /* Fix int->num. */ + subst[ins] = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT)); + else if (irt_isnum(irr->t) && irt_isinteger(t)) /* Fix num->int. */ + subst[ins] = tref_ref(emitir(IRTGI(IR_CONV), ref, + IRCONV_INT_NUM|IRCONV_CHECK)); + else if (!(irt_isinteger(t) && irt_isinteger(irr->t))) + lj_trace_err(J, LJ_TRERR_TYPEINS); + } + } + } + } + if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */ + J->cur.nsnapmap = (uint16_t)J->cur.snap[--J->cur.nsnap].mapofs; + lua_assert(J->cur.nsnapmap <= J->sizesnapmap); + *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */ + + loop_emit_phi(J, subst, phi, nphi, onsnap); +} + +/* Undo any partial changes made by the loop optimization. */ +static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap) +{ + ptrdiff_t i; + SnapShot *snap = &J->cur.snap[nsnap-1]; + SnapEntry *map = J->cur.snapmap; + map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent]; /* Restore PC. */ + J->cur.nsnapmap = (uint16_t)(snap->mapofs + snap->nent + 1 + snap->depth); + J->cur.nsnap = nsnap; + J->guardemit.irt = 0; + lj_ir_rollback(J, ins); + for (i = 0; i < BPROP_SLOTS; i++) { /* Remove backprop. cache entries. */ + BPropEntry *bp = &J->bpropcache[i]; + if (bp->val >= ins) + bp->key = 0; + } + for (ins--; ins >= REF_FIRST; ins--) { /* Remove flags. */ + IRIns *ir = IR(ins); + irt_clearphi(ir->t); + irt_clearmark(ir->t); + } +} + +/* Protected callback for loop optimization. 
*/ +static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud) +{ + UNUSED(L); UNUSED(dummy); + loop_unroll((jit_State *)ud); + return NULL; +} + +/* Loop optimization. */ +int lj_opt_loop(jit_State *J) +{ + IRRef nins = J->cur.nins; + SnapNo nsnap = J->cur.nsnap; + int errcode = lj_vm_cpcall(J->L, NULL, J, cploop_opt); + if (LJ_UNLIKELY(errcode)) { + lua_State *L = J->L; + if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */ + int32_t e = numberVint(L->top-1); + switch ((TraceError)e) { + case LJ_TRERR_TYPEINS: /* Type instability. */ + case LJ_TRERR_GFAIL: /* Guard would always fail. */ + /* Unrolling via recording fixes many cases, e.g. a flipped boolean. */ + if (--J->instunroll < 0) /* But do not unroll forever. */ + break; + L->top--; /* Remove error object. */ + loop_undo(J, nins, nsnap); + return 1; /* Loop optimization failed, continue recording. */ + default: + break; + } + } + lj_err_throw(L, errcode); /* Propagate all other errors. */ + } + return 0; /* Loop optimization is ok. */ +} + +#undef IR +#undef emitir +#undef emitir_raw + +#endif diff --git a/src/luajit2/src/lj_opt_mem.c b/src/luajit2/src/lj_opt_mem.c new file mode 100644 index 0000000000..400646397a --- /dev/null +++ b/src/luajit2/src/lj_opt_mem.c @@ -0,0 +1,861 @@ +/* +** Memory access optimizations. +** AA: Alias Analysis using high-level semantic disambiguation. +** FWD: Load Forwarding (L2L) + Store Forwarding (S2L). +** DSE: Dead-Store Elimination. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_mem_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_tab.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) +#define fins (&J->fold.ins) +#define fright (&J->fold.right) + +/* +** Caveat #1: return value is not always a TRef -- only use with tref_ref(). +** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold(). +*/ + +/* Return values from alias analysis. */ +typedef enum { + ALIAS_NO, /* The two refs CANNOT alias (exact). */ + ALIAS_MAY, /* The two refs MAY alias (inexact). */ + ALIAS_MUST /* The two refs MUST alias (exact). */ +} AliasRet; + +/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */ + +/* Simplified escape analysis: check for intervening stores. */ +static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop) +{ + IRRef ref = (IRRef)(ir - J->cur.ir); /* The ref that might be stored. */ + for (ir++; ir < stop; ir++) + if (ir->op2 == ref && + (ir->o == IR_ASTORE || ir->o == IR_HSTORE || + ir->o == IR_USTORE || ir->o == IR_FSTORE)) + return ALIAS_MAY; /* Reference was stored and might alias. */ + return ALIAS_NO; /* Reference was not stored. */ +} + +/* Alias analysis for two different table references. */ +static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb) +{ + IRIns *taba = IR(ta), *tabb = IR(tb); + int newa, newb; + lua_assert(ta != tb); + lua_assert(irt_istab(taba->t) && irt_istab(tabb->t)); + /* Disambiguate new allocations. */ + newa = (taba->o == IR_TNEW || taba->o == IR_TDUP); + newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP); + if (newa && newb) + return ALIAS_NO; /* Two different allocations never alias. */ + if (newb) { /* At least one allocation? */ + IRIns *tmp = taba; taba = tabb; tabb = tmp; + } else if (!newa) { + return ALIAS_MAY; /* Anything else: we just don't know. 
*/ + } + return aa_escape(J, taba, tabb); +} + +/* Alias analysis for array and hash access using key-based disambiguation. */ +static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb) +{ + IRRef ka = refa->op2; + IRRef kb = refb->op2; + IRIns *keya, *keyb; + IRRef ta, tb; + if (refa == refb) + return ALIAS_MUST; /* Shortcut for same refs. */ + keya = IR(ka); + if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); } + keyb = IR(kb); + if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); } + ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1; + tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1; + if (ka == kb) { + /* Same key. Check for same table with different ref (NEWREF vs. HREF). */ + if (ta == tb) + return ALIAS_MUST; /* Same key, same table. */ + else + return aa_table(J, ta, tb); /* Same key, possibly different table. */ + } + if (irref_isk(ka) && irref_isk(kb)) + return ALIAS_NO; /* Different constant keys. */ + if (refa->o == IR_AREF) { + /* Disambiguate array references based on index arithmetic. */ + int32_t ofsa = 0, ofsb = 0; + IRRef basea = ka, baseb = kb; + lua_assert(refb->o == IR_AREF); + /* Gather base and offset from t[base] or t[base+-ofs]. */ + if (keya->o == IR_ADD && irref_isk(keya->op2)) { + basea = keya->op1; + ofsa = IR(keya->op2)->i; + if (basea == kb && ofsa != 0) + return ALIAS_NO; /* t[base+-ofs] vs. t[base]. */ + } + if (keyb->o == IR_ADD && irref_isk(keyb->op2)) { + baseb = keyb->op1; + ofsb = IR(keyb->op2)->i; + if (ka == baseb && ofsb != 0) + return ALIAS_NO; /* t[base] vs. t[base+-ofs]. */ + } + if (basea == baseb && ofsa != ofsb) + return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */ + } else { + /* Disambiguate hash references based on the type of their keys. */ + lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) && + (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF)); + if (!irt_sametype(keya->t, keyb->t)) + return ALIAS_NO; /* Different key types. */ + } + if (ta == tb) + return ALIAS_MAY; /* Same table, cannot disambiguate keys. */ + else + return aa_table(J, ta, tb); /* Try to disambiguate tables. */ +} + +/* Array and hash load forwarding. */ +static TRef fwd_ahload(jit_State *J, IRRef xref) +{ + IRIns *xr = IR(xref); + IRRef lim = xref; /* Search limit. */ + IRRef ref; + + /* Search for conflicting stores. */ + ref = J->chain[fins->o+IRDELTA_L2S]; + while (ref > xref) { + IRIns *store = IR(ref); + switch (aa_ahref(J, xr, IR(store->op1))) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ + case ALIAS_MUST: return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + + /* No conflicting store (yet): const-fold loads from allocations. */ + { + IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr; + IRRef tab = ir->op1; + ir = IR(tab); + if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) { + /* A NEWREF with a number key may end up pointing to the array part. + ** But it's referenced from HSTORE and not found in the ASTORE chain. + ** For now simply consider this a conflict without forwarding anything. + */ + if (xr->o == IR_AREF) { + IRRef ref2 = J->chain[IR_NEWREF]; + while (ref2 > tab) { + IRIns *newref = IR(ref2); + if (irt_isnum(IR(newref->op2)->t)) + goto cselim; + ref2 = newref->prev; + } + } + /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF. 
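
The alias analysis above answers every query with one of three values: ALIAS_NO (cannot alias), ALIAS_MAY (unknown), ALIAS_MUST (same location). For AREF keys, aa_ahref decomposes each index into a base plus a constant offset and decides from those parts. The toy model below restates only that offset rule; it is illustrative, with plain integers standing in for IR references, and the aa_index helper is invented for the example.

    #include <assert.h>

    typedef enum { ALIAS_NO, ALIAS_MAY, ALIAS_MUST } AliasRet;

    /* Simplified model of the AREF offset disambiguation in aa_ahref():
    ** an index is a base id plus a constant offset. */
    static AliasRet aa_index(int basea, int ofsa, int baseb, int ofsb)
    {
      if (basea == baseb)                           /* same base index */
        return ofsa == ofsb ? ALIAS_MUST : ALIAS_NO;
      return ALIAS_MAY;                             /* different bases: unknown */
    }

    int main(void)
    {
      enum { I = 1, J = 2 };                        /* opaque index ids */
      assert(aa_index(I, 0, I, 1) == ALIAS_NO);     /* t[i]   vs t[i+1] */
      assert(aa_index(I, 1, I, 1) == ALIAS_MUST);   /* t[i+1] vs t[i+1] */
      assert(aa_index(I, 0, J, 0) == ALIAS_MAY);    /* t[i]   vs t[j]   */
      return 0;
    }
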
+ ** But the above search for conflicting stores was limited by xref. + ** So continue searching, limited by the TNEW/TDUP. Store forwarding + ** is ok, too. A conflict does NOT limit the search for a matching load. + */ + while (ref > tab) { + IRIns *store = IR(ref); + switch (aa_ahref(J, xr, IR(store->op1))) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: goto cselim; /* Conflicting store. */ + case ALIAS_MUST: return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + lua_assert(ir->o != IR_TNEW || irt_isnil(fins->t)); + if (irt_ispri(fins->t)) { + return TREF_PRI(irt_type(fins->t)); + } else if (irt_isnum(fins->t) || irt_isstr(fins->t)) { + TValue keyv; + cTValue *tv; + IRIns *key = IR(xr->op2); + if (key->o == IR_KSLOT) key = IR(key->op1); + lj_ir_kvalue(J->L, &keyv, key); + tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv); + lua_assert(itype2irt(tv) == irt_type(fins->t)); + if (irt_isnum(fins->t)) + return lj_ir_knum_u64(J, tv->u64); + else + return lj_ir_kstr(J, strV(tv)); + } + /* Othwerwise: don't intern as a constant. */ + } + } + +cselim: + /* Try to find a matching load. Below the conflicting store, if any. */ + ref = J->chain[fins->o]; + while (ref > lim) { + IRIns *load = IR(ref); + if (load->op1 == xref) + return ref; /* Load forwarding. */ + ref = load->prev; + } + return 0; /* Conflict or no match. */ +} + +/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */ +static TRef fwd_aload_reassoc(jit_State *J) +{ + IRIns *irx = IR(fins->op1); + IRIns *key = IR(irx->op2); + if (key->o == IR_ADD && irref_isk(key->op2)) { + IRIns *add2 = IR(key->op1); + if (add2->o == IR_ADD && irref_isk(add2->op2) && + IR(key->op2)->i == -IR(add2->op2)->i) { + IRRef ref = J->chain[IR_AREF]; + IRRef lim = add2->op1; + if (irx->op1 > lim) lim = irx->op1; + while (ref > lim) { + IRIns *ir = IR(ref); + if (ir->op1 == irx->op1 && ir->op2 == add2->op1) + return fwd_ahload(J, ref); + ref = ir->prev; + } + } + } + return 0; +} + +/* ALOAD forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J) +{ + IRRef ref; + if ((ref = fwd_ahload(J, fins->op1)) || + (ref = fwd_aload_reassoc(J))) + return ref; + return EMITFOLD; +} + +/* HLOAD forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J) +{ + IRRef ref = fwd_ahload(J, fins->op1); + if (ref) + return ref; + return EMITFOLD; +} + +/* Check whether HREF of TNEW/TDUP can be folded to niltv. */ +int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J) +{ + IRRef lim = fins->op1; /* Search limit. */ + IRRef ref; + + /* The key for an ASTORE may end up in the hash part after a NEWREF. */ + if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) { + ref = J->chain[IR_ASTORE]; + while (ref > lim) { + if (ref < J->chain[IR_NEWREF]) + return 0; /* Conflict. */ + ref = IR(ref)->prev; + } + } + + /* Search for conflicting stores. */ + ref = J->chain[IR_HSTORE]; + while (ref > lim) { + IRIns *store = IR(ref); + if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO) + return 0; /* Conflict. */ + ref = store->prev; + } + + return 1; /* No conflict. Can fold to niltv. */ +} + +/* Check whether there's no aliasing NEWREF for the left operand. */ +int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim) +{ + IRRef ta = fins->op1; + IRRef ref = J->chain[IR_NEWREF]; + while (ref > lim) { + IRIns *newref = IR(ref); + if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO) + return 0; /* Conflict. */ + ref = newref->prev; + } + return 1; /* No conflict. Can safely FOLD/CSE. 
*/ +} + +/* ASTORE/HSTORE elimination. */ +TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J) +{ + IRRef xref = fins->op1; /* xREF reference. */ + IRRef val = fins->op2; /* Stored value reference. */ + IRIns *xr = IR(xref); + IRRef1 *refp = &J->chain[fins->o]; + IRRef ref = *refp; + while (ref > xref) { /* Search for redundant or conflicting stores. */ + IRIns *store = IR(ref); + switch (aa_ahref(J, xr, IR(store->op1))) { + case ALIAS_NO: + break; /* Continue searching. */ + case ALIAS_MAY: /* Store to MAYBE the same location. */ + if (store->op2 != val) /* Conflict if the value is different. */ + goto doemit; + break; /* Otherwise continue searching. */ + case ALIAS_MUST: /* Store to the same location. */ + if (store->op2 == val) /* Same value: drop the new store. */ + return DROPFOLD; + /* Different value: try to eliminate the redundant store. */ + if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ + IRIns *ir; + /* Check for any intervening guards (includes conflicting loads). */ + for (ir = IR(J->cur.nins-1); ir > store; ir--) + if (irt_isguard(ir->t)) + goto doemit; /* No elimination possible. */ + /* Remove redundant store from chain and replace with NOP. */ + *refp = store->prev; + store->o = IR_NOP; /* Unchained NOP -- does anybody care? */ + store->t.irt = IRT_NIL; + store->op1 = store->op2 = 0; + store->prev = 0; + /* Now emit the new store instead. */ + } + goto doemit; + } + ref = *(refp = &store->prev); + } +doemit: + return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ +} + +/* -- ULOAD forwarding ---------------------------------------------------- */ + +/* The current alias analysis for upvalues is very simplistic. It only +** disambiguates between the unique upvalues of the same function. +** This is good enough for now, since most upvalues are read-only. +** +** A more precise analysis would be feasible with the help of the parser: +** generate a unique key for every upvalue, even across all prototypes. +** Lacking a realistic use-case, it's unclear whether this is beneficial. +*/ +static AliasRet aa_uref(IRIns *refa, IRIns *refb) +{ + if (refa->o != refb->o) + return ALIAS_NO; /* Different UREFx type. */ + if (refa->op1 == refb->op1) { /* Same function. */ + if (refa->op2 == refb->op2) + return ALIAS_MUST; /* Same function, same upvalue idx. */ + else + return ALIAS_NO; /* Same function, different upvalue idx. */ + } else { /* Different functions, check disambiguation hash values. */ + if (((refa->op2 ^ refb->op2) & 0xff)) + return ALIAS_NO; /* Upvalues with different hash values cannot alias. */ + else + return ALIAS_MAY; /* No conclusion can be drawn for same hash value. */ + } +} + +/* ULOAD forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J) +{ + IRRef uref = fins->op1; + IRRef lim = uref; /* Search limit. */ + IRIns *xr = IR(uref); + IRRef ref; + + /* Search for conflicting stores. */ + ref = J->chain[IR_USTORE]; + while (ref > uref) { + IRIns *store = IR(ref); + switch (aa_uref(xr, IR(store->op1))) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ + case ALIAS_MUST: return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + +cselim: + /* Try to find a matching load. Below the conflicting store, if any. */ + return lj_opt_cselim(J, lim); +} + +/* USTORE elimination. */ +TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J) +{ + IRRef xref = fins->op1; /* xREF reference. */ + IRRef val = fins->op2; /* Stored value reference. 
*/ + IRIns *xr = IR(xref); + IRRef1 *refp = &J->chain[IR_USTORE]; + IRRef ref = *refp; + while (ref > xref) { /* Search for redundant or conflicting stores. */ + IRIns *store = IR(ref); + switch (aa_uref(xr, IR(store->op1))) { + case ALIAS_NO: + break; /* Continue searching. */ + case ALIAS_MAY: /* Store to MAYBE the same location. */ + if (store->op2 != val) /* Conflict if the value is different. */ + goto doemit; + break; /* Otherwise continue searching. */ + case ALIAS_MUST: /* Store to the same location. */ + if (store->op2 == val) /* Same value: drop the new store. */ + return DROPFOLD; + /* Different value: try to eliminate the redundant store. */ + if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ + IRIns *ir; + /* Check for any intervening guards (includes conflicting loads). */ + for (ir = IR(J->cur.nins-1); ir > store; ir--) + if (irt_isguard(ir->t)) + goto doemit; /* No elimination possible. */ + /* Remove redundant store from chain and replace with NOP. */ + *refp = store->prev; + store->o = IR_NOP; /* Unchained NOP -- does anybody care? */ + store->t.irt = IRT_NIL; + store->op1 = store->op2 = 0; + store->prev = 0; + /* Now emit the new store instead. */ + } + goto doemit; + } + ref = *(refp = &store->prev); + } +doemit: + return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ +} + +/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */ + +/* Alias analysis for field access. +** Field loads are cheap and field stores are rare. +** Simple disambiguation based on field types is good enough. +*/ +static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb) +{ + if (refa->op2 != refb->op2) + return ALIAS_NO; /* Different fields. */ + if (refa->op1 == refb->op1) + return ALIAS_MUST; /* Same field, same object. */ + else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM) + return aa_table(J, refa->op1, refb->op1); /* Disambiguate tables. */ + else + return ALIAS_MAY; /* Same field, possibly different object. */ +} + +/* Only the loads for mutable fields end up here (see FOLD). */ +TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J) +{ + IRRef oref = fins->op1; /* Object reference. */ + IRRef fid = fins->op2; /* Field ID. */ + IRRef lim = oref; /* Search limit. */ + IRRef ref; + + /* Search for conflicting stores. */ + ref = J->chain[IR_FSTORE]; + while (ref > oref) { + IRIns *store = IR(ref); + switch (aa_fref(J, fins, IR(store->op1))) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ + case ALIAS_MUST: return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + + /* No conflicting store: const-fold field loads from allocations. */ + if (fid == IRFL_TAB_META) { + IRIns *ir = IR(oref); + if (ir->o == IR_TNEW || ir->o == IR_TDUP) + return lj_ir_knull(J, IRT_TAB); + } + +cselim: + /* Try to find a matching load. Below the conflicting store, if any. */ + return lj_opt_cselim(J, lim); +} + +/* FSTORE elimination. */ +TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J) +{ + IRRef fref = fins->op1; /* FREF reference. */ + IRRef val = fins->op2; /* Stored value reference. */ + IRIns *xr = IR(fref); + IRRef1 *refp = &J->chain[IR_FSTORE]; + IRRef ref = *refp; + while (ref > fref) { /* Search for redundant or conflicting stores. */ + IRIns *store = IR(ref); + switch (aa_fref(J, xr, IR(store->op1))) { + case ALIAS_NO: + break; /* Continue searching. */ + case ALIAS_MAY: + if (store->op2 != val) /* Conflict if the value is different. 
*/ + goto doemit; + break; /* Otherwise continue searching. */ + case ALIAS_MUST: + if (store->op2 == val) /* Same value: drop the new store. */ + return DROPFOLD; + /* Different value: try to eliminate the redundant store. */ + if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ + IRIns *ir; + /* Check for any intervening guards or conflicting loads. */ + for (ir = IR(J->cur.nins-1); ir > store; ir--) + if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2)) + goto doemit; /* No elimination possible. */ + /* Remove redundant store from chain and replace with NOP. */ + *refp = store->prev; + store->o = IR_NOP; /* Unchained NOP -- does anybody care? */ + store->t.irt = IRT_NIL; + store->op1 = store->op2 = 0; + store->prev = 0; + /* Now emit the new store instead. */ + } + goto doemit; + } + ref = *(refp = &store->prev); + } +doemit: + return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ +} + +/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */ + +/* Find cdata allocation for a reference (if any). */ +static IRIns *aa_findcnew(jit_State *J, IRIns *ir) +{ + while (ir->o == IR_ADD) { + if (!irref_isk(ir->op1)) { + IRIns *ir1 = aa_findcnew(J, IR(ir->op1)); /* Left-recursion. */ + if (ir1) return ir1; + } + if (irref_isk(ir->op2)) return NULL; + ir = IR(ir->op2); /* Flatten right-recursion. */ + } + return ir->o == IR_CNEW ? ir : NULL; +} + +/* Alias analysis for two cdata allocations. */ +static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb) +{ + IRIns *cnewa = aa_findcnew(J, refa); + IRIns *cnewb = aa_findcnew(J, refb); + if (cnewa == cnewb) + return ALIAS_MAY; /* Same allocation or neither is an allocation. */ + if (cnewa && cnewb) + return ALIAS_NO; /* Two different allocations never alias. */ + if (cnewb) { cnewa = cnewb; refb = refa; } + return aa_escape(J, cnewa, refb); +} + +/* Alias analysis for XLOAD/XSTORE. */ +static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb) +{ + ptrdiff_t ofsa = 0, ofsb = 0; + IRIns *refb = IR(xb->op1); + IRIns *basea = refa, *baseb = refb; + /* This implements (very) strict aliasing rules. + ** Different types do NOT alias, except for differences in signedness. + ** NYI: this also prevents type punning through unions. + */ + if (irt_sametype(xa->t, xb->t)) { + if (refa == refb) + return ALIAS_MUST; /* Shortcut for same refs with identical type. */ + } else if (!(irt_typerange(xa->t, IRT_I8, IRT_U64) && + ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1)) { + return ALIAS_NO; + } + /* Offset-based disambiguation. */ + if (refa->o == IR_ADD && irref_isk(refa->op2)) { + IRIns *irk = IR(refa->op2); + basea = IR(refa->op1); + ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : + (ptrdiff_t)irk->i; + if (basea == refb && ofsa != 0) + return ALIAS_NO; /* base+-ofs vs. base. */ + } + if (refb->o == IR_ADD && irref_isk(refb->op2)) { + IRIns *irk = IR(refb->op2); + baseb = IR(refb->op1); + ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : + (ptrdiff_t)irk->i; + if (refa == baseb && ofsb != 0) + return ALIAS_NO; /* base vs. base+-ofs. */ + } + if (basea == baseb) { + /* This assumes strictly-typed, non-overlapping accesses. */ + if (ofsa != ofsb) + return ALIAS_NO; /* base+-o1 vs. base+-o2 and o1 != o2. */ + return ALIAS_MUST; /* Unsigned vs. signed access to the same address. */ + } + /* NYI: structural disambiguation. */ + return aa_cnew(J, basea, baseb); /* Try to disambiguate allocations. 
*/ +} + +/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */ +static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2) +{ + IRRef ref = J->chain[op]; + IRRef lim = op1; + if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; } + while (ref > lim) { + IRIns *ir = IR(ref); + if (ir->op1 == op1 && ir->op2 == op2) + return ref; + ref = ir->prev; + } + return 0; +} + +/* Reassociate index references. */ +static IRRef reassoc_xref(jit_State *J, IRIns *ir) +{ + ptrdiff_t ofs = 0; + if (ir->o == IR_ADD && irref_isk(ir->op2)) { /* Get constant offset. */ + IRIns *irk = IR(ir->op2); + ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 : + (ptrdiff_t)irk->i; + ir = IR(ir->op1); + } + if (ir->o == IR_ADD) { /* Add of base + index. */ + /* Index ref > base ref for loop-carried dependences. Only check op1. */ + IRIns *ir2, *ir1 = IR(ir->op1); + int32_t shift = 0; + IRRef idxref; + /* Determine index shifts. Don't bother with IR_MUL here. */ + if (ir1->o == IR_BSHL && irref_isk(ir1->op2)) + shift = IR(ir1->op2)->i; + else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2) + shift = 1; + else + ir1 = ir; + ir2 = IR(ir1->op1); + /* A non-reassociated add. Must be a loop-carried dependence. */ + if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2)) + ofs += (ptrdiff_t)IR(ir2->op2)->i << shift; + else + return 0; + idxref = ir2->op1; + /* Try to CSE the reassociated chain. Give up if not found. */ + if (ir1 != ir && + !(idxref = reassoc_trycse(J, ir1->o, idxref, + ir1->o == IR_BSHL ? ir1->op2 : idxref))) + return 0; + if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2))) + return 0; + if (ofs != 0) { + IRRef refk = tref_ref(lj_ir_kintp(J, ofs)); + if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk))) + return 0; + } + return idxref; /* Success, found a reassociated index reference. Phew. */ + } + return 0; /* Failure. */ +} + +/* XLOAD forwarding. */ +TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J) +{ + IRRef xref = fins->op1; + IRIns *xr = IR(xref); + IRRef lim = xref; /* Search limit. */ + IRRef ref; + + if ((fins->op2 & IRXLOAD_READONLY)) + goto cselim; + if ((fins->op2 & IRXLOAD_VOLATILE)) + goto doemit; + + /* Search for conflicting stores. */ + ref = J->chain[IR_XSTORE]; +retry: + if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS]; + if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR]; + while (ref > lim) { + IRIns *store = IR(ref); + switch (aa_xref(J, xr, fins, store)) { + case ALIAS_NO: break; /* Continue searching. */ + case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */ + case ALIAS_MUST: + /* Emit conversion if the loaded type doesn't match the forwarded type. */ + if (!irt_sametype(fins->t, IR(store->op2)->t)) { + IRType st = irt_type(fins->t); + if (st == IRT_I8 || st == IRT_I16) { /* Trunc + sign-extend. */ + st |= IRCONV_SEXT; + } else if (st == IRT_U8 || st == IRT_U16) { /* Trunc + zero-extend. */ + } else if (st == IRT_INT && !irt_isint(IR(store->op2)->t)) { + st = irt_type(IR(store->op2)->t); /* Needs dummy CONV.int.*. */ + } else { /* I64/U64 are boxed, U32 is hidden behind a CONV.num.u32. */ + goto store_fwd; + } + fins->ot = IRTI(IR_CONV); + fins->op1 = store->op2; + fins->op2 = (IRT_INT<<5)|st; + return RETRYFOLD; + } + store_fwd: + return store->op2; /* Store forwarding. */ + } + ref = store->prev; + } + +cselim: + /* Try to find a matching load. Below the conflicting store, if any. 
*/ + ref = J->chain[IR_XLOAD]; + while (ref > lim) { + /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */ + if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t)) + return ref; + ref = IR(ref)->prev; + } + + /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */ + if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] && + xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) { + ref = J->chain[IR_XSTORE]; + while (ref > lim) /* Skip stores that have already been checked. */ + ref = IR(ref)->prev; + lim = xref; + xr = IR(xref); + goto retry; /* Retry with the reassociated reference. */ + } +doemit: + return EMITFOLD; +} + +/* XSTORE elimination. */ +TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J) +{ + IRRef xref = fins->op1; + IRIns *xr = IR(xref); + IRRef lim = xref; /* Search limit. */ + IRRef val = fins->op2; /* Stored value reference. */ + IRRef1 *refp = &J->chain[IR_XSTORE]; + IRRef ref = *refp; + if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS]; + if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR]; + while (ref > lim) { /* Search for redundant or conflicting stores. */ + IRIns *store = IR(ref); + switch (aa_xref(J, xr, fins, store)) { + case ALIAS_NO: + break; /* Continue searching. */ + case ALIAS_MAY: + if (store->op2 != val) /* Conflict if the value is different. */ + goto doemit; + break; /* Otherwise continue searching. */ + case ALIAS_MUST: + if (store->op2 == val) /* Same value: drop the new store. */ + return DROPFOLD; + /* Different value: try to eliminate the redundant store. */ + if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */ + IRIns *ir; + /* Check for any intervening guards or any XLOADs (no AA performed). */ + for (ir = IR(J->cur.nins-1); ir > store; ir--) + if (irt_isguard(ir->t) || ir->o == IR_XLOAD) + goto doemit; /* No elimination possible. */ + /* Remove redundant store from chain and replace with NOP. */ + *refp = store->prev; + store->o = IR_NOP; /* Unchained NOP -- does anybody care? */ + store->t.irt = IRT_NIL; + store->op1 = store->op2 = 0; + store->prev = 0; + /* Now emit the new store instead. */ + } + goto doemit; + } + ref = *(refp = &store->prev); + } +doemit: + return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ +} + +/* -- Forwarding of lj_tab_len -------------------------------------------- */ + +/* This is rather simplistic right now, but better than nothing. */ +TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J) +{ + IRRef tab = fins->op1; /* Table reference. */ + IRRef lim = tab; /* Search limit. */ + IRRef ref; + + /* Any ASTORE is a conflict and limits the search. */ + if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE]; + + /* Search for conflicting HSTORE with numeric key. */ + ref = J->chain[IR_HSTORE]; + while (ref > lim) { + IRIns *store = IR(ref); + IRIns *href = IR(store->op1); + IRIns *key = IR(href->op2); + if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) { + lim = ref; /* Conflicting store found, limits search for TLEN. */ + break; + } + ref = store->prev; + } + + /* Try to find a matching load. Below the conflicting store, if any. */ + return lj_opt_cselim(J, lim); +} + +/* -- ASTORE/HSTORE previous type analysis -------------------------------- */ + +/* Check whether the previous value for a table store is non-nil. +** This can be derived either from a previous store or from a previous +** load (because all loads from tables perform a type check). 
+** +** The result of the analysis can be used to avoid the metatable check +** and the guard against HREF returning niltv. Both of these are cheap, +** so let's not spend too much effort on the analysis. +** +** A result of 1 is exact: previous value CANNOT be nil. +** A result of 0 is inexact: previous value MAY be nil. +*/ +int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref) +{ + /* First check stores. */ + IRRef ref = J->chain[loadop+IRDELTA_L2S]; + while (ref > xref) { + IRIns *store = IR(ref); + if (store->op1 == xref) { /* Same xREF. */ + /* A nil store MAY alias, but a non-nil store MUST alias. */ + return !irt_isnil(store->t); + } else if (irt_isnil(store->t)) { /* Must check any nil store. */ + IRRef skref = IR(store->op1)->op2; + IRRef xkref = IR(xref)->op2; + /* Same key type MAY alias. Need ALOAD check due to multiple int types. */ + if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) { + if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref)) + return 0; /* A nil store with same const key or var key MAY alias. */ + /* Different const keys CANNOT alias. */ + } /* Different key types CANNOT alias. */ + } /* Other non-nil stores MAY alias. */ + ref = store->prev; + } + + /* Check loads since nothing could be derived from stores. */ + ref = J->chain[loadop]; + while (ref > xref) { + IRIns *load = IR(ref); + if (load->op1 == xref) { /* Same xREF. */ + /* A nil load MAY alias, but a non-nil load MUST alias. */ + return !irt_isnil(load->t); + } /* Other non-nil loads MAY alias. */ + ref = load->prev; + } + return 0; /* Nothing derived at all, previous value MAY be nil. */ +} + +/* ------------------------------------------------------------------------ */ + +#undef IR +#undef fins +#undef fright + +#endif diff --git a/src/luajit2/src/lj_opt_narrow.c b/src/luajit2/src/lj_opt_narrow.c new file mode 100644 index 0000000000..40696c02e4 --- /dev/null +++ b/src/luajit2/src/lj_opt_narrow.c @@ -0,0 +1,647 @@ +/* +** NARROW: Narrowing of numbers to integers (double to int32_t). +** STRIPOV: Stripping of overflow checks. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_opt_narrow_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_str.h" +#include "lj_bc.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_vm.h" + +/* Rationale for narrowing optimizations: +** +** Lua has only a single number type and this is a FP double by default. +** Narrowing doubles to integers does not pay off for the interpreter on a +** current-generation x86/x64 machine. Most FP operations need the same +** amount of execution resources as their integer counterparts, except +** with slightly longer latencies. Longer latencies are a non-issue for +** the interpreter, since they are usually hidden by other overhead. +** +** The total CPU execution bandwidth is the sum of the bandwidth of the FP +** and the integer units, because they execute in parallel. The FP units +** have an equal or higher bandwidth than the integer units. Not using +** them means losing execution bandwidth. Moving work away from them to +** the already quite busy integer units is a losing proposition. +** +** The situation for JIT-compiled code is a bit different: the higher code +** density makes the extra latencies much more visible. Tight loops expose +** the latencies for updating the induction variables. 
Array indexing +** requires narrowing conversions with high latencies and additional +** guards (to check that the index is really an integer). And many common +** optimizations only work on integers. +** +** One solution would be speculative, eager narrowing of all number loads. +** This causes many problems, like losing -0 or the need to resolve type +** mismatches between traces. It also effectively forces the integer type +** to have overflow-checking semantics. This impedes many basic +** optimizations and requires adding overflow checks to all integer +** arithmetic operations (whereas FP arithmetics can do without). +** +** Always replacing an FP op with an integer op plus an overflow check is +** counter-productive on a current-generation super-scalar CPU. Although +** the overflow check branches are highly predictable, they will clog the +** execution port for the branch unit and tie up reorder buffers. This is +** turning a pure data-flow dependency into a different data-flow +** dependency (with slightly lower latency) *plus* a control dependency. +** In general, you don't want to do this since latencies due to data-flow +** dependencies can be well hidden by out-of-order execution. +** +** A better solution is to keep all numbers as FP values and only narrow +** when it's beneficial to do so. LuaJIT uses predictive narrowing for +** induction variables and demand-driven narrowing for index expressions, +** integer arguments and bit operations. Additionally it can eliminate or +** hoist most of the resulting overflow checks. Regular arithmetic +** computations are never narrowed to integers. +** +** The integer type in the IR has convenient wrap-around semantics and +** ignores overflow. Extra operations have been added for +** overflow-checking arithmetic (ADDOV/SUBOV) instead of an extra type. +** Apart from reducing overall complexity of the compiler, this also +** nicely solves the problem where you want to apply algebraic +** simplifications to ADD, but not to ADDOV. And the x86/x64 assembler can +** use lea instead of an add for integer ADD, but not for ADDOV (lea does +** not affect the flags, but it helps to avoid register moves). +** +** +** All of the above has to be reconsidered for architectures with slow FP +** operations or without a hardware FPU. The dual-number mode of LuaJIT +** addresses this issue. Arithmetic operations are performed on integers +** as far as possible and overflow checks are added as needed. +** +** This implies that narrowing for integer arguments and bit operations +** should also strip overflow checks, e.g. replace ADDOV with ADD. The +** original overflow guards are weak and can be eliminated by DCE, if +** there's no other use. +** +** A slight twist is that it's usually beneficial to use overflow-checked +** integer arithmetics if all inputs are already integers. This is the only +** change that affects the single-number mode, too. +*/ + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) +#define fins (&J->fold.ins) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) + +/* -- Elimination of narrowing type conversions --------------------------- */ + +/* Narrowing of index expressions and bit operations is demand-driven. The +** trace recorder emits a narrowing type conversion (CONV.int.num or TOBIT) +** in all of these cases (e.g. 
array indexing or string indexing). FOLD +** already takes care of eliminating simple redundant conversions like +** CONV.int.num(CONV.num.int(x)) ==> x. +** +** But the surrounding code is FP-heavy and arithmetic operations are +** performed on FP numbers (for the single-number mode). Consider a common +** example such as 'x=t[i+1]', with 'i' already an integer (due to induction +** variable narrowing). The index expression would be recorded as +** CONV.int.num(ADD(CONV.num.int(i), 1)) +** which is clearly suboptimal. +** +** One can do better by recursively backpropagating the narrowing type +** conversion across FP arithmetic operations. This turns FP ops into +** their corresponding integer counterparts. Depending on the semantics of +** the conversion they also need to check for overflow. Currently only ADD +** and SUB are supported. +** +** The above example can be rewritten as +** ADDOV(CONV.int.num(CONV.num.int(i)), 1) +** and then into ADDOV(i, 1) after folding of the conversions. The original +** FP ops remain in the IR and are eliminated by DCE since all references to +** them are gone. +** +** [In dual-number mode the trace recorder already emits ADDOV etc., but +** this can be further reduced. See below.] +** +** Special care has to be taken to avoid narrowing across an operation +** which is potentially operating on non-integral operands. One obvious +** case is when an expression contains a non-integral constant, but ends +** up as an integer index at runtime (like t[x+1.5] with x=0.5). +** +** Operations with two non-constant operands illustrate a similar problem +** (like t[a+b] with a=1.5 and b=2.5). Backpropagation has to stop there, +** unless it can be proven that either operand is integral (e.g. by CSEing +** a previous conversion). As a not-so-obvious corollary this logic also +** applies for a whole expression tree (e.g. t[(a+1)+(b+1)]). +** +** Correctness of the transformation is guaranteed by avoiding to expand +** the tree by adding more conversions than the one we would need to emit +** if not backpropagating. TOBIT employs a more optimistic rule, because +** the conversion has special semantics, designed to make the life of the +** compiler writer easier. ;-) +** +** Using on-the-fly backpropagation of an expression tree doesn't work +** because it's unknown whether the transform is correct until the end. +** This either requires IR rollback and cache invalidation for every +** subtree or a two-pass algorithm. The former didn't work out too well, +** so the code now combines a recursive collector with a stack-based +** emitter. +** +** [A recursive backpropagation algorithm with backtracking, employing +** skip-list lookup and round-robin caching, emitting stack operations +** on-the-fly for a stack-based interpreter -- and all of that in a meager +** kilobyte? Yep, compilers are a great treasure chest. Throw away your +** textbooks and read the codebase of a compiler today!] +** +** There's another optimization opportunity for array indexing: it's +** always accompanied by an array bounds-check. The outermost overflow +** check may be delegated to the ABC operation. This works because ABC is +** an unsigned comparison and wrap-around due to overflow creates negative +** numbers. +** +** But this optimization is only valid for constants that cannot overflow +** an int32_t into the range of valid array indexes [0..2^27+1). A check +** for +-2^30 is safe since -2^31 - 2^30 wraps to 2^30 and 2^31-1 + 2^30 +** wraps to -2^30-1. 
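To make the wrap-around argument concrete, here is a small standalone check (the array-size bound and sample constants are illustrative): an index computed with a wrapped int32 addition always fails the unsigned bounds comparison, and the last two lines exercise the +-2^30 constant range test in the form the code below uses.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint32_t asize = 1u << 27;              /* upper bound on valid array sizes */
  int32_t i = 0x7fffffff, k = 1;
  int32_t idx = (int32_t)((uint32_t)i + (uint32_t)k);   /* wraps to -2^31 on two's-complement targets */
  printf("wrapped idx %d fails unsigned ABC: %d\n", idx, (uint32_t)idx >= asize);
  printf("k=1 accepted: %d, k=2^30 rejected: %d\n",
         (uint32_t)1 + 0x40000000u < 0x80000000u,
         !((uint32_t)0x40000000 + 0x40000000u < 0x80000000u));
  return 0;
}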
+** +** It's also good enough in practice, since e.g. t[i+1] or t[i-10] are +** quite common. So the above example finally ends up as ADD(i, 1)! +** +** Later on, the assembler is able to fuse the whole array reference and +** the ADD into the memory operands of loads and other instructions. This +** is why LuaJIT is able to generate very pretty (and fast) machine code +** for array indexing. And that, my dear, concludes another story about +** one of the hidden secrets of LuaJIT ... +*/ + +/* Maximum backpropagation depth and maximum stack size. */ +#define NARROW_MAX_BACKPROP 100 +#define NARROW_MAX_STACK 256 + +/* The stack machine has a 32 bit instruction format: [IROpT | IRRef1] +** The lower 16 bits hold a reference (or 0). The upper 16 bits hold +** the IR opcode + type or one of the following special opcodes: +*/ +enum { + NARROW_REF, /* Push ref. */ + NARROW_CONV, /* Push conversion of ref. */ + NARROW_SEXT, /* Push sign-extension of ref. */ + NARROW_INT /* Push KINT ref. The next code holds an int32_t. */ +}; + +typedef uint32_t NarrowIns; + +#define NARROWINS(op, ref) (((op) << 16) + (ref)) +#define narrow_op(ins) ((IROpT)((ins) >> 16)) +#define narrow_ref(ins) ((IRRef1)(ins)) + +/* Context used for narrowing of type conversions. */ +typedef struct NarrowConv { + jit_State *J; /* JIT compiler state. */ + NarrowIns *sp; /* Current stack pointer. */ + NarrowIns *maxsp; /* Maximum stack pointer minus redzone. */ + int lim; /* Limit on the number of emitted conversions. */ + IRRef mode; /* Conversion mode (IRCONV_*). */ + IRType t; /* Destination type: IRT_INT or IRT_I64. */ + NarrowIns stack[NARROW_MAX_STACK]; /* Stack holding stack-machine code. */ +} NarrowConv; + +/* Lookup a reference in the backpropagation cache. */ +static BPropEntry *narrow_bpc_get(jit_State *J, IRRef1 key, IRRef mode) +{ + ptrdiff_t i; + for (i = 0; i < BPROP_SLOTS; i++) { + BPropEntry *bp = &J->bpropcache[i]; + /* Stronger checks are ok, too. */ + if (bp->key == key && bp->mode >= mode && + ((bp->mode ^ mode) & IRCONV_MODEMASK) == 0) + return bp; + } + return NULL; +} + +/* Add an entry to the backpropagation cache. */ +static void narrow_bpc_set(jit_State *J, IRRef1 key, IRRef1 val, IRRef mode) +{ + uint32_t slot = J->bpropslot; + BPropEntry *bp = &J->bpropcache[slot]; + J->bpropslot = (slot + 1) & (BPROP_SLOTS-1); + bp->key = key; + bp->val = val; + bp->mode = mode; +} + +/* Backpropagate overflow stripping. */ +static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth) +{ + jit_State *J = nc->J; + IRIns *ir = IR(ref); + if (ir->o == IR_ADDOV || ir->o == IR_SUBOV || + (ir->o == IR_MULOV && (nc->mode & IRCONV_CONVMASK) == IRCONV_ANY)) { + BPropEntry *bp = narrow_bpc_get(nc->J, ref, IRCONV_TOBIT); + if (bp) { + ref = bp->val; + } else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) { + narrow_stripov_backprop(nc, ir->op1, depth); + narrow_stripov_backprop(nc, ir->op2, depth); + *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref); + return; + } + } + *nc->sp++ = NARROWINS(NARROW_REF, ref); +} + +/* Backpropagate narrowing conversion. Return number of needed conversions. */ +static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth) +{ + jit_State *J = nc->J; + IRIns *ir = IR(ref); + IRRef cref; + + /* Check the easy cases first. 
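The 32 bit stack-machine word used above packs the opcode (or special code) into the high halfword and an IR reference into the low halfword. A quick standalone round trip of the packing macros, with placeholder opcode and reference values:

#include <assert.h>
#include <stdint.h>

typedef uint32_t NarrowIns;
typedef uint16_t IRRef1;
typedef uint16_t IROpT;

#define NARROWINS(op, ref)  (((op) << 16) + (ref))
#define narrow_op(ins)      ((IROpT)((ins) >> 16))
#define narrow_ref(ins)     ((IRRef1)(ins))

int main(void)
{
  IROpT op = 0x1234;        /* placeholder opcode+type, kept below 0x8000 */
  IRRef1 ref = 0x0042;      /* placeholder IR reference */
  NarrowIns ins = NARROWINS(op, ref);
  assert(narrow_op(ins) == op && narrow_ref(ins) == ref);
  return 0;
}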
*/ + if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) { + if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY) + narrow_stripov_backprop(nc, ir->op1, depth+1); + else + *nc->sp++ = NARROWINS(NARROW_REF, ir->op1); /* Undo conversion. */ + if (nc->t == IRT_I64) + *nc->sp++ = NARROWINS(NARROW_SEXT, 0); /* Sign-extend integer. */ + return 0; + } else if (ir->o == IR_KNUM) { /* Narrow FP constant. */ + lua_Number n = ir_knum(ir)->n; + if ((nc->mode & IRCONV_CONVMASK) == IRCONV_TOBIT) { + /* Allows a wider range of constants. */ + int64_t k64 = (int64_t)n; + if (n == (lua_Number)k64) { /* Only if const doesn't lose precision. */ + *nc->sp++ = NARROWINS(NARROW_INT, 0); + *nc->sp++ = (NarrowIns)k64; /* But always truncate to 32 bits. */ + return 0; + } + } else { + int32_t k = lj_num2int(n); + if (n == (lua_Number)k) { /* Only if constant is really an integer. */ + *nc->sp++ = NARROWINS(NARROW_INT, 0); + *nc->sp++ = (NarrowIns)k; + return 0; + } + } + return 10; /* Never narrow other FP constants (this is rare). */ + } + + /* Try to CSE the conversion. Stronger checks are ok, too. */ + cref = J->chain[fins->o]; + while (cref > ref) { + IRIns *cr = IR(cref); + if (cr->op1 == ref && + (fins->o == IR_TOBIT || + ((cr->op2 & IRCONV_MODEMASK) == (nc->mode & IRCONV_MODEMASK) && + irt_isguard(cr->t) >= irt_isguard(fins->t)))) { + *nc->sp++ = NARROWINS(NARROW_REF, cref); + return 0; /* Already there, no additional conversion needed. */ + } + cref = cr->prev; + } + + /* Backpropagate across ADD/SUB. */ + if (ir->o == IR_ADD || ir->o == IR_SUB) { + /* Try cache lookup first. */ + IRRef mode = nc->mode; + BPropEntry *bp; + /* Inner conversions need a stronger check. */ + if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX && depth > 0) + mode += IRCONV_CHECK-IRCONV_INDEX; + bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode); + if (bp) { + *nc->sp++ = NARROWINS(NARROW_REF, bp->val); + return 0; + } else if (nc->t == IRT_I64) { + /* Try sign-extending from an existing (checked) conversion to int. */ + mode = (IRT_INT<<5)|IRT_NUM|IRCONV_INDEX; + bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode); + if (bp) { + *nc->sp++ = NARROWINS(NARROW_REF, bp->val); + *nc->sp++ = NARROWINS(NARROW_SEXT, 0); + return 0; + } + } + if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) { + NarrowIns *savesp = nc->sp; + int count = narrow_conv_backprop(nc, ir->op1, depth); + count += narrow_conv_backprop(nc, ir->op2, depth); + if (count <= nc->lim) { /* Limit total number of conversions. */ + *nc->sp++ = NARROWINS(IRT(ir->o, nc->t), ref); + return count; + } + nc->sp = savesp; /* Too many conversions, need to backtrack. */ + } + } + + /* Otherwise add a conversion. */ + *nc->sp++ = NARROWINS(NARROW_CONV, ref); + return 1; +} + +/* Emit the conversions collected during backpropagation. */ +static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc) +{ + /* The fins fields must be saved now -- emitir() overwrites them. */ + IROpT guardot = irt_isguard(fins->t) ? IRTG(IR_ADDOV-IR_ADD, 0) : 0; + IROpT convot = fins->ot; + IRRef1 convop2 = fins->op2; + NarrowIns *next = nc->stack; /* List of instructions from backpropagation. */ + NarrowIns *last = nc->sp; + NarrowIns *sp = nc->stack; /* Recycle the stack to store operands. */ + while (next < last) { /* Simple stack machine to process the ins. list. */ + NarrowIns ref = *next++; + IROpT op = narrow_op(ref); + if (op == NARROW_REF) { + *sp++ = ref; + } else if (op == NARROW_CONV) { + *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. 
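The "constant is really an integer" test used when narrowing a KNUM above is just a convert-and-compare round trip. A standalone version of that test, restricted to values well inside the int32 range (which the real lj_num2int conversion handles more carefully):

#include <stdio.h>
#include <stdint.h>

static int numisint(double n)  /* assumes n is well inside the int32 range */
{
  int32_t k = (int32_t)n;
  return n == (double)k;
}

int main(void)
{
  printf("%d %d %d\n", numisint(3.0), numisint(3.5), numisint(-0.0));  /* 1 0 1 */
  return 0;
}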
*/ + } else if (op == NARROW_SEXT) { + lua_assert(sp >= nc->stack+1); + sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1], + (IRT_I64<<5)|IRT_INT|IRCONV_SEXT); + } else if (op == NARROW_INT) { + lua_assert(next < last); + *sp++ = nc->t == IRT_I64 ? + lj_ir_kint64(J, (int64_t)(int32_t)*next++) : + lj_ir_kint(J, *next++); + } else { /* Regular IROpT. Pops two operands and pushes one result. */ + IRRef mode = nc->mode; + lua_assert(sp >= nc->stack+2); + sp--; + /* Omit some overflow checks for array indexing. See comments above. */ + if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) { + if (next == last && irref_isk(narrow_ref(sp[0])) && + (uint32_t)IR(narrow_ref(sp[0]))->i + 0x40000000u < 0x80000000u) + guardot = 0; + else /* Otherwise cache a stronger check. */ + mode += IRCONV_CHECK-IRCONV_INDEX; + } + sp[-1] = emitir(op+guardot, sp[-1], sp[0]); + /* Add to cache. */ + if (narrow_ref(ref)) + narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode); + } + } + lua_assert(sp == nc->stack+1); + return nc->stack[0]; +} + +/* Narrow a type conversion of an arithmetic operation. */ +TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J) +{ + if ((J->flags & JIT_F_OPT_NARROW)) { + NarrowConv nc; + nc.J = J; + nc.sp = nc.stack; + nc.maxsp = &nc.stack[NARROW_MAX_STACK-4]; + nc.t = irt_type(fins->t); + if (fins->o == IR_TOBIT) { + nc.mode = IRCONV_TOBIT; /* Used only in the backpropagation cache. */ + nc.lim = 2; /* TOBIT can use a more optimistic rule. */ + } else { + nc.mode = fins->op2; + nc.lim = 1; + } + if (narrow_conv_backprop(&nc, fins->op1, 0) <= nc.lim) + return narrow_conv_emit(J, &nc); + } + return NEXTFOLD; +} + +/* -- Narrowing of implicit conversions ----------------------------------- */ + +/* Recursively strip overflow checks. */ +static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode) +{ + IRRef ref = tref_ref(tr); + IRIns *ir = IR(ref); + int op = ir->o; + if (op >= IR_ADDOV && op <= lastop) { + BPropEntry *bp = narrow_bpc_get(J, ref, mode); + if (bp) { + return TREF(bp->val, irt_t(IR(bp->val)->t)); + } else { + IRRef op1 = ir->op1, op2 = ir->op2; /* The IR may be reallocated. */ + op1 = narrow_stripov(J, op1, lastop, mode); + op2 = narrow_stripov(J, op2, lastop, mode); + tr = emitir(IRT(op - IR_ADDOV + IR_ADD, + ((mode & IRCONV_DSTMASK) >> IRCONV_DSH)), op1, op2); + narrow_bpc_set(J, ref, tref_ref(tr), mode); + } + } else if (LJ_64 && (mode & IRCONV_SEXT) && !irt_is64(ir->t)) { + tr = emitir(IRT(IR_CONV, IRT_INTP), tr, mode); + } + return tr; +} + +/* Narrow array index. */ +TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr) +{ + IRIns *ir; + lua_assert(tref_isnumber(tr)); + if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ + return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX); + /* Omit some overflow checks for array indexing. See comments above. */ + ir = IR(tref_ref(tr)); + if ((ir->o == IR_ADDOV || ir->o == IR_SUBOV) && irref_isk(ir->op2) && + (uint32_t)IR(ir->op2)->i + 0x40000000u < 0x80000000u) + return emitir(IRTI(ir->o - IR_ADDOV + IR_ADD), ir->op1, ir->op2); + return tr; +} + +/* Narrow conversion to integer operand (overflow undefined). */ +TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr) +{ + if (tref_isstr(tr)) + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. 
*/ + return emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY); + if (!tref_isinteger(tr)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + /* + ** Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. + ** Use IRCONV_TOBIT for the cache entries, since the semantics are the same. + */ + return narrow_stripov(J, tr, IR_MULOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT); +} + +/* Narrow conversion to bitop operand (overflow wrapped). */ +TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr) +{ + if (tref_isstr(tr)) + tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); + if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ + return emitir(IRTI(IR_TOBIT), tr, lj_ir_knum_tobit(J)); + if (!tref_isinteger(tr)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + /* + ** Wrapped overflow semantics allow stripping of ADDOV and SUBOV. + ** MULOV cannot be stripped due to precision widening. + */ + return narrow_stripov(J, tr, IR_SUBOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT); +} + +#if LJ_HASFFI +/* Narrow C array index (overflow undefined). */ +TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr) +{ + lua_assert(tref_isnumber(tr)); + if (tref_isnum(tr)) + return emitir(IRTI(IR_CONV), tr, + (IRT_INTP<<5)|IRT_NUM|IRCONV_TRUNC|IRCONV_ANY); + /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */ + return narrow_stripov(J, tr, IR_MULOV, + LJ_64 ? ((IRT_INTP<<5)|IRT_INT|IRCONV_SEXT) : + ((IRT_INTP<<5)|IRT_INT|IRCONV_TOBIT)); +} +#endif + +/* -- Narrowing of arithmetic operators ----------------------------------- */ + +/* Check whether a number fits into an int32_t (-0 is ok, too). */ +static int numisint(lua_Number n) +{ + return (n == (lua_Number)lj_num2int(n)); +} + +/* Narrowing of arithmetic operations. */ +TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc, + TValue *vb, TValue *vc, IROp op) +{ + if (tref_isstr(rb)) { + rb = emitir(IRTG(IR_STRTO, IRT_NUM), rb, 0); + lj_str_tonum(strV(vb), vb); + } + if (tref_isstr(rc)) { + rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); + lj_str_tonum(strV(vc), vc); + } + /* Must not narrow MUL in non-DUALNUM variant, because it loses -0. */ + if ((op >= IR_ADD && op <= (LJ_DUALNUM ? IR_MUL : IR_SUB)) && + tref_isinteger(rb) && tref_isinteger(rc) && + numisint(lj_vm_foldarith(numberVnum(vb), numberVnum(vc), + (int)op - (int)IR_ADD))) + return emitir(IRTGI((int)op - (int)IR_ADD + (int)IR_ADDOV), rb, rc); + if (!tref_isnum(rb)) rb = emitir(IRTN(IR_CONV), rb, IRCONV_NUM_INT); + if (!tref_isnum(rc)) rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); + return emitir(IRTN(op), rb, rc); +} + +/* Narrowing of unary minus operator. */ +TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc) +{ + if (tref_isstr(rc)) { + rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); + lj_str_tonum(strV(vc), vc); + } + if (tref_isinteger(rc)) { + if ((uint32_t)numberVint(vc) != 0x80000000u) + return emitir(IRTGI(IR_SUBOV), lj_ir_kint(J, 0), rc); + rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); + } + return emitir(IRTN(IR_NEG), rc, lj_ir_knum_neg(J)); +} + +/* Narrowing of modulo operator. */ +TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc) +{ + TRef tmp; + if (tvisstr(vc) && !lj_str_tonum(strV(vc), vc)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + if ((LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) && + tref_isinteger(rb) && tref_isinteger(rc) && + (tvisint(vc) ? 
intV(vc) != 0 : !tviszero(vc))) { + emitir(IRTGI(IR_NE), rc, lj_ir_kint(J, 0)); + return emitir(IRTI(IR_MOD), rb, rc); + } + /* b % c ==> b - floor(b/c)*c */ + rb = lj_ir_tonum(J, rb); + rc = lj_ir_tonum(J, rc); + tmp = emitir(IRTN(IR_DIV), rb, rc); + tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR); + tmp = emitir(IRTN(IR_MUL), tmp, rc); + return emitir(IRTN(IR_SUB), rb, tmp); +} + +/* Narrowing of power operator or math.pow. */ +TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc) +{ + if (tvisstr(vc) && !lj_str_tonum(strV(vc), vc)) + lj_trace_err(J, LJ_TRERR_BADTYPE); + /* Narrowing must be unconditional to preserve (-x)^i semantics. */ + if (tvisint(vc) || numisint(numV(vc))) { + int checkrange = 0; + /* Split pow is faster for bigger exponents. But do this only for (+k)^i. */ + if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) { + int32_t k = numberVint(vc); + if (!(k >= -65536 && k <= 65536)) goto split_pow; + checkrange = 1; + } + if (!tref_isinteger(rc)) { + if (tref_isstr(rc)) + rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0); + /* Guarded conversion to integer! */ + rc = emitir(IRTGI(IR_CONV), rc, IRCONV_INT_NUM|IRCONV_CHECK); + } + if (checkrange && !tref_isk(rc)) { /* Range guard: -65536 <= i <= 65536 */ + TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536)); + emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536)); + } + return emitir(IRTN(IR_POW), rb, rc); + } +split_pow: + /* FOLD covers most cases, but some are easier to do here. */ + if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb))))) + return rb; /* 1 ^ x ==> 1 */ + rc = lj_ir_tonum(J, rc); + if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5) + return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT); /* x ^ 0.5 ==> sqrt(x) */ + /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. */ + rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2); + rc = emitir(IRTN(IR_MUL), rb, rc); + return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2); +} + +/* -- Predictive narrowing of induction variables ------------------------- */ + +/* Narrow a single runtime value. */ +static int narrow_forl(jit_State *J, cTValue *o) +{ + if (tvisint(o)) return 1; + if (LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) return numisint(numV(o)); + return 0; +} + +/* Narrow the FORL index type by looking at the runtime values. */ +IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv) +{ + lua_assert(tvisnumber(&tv[FORL_IDX]) && + tvisnumber(&tv[FORL_STOP]) && + tvisnumber(&tv[FORL_STEP])); + /* Narrow only if the runtime values of start/stop/step are all integers. */ + if (narrow_forl(J, &tv[FORL_IDX]) && + narrow_forl(J, &tv[FORL_STOP]) && + narrow_forl(J, &tv[FORL_STEP])) { + /* And if the loop index can't possibly overflow. */ + lua_Number step = numberVnum(&tv[FORL_STEP]); + lua_Number sum = numberVnum(&tv[FORL_STOP]) + step; + if (0 <= step ? (sum <= 2147483647.0) : (sum >= -2147483648.0)) + return IRT_INT; + } + return IRT_NUM; +} + +#undef IR +#undef fins +#undef emitir +#undef emitir_raw + +#endif diff --git a/src/luajit2/src/lj_opt_split.c b/src/luajit2/src/lj_opt_split.c new file mode 100644 index 0000000000..0a1d87cbae --- /dev/null +++ b/src/luajit2/src/lj_opt_split.c @@ -0,0 +1,713 @@ +/* +** SPLIT: Split 64 bit IR instructions into 32 bit IR instructions. +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_opt_split_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT && (LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) + +#include "lj_err.h" +#include "lj_str.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_vm.h" + +/* SPLIT pass: +** +** This pass splits up 64 bit IR instructions into multiple 32 bit IR +** instructions. It's only active for soft-float targets or for 32 bit CPUs +** which lack native 64 bit integer operations (the FFI is currently the +** only emitter for 64 bit integer instructions). +** +** Splitting the IR in a separate pass keeps each 32 bit IR assembler +** backend simple. Only a small amount of extra functionality needs to be +** implemented. This is much easier than adding support for allocating +** register pairs to each backend (believe me, I tried). A few simple, but +** important optimizations can be performed by the SPLIT pass, which would +** be tedious to do in the backend. +** +** The basic idea is to replace each 64 bit IR instruction with its 32 bit +** equivalent plus an extra HIOP instruction. The splitted IR is not passed +** through FOLD or any other optimizations, so each HIOP is guaranteed to +** immediately follow it's counterpart. The actual functionality of HIOP is +** inferred from the previous instruction. +** +** The operands of HIOP hold the hiword input references. The output of HIOP +** is the hiword output reference, which is also used to hold the hiword +** register or spill slot information. The register allocator treats this +** instruction independently of any other instruction, which improves code +** quality compared to using fixed register pairs. +** +** It's easier to split up some instructions into two regular 32 bit +** instructions. E.g. XLOAD is split up into two XLOADs with two different +** addresses. Obviously 64 bit constants need to be split up into two 32 bit +** constants, too. Some hiword instructions can be entirely omitted, e.g. +** when zero-extending a 32 bit value to 64 bits. 64 bit arguments for calls +** are split up into two 32 bit arguments each. +** +** On soft-float targets, floating-point instructions are directly converted +** to soft-float calls by the SPLIT pass (except for comparisons and MIN/MAX). +** HIOP for number results has the type IRT_SOFTFP ("sfp" in -jdump). +** +** Here's the IR and x64 machine code for 'x.b = x.a + 1' for a struct with +** two int64_t fields: +** +** 0100 p32 ADD base +8 +** 0101 i64 XLOAD 0100 +** 0102 i64 ADD 0101 +1 +** 0103 p32 ADD base +16 +** 0104 i64 XSTORE 0103 0102 +** +** mov rax, [esi+0x8] +** add rax, +0x01 +** mov [esi+0x10], rax +** +** Here's the transformed IR and the x86 machine code after the SPLIT pass: +** +** 0100 p32 ADD base +8 +** 0101 int XLOAD 0100 +** 0102 p32 ADD base +12 +** 0103 int XLOAD 0102 +** 0104 int ADD 0101 +1 +** 0105 int HIOP 0103 +0 +** 0106 p32 ADD base +16 +** 0107 int XSTORE 0106 0104 +** 0108 p32 ADD base +20 +** 0109 int XSTORE 0108 0105 +** +** mov eax, [esi+0x8] +** mov ecx, [esi+0xc] +** add eax, +0x01 +** adc ecx, +0x00 +** mov [esi+0x10], eax +** mov [esi+0x14], ecx +** +** You may notice the reassociated hiword address computation, which is +** later fused into the mov operands by the assembler. +*/ + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Directly emit the transformed IR without updating chains etc. 
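One of the simplest cases the pass handles is a 64 bit constant, which just becomes two 32 bit constants: the loword substitutes for the original reference and the hiword is recorded in a side table. A standalone sketch of that split, with an arbitrary sample constant and assuming a two's-complement target:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint64_t k = 0x123456789abcdef0ull;          /* sample 64 bit constant */
  int32_t lo = (int32_t)(uint32_t)k;           /* loword replaces the original ref */
  int32_t hi = (int32_t)(uint32_t)(k >> 32);   /* hiword goes into the substitution table */
  printf("lo=0x%08x hi=0x%08x ok=%d\n", (uint32_t)lo, (uint32_t)hi,
         (((uint64_t)(uint32_t)hi << 32) | (uint32_t)lo) == k);
  return 0;
}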
*/ +static IRRef split_emit(jit_State *J, uint16_t ot, IRRef1 op1, IRRef1 op2) +{ + IRRef nref = lj_ir_nextins(J); + IRIns *ir = IR(nref); + ir->ot = ot; + ir->op1 = op1; + ir->op2 = op2; + return nref; +} + +#if LJ_SOFTFP +/* Emit a (checked) number to integer conversion. */ +static IRRef split_num2int(jit_State *J, IRRef lo, IRRef hi, int check) +{ + IRRef tmp, res; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), lo, hi); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hi, lo); +#endif + res = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_softfp_d2i); + if (check) { + tmp = split_emit(J, IRTI(IR_CALLN), res, IRCALL_softfp_i2d); + split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); + split_emit(J, IRTGI(IR_EQ), tmp, lo); + split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), tmp+1, hi); + } + return res; +} + +/* Emit a CALLN with one split 64 bit argument. */ +static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir, + IRIns *ir, IRCallID id) +{ + IRRef tmp, op1 = ir->op1; + J->cur.nins--; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); +#endif + ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); + return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); +} + +/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. */ +static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir, + IRIns *ir, IRCallID id) +{ + IRRef tmp, op1 = ir->op1, op2 = ir->op2; + J->cur.nins--; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); +#endif + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); + ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); + return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); +} +#endif + +/* Emit a CALLN with two split 64 bit arguments. */ +static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir, + IRIns *ir, IRCallID id) +{ + IRRef tmp, op1 = ir->op1, op2 = ir->op2; + J->cur.nins--; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev); +#endif + ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); + return split_emit(J, + IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT), + tmp, tmp); +} + +/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */ +static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref) +{ + IRRef nref = oir[ref].prev; + IRIns *ir = IR(nref); + int32_t ofs = 4; + if (ir->o == IR_ADD && irref_isk(ir->op2) && !irt_isphi(oir[ref].t)) { + /* Reassociate address. */ + ofs += IR(ir->op2)->i; + nref = ir->op1; + if (ofs == 0) return nref; + } + return split_emit(J, IRTI(IR_ADD), nref, lj_ir_kint(J, ofs)); +} + +/* Transform the old IR to the new IR. */ +static void split_ir(jit_State *J) +{ + IRRef nins = J->cur.nins, nk = J->cur.nk; + MSize irlen = nins - nk; + MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1)); + IRIns *oir = (IRIns *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, need); + IRRef1 *hisubst; + IRRef ref; + + /* Copy old IR to buffer. 
*/ + memcpy(oir, IR(nk), irlen*sizeof(IRIns)); + /* Bias hiword substitution table and old IR. Loword kept in field prev. */ + hisubst = (IRRef1 *)&oir[irlen] - nk; + oir -= nk; + + /* Remove all IR instructions, but retain IR constants. */ + J->cur.nins = REF_FIRST; + J->loopref = 0; + + /* Process constants and fixed references. */ + for (ref = nk; ref <= REF_BASE; ref++) { + IRIns *ir = &oir[ref]; + if ((LJ_SOFTFP && ir->o == IR_KNUM) || ir->o == IR_KINT64) { + /* Split up 64 bit constant. */ + TValue tv = *ir_k64(ir); + ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo); + hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi); + } else { + ir->prev = ref; /* Identity substitution for loword. */ + hisubst[ref] = 0; + } + } + + /* Process old IR instructions. */ + for (ref = REF_FIRST; ref < nins; ref++) { + IRIns *ir = &oir[ref]; + IRRef nref = lj_ir_nextins(J); + IRIns *nir = IR(nref); + IRRef hi = 0; + + /* Copy-substitute old instruction to new instruction. */ + nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev; + nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev; + ir->prev = nref; /* Loword substitution. */ + nir->o = ir->o; + nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI); + hisubst[ref] = 0; + + /* Split 64 bit instructions. */ +#if LJ_SOFTFP + if (irt_isnum(ir->t)) { + nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */ + /* Note: hi ref = lo ref + 1! Required for SNAP_SOFTFPNUM logic. */ + switch (ir->o) { + case IR_ADD: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_add); + break; + case IR_SUB: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_sub); + break; + case IR_MUL: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_mul); + break; + case IR_DIV: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div); + break; + case IR_POW: + hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi); + break; + case IR_FPMATH: + /* Try to rejoin pow from EXP2, MUL and LOG2. */ + if (nir->op2 == IRFPM_EXP2 && nir->op1 > J->loopref) { + IRIns *irp = IR(nir->op1); + if (irp->o == IR_CALLN && irp->op2 == IRCALL_softfp_mul) { + IRIns *irm4 = IR(irp->op1); + IRIns *irm3 = IR(irm4->op1); + IRIns *irm12 = IR(irm3->op1); + IRIns *irl1 = IR(irm12->op1); + if (irm12->op1 > J->loopref && irl1->o == IR_CALLN && + irl1->op2 == IRCALL_lj_vm_log2) { + IRRef tmp = irl1->op1; /* Recycle first two args from LOG2. */ + IRRef arg3 = irm3->op2, arg4 = irm4->op2; + J->cur.nins--; + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg3); + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg4); + ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_pow); + hi = split_emit(J, IRT(IR_HIOP, LJ_SOFTFP), tmp, tmp); + break; + } + } + } + hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2); + break; + case IR_ATAN2: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_atan2); + break; + case IR_LDEXP: + hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp); + break; + case IR_NEG: case IR_ABS: + nir->o = IR_CONV; /* Pass through loword. */ + nir->op2 = (IRT_INT << 5) | IRT_INT; + hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP), + hisubst[ir->op1], hisubst[ir->op2]); + break; + case IR_SLOAD: + if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. 
*/ + nir->op2 &= ~IRSLOAD_CONVERT; + ir->prev = nref = split_emit(J, IRTI(IR_CALLN), nref, + IRCALL_softfp_i2d); + hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); + break; + } + /* fallthrough */ + case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: + case IR_STRTO: + hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); + break; + case IR_XLOAD: + hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), + split_ptr(J, oir, ir->op1), ir->op2); +#if LJ_BE + ir->prev = hi; hi = nref; +#endif + break; + case IR_ASTORE: case IR_HSTORE: case IR_USTORE: + split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nir->op1, hisubst[ir->op2]); + break; + case IR_XSTORE: { +#if LJ_LE + IRRef hiref = hisubst[ir->op2]; +#else + IRRef hiref = nir->op2; nir->op2 = hisubst[ir->op2]; +#endif + split_emit(J, IRT(IR_XSTORE, IRT_SOFTFP), + split_ptr(J, oir, ir->op1), hiref); + break; + } + case IR_CONV: { /* Conversion to number. Others handled below. */ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); +#if LJ_32 && LJ_HASFFI + if (st == IRT_I64 || st == IRT_U64) { + hi = split_call_l(J, hisubst, oir, ir, + st == IRT_I64 ? IRCALL_softfp_l2d : IRCALL_softfp_ul2d); + break; + } +#endif + lua_assert(st == IRT_INT || + (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT))); + nir->o = IR_CALLN; +#if LJ_32 && LJ_HASFFI + nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d : + st == IRT_FLOAT ? IRCALL_softfp_f2d : + IRCALL_softfp_ui2d; +#else + nir->op2 = IRCALL_softfp_i2d; +#endif + hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); + break; + } + case IR_CALLS: + case IR_CALLXS: + goto split_call; + case IR_PHI: + if (nir->op1 == nir->op2) + J->cur.nins--; /* Drop useless PHIs. */ + if (hisubst[ir->op1] != hisubst[ir->op2]) + split_emit(J, IRT(IR_PHI, IRT_SOFTFP), + hisubst[ir->op1], hisubst[ir->op2]); + break; + default: + lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX); + hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), + hisubst[ir->op1], hisubst[ir->op2]); + break; + } + } else +#endif +#if LJ_32 && LJ_HASFFI + if (irt_isint64(ir->t)) { + IRRef hiref = hisubst[ir->op1]; + nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */ + switch (ir->o) { + case IR_ADD: + case IR_SUB: + /* Use plain op for hiword if loword cannot produce a carry/borrow. */ + if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) { + ir->prev = nir->op1; /* Pass through loword. */ + nir->op1 = hiref; nir->op2 = hisubst[ir->op2]; + hi = nref; + break; + } + /* fallthrough */ + case IR_NEG: + hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]); + break; + case IR_MUL: + hi = split_call_ll(J, hisubst, oir, ir, IRCALL_lj_carith_mul64); + break; + case IR_DIV: + hi = split_call_ll(J, hisubst, oir, ir, + irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : + IRCALL_lj_carith_divu64); + break; + case IR_MOD: + hi = split_call_ll(J, hisubst, oir, ir, + irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 : + IRCALL_lj_carith_modu64); + break; + case IR_POW: + hi = split_call_ll(J, hisubst, oir, ir, + irt_isi64(ir->t) ? 
IRCALL_lj_carith_powi64 : + IRCALL_lj_carith_powu64); + break; + case IR_FLOAD: + lua_assert(ir->op2 == IRFL_CDATA_INT64); + hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64HI); +#if LJ_BE + ir->prev = hi; hi = nref; +#endif + break; + case IR_XLOAD: + hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, oir, ir->op1), ir->op2); +#if LJ_BE + ir->prev = hi; hi = nref; +#endif + break; + case IR_XSTORE: +#if LJ_LE + hiref = hisubst[ir->op2]; +#else + hiref = nir->op2; nir->op2 = hisubst[ir->op2]; +#endif + split_emit(J, IRTI(IR_XSTORE), split_ptr(J, oir, ir->op1), hiref); + break; + case IR_CONV: { /* Conversion to 64 bit integer. Others handled below. */ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); +#if LJ_SOFTFP + if (st == IRT_NUM) { /* NUM to 64 bit int conv. */ + hi = split_call_l(J, hisubst, oir, ir, + irt_isi64(ir->t) ? IRCALL_softfp_d2l : IRCALL_softfp_d2ul); + } else if (st == IRT_FLOAT) { /* FLOAT to 64 bit int conv. */ + nir->o = IR_CALLN; + nir->op2 = irt_isi64(ir->t) ? IRCALL_softfp_f2l : IRCALL_softfp_f2ul; + hi = split_emit(J, IRTI(IR_HIOP), nref, nref); + } +#else + if (st == IRT_NUM || st == IRT_FLOAT) { /* FP to 64 bit int conv. */ + hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref); + } +#endif + else if (st == IRT_I64 || st == IRT_U64) { /* 64/64 bit cast. */ + /* Drop cast, since assembler doesn't care. */ + goto fwdlo; + } else if ((ir->op2 & IRCONV_SEXT)) { /* Sign-extend to 64 bit. */ + IRRef k31 = lj_ir_kint(J, 31); + nir = IR(nref); /* May have been reallocated. */ + ir->prev = nir->op1; /* Pass through loword. */ + nir->o = IR_BSAR; /* hi = bsar(lo, 31). */ + nir->op2 = k31; + hi = nref; + } else { /* Zero-extend to 64 bit. */ + hi = lj_ir_kint(J, 0); + goto fwdlo; + } + break; + } + case IR_CALLXS: + goto split_call; + case IR_PHI: { + IRRef hiref2; + if ((irref_isk(nir->op1) && irref_isk(nir->op2)) || + nir->op1 == nir->op2) + J->cur.nins--; /* Drop useless PHIs. */ + hiref2 = hisubst[ir->op2]; + if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2)) + split_emit(J, IRTI(IR_PHI), hiref, hiref2); + break; + } + default: + lua_assert(ir->o <= IR_NE); /* Comparisons. */ + split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]); + break; + } + } else +#endif +#if LJ_SOFTFP + if (ir->o == IR_SLOAD) { + if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from number to int. */ + nir->op2 &= ~IRSLOAD_CONVERT; + if (!(nir->op2 & IRSLOAD_TYPECHECK)) + nir->t.irt = IRT_INT; /* Drop guard. */ + split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); + ir->prev = split_num2int(J, nref, nref+1, irt_isguard(ir->t)); + } + } else if (ir->o == IR_TOBIT) { + IRRef tmp, op1 = ir->op1; + J->cur.nins--; +#if LJ_LE + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]); +#else + tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev); +#endif + ir->prev = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_lj_vm_tobit); + } else if (ir->o == IR_TOSTR) { + if (hisubst[ir->op1]) { + if (irref_isk(ir->op1)) + nir->op1 = ir->op1; + else + split_emit(J, IRT(IR_HIOP, IRT_NIL), hisubst[ir->op1], nref); + } + } else if (ir->o == IR_HREF || ir->o == IR_NEWREF) { + if (irref_isk(ir->op2) && hisubst[ir->op2]) + nir->op2 = ir->op2; + } else +#endif + if (ir->o == IR_CONV) { /* See above, too. */ + IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); +#if LJ_32 && LJ_HASFFI + if (st == IRT_I64 || st == IRT_U64) { /* Conversion from 64 bit int. */ +#if LJ_SOFTFP + if (irt_isfloat(ir->t)) { + split_call_l(J, hisubst, oir, ir, + st == IRT_I64 ? 
IRCALL_softfp_l2f : IRCALL_softfp_ul2f); + J->cur.nins--; /* Drop unused HIOP. */ + } +#else + if (irt_isfp(ir->t)) { /* 64 bit integer to FP conversion. */ + ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)), + hisubst[ir->op1], nref); + } +#endif + else { /* Truncate to lower 32 bits. */ + fwdlo: + ir->prev = nir->op1; /* Forward loword. */ + /* Replace with NOP to avoid messing up the snapshot logic. */ + nir->ot = IRT(IR_NOP, IRT_NIL); + nir->op1 = nir->op2 = 0; + } + } +#endif +#if LJ_SOFTFP && LJ_32 && LJ_HASFFI + else if (irt_isfloat(ir->t)) { + if (st == IRT_NUM) { + split_call_l(J, hisubst, oir, ir, IRCALL_softfp_d2f); + J->cur.nins--; /* Drop unused HIOP. */ + } else { + nir->o = IR_CALLN; + nir->op2 = st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f; + } + } else if (st == IRT_FLOAT) { + nir->o = IR_CALLN; + nir->op2 = irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui; + } else +#endif +#if LJ_SOFTFP + if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) { + if (irt_isguard(ir->t)) { + lua_assert(st == IRT_NUM && irt_isint(ir->t)); + J->cur.nins--; + ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1); + } else { + split_call_l(J, hisubst, oir, ir, +#if LJ_32 && LJ_HASFFI + st == IRT_NUM ? + (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) : + (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui) +#else + IRCALL_softfp_d2i +#endif + ); + J->cur.nins--; /* Drop unused HIOP. */ + } + } +#endif + } else if (ir->o == IR_CALLXS) { + IRRef hiref; + split_call: + hiref = hisubst[ir->op1]; + if (hiref) { + IROpT ot = nir->ot; + IRRef op2 = nir->op2; + nir->ot = IRT(IR_CARG, IRT_NIL); +#if LJ_LE + nir->op2 = hiref; +#else + nir->op2 = nir->op1; nir->op1 = hiref; +#endif + ir->prev = nref = split_emit(J, ot, nref, op2); + } + if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t)) + hi = split_emit(J, + IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT), + nref, nref); + } else if (ir->o == IR_CARG) { + IRRef hiref = hisubst[ir->op1]; + if (hiref) { + IRRef op2 = nir->op2; +#if LJ_LE + nir->op2 = hiref; +#else + nir->op2 = nir->op1; nir->op1 = hiref; +#endif + ir->prev = nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2); + nir = IR(nref); + } + hiref = hisubst[ir->op2]; + if (hiref) { +#if !LJ_TARGET_X86 + int carg = 0; + IRIns *cir; + for (cir = IR(nir->op1); cir->o == IR_CARG; cir = IR(cir->op1)) + carg++; + if ((carg & 1) == 0) { /* Align 64 bit arguments. */ + IRRef op2 = nir->op2; + nir->op2 = REF_NIL; + nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2); + nir = IR(nref); + } +#endif +#if LJ_BE + { IRRef tmp = nir->op2; nir->op2 = hiref; hiref = tmp; } +#endif + ir->prev = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, hiref); + } + } else if (ir->o == IR_CNEWI) { + if (hisubst[ir->op2]) + split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]); + } else if (ir->o == IR_LOOP) { + J->loopref = nref; /* Needed by assembler. */ + } + hisubst[ref] = hi; /* Store hiword substitution. */ + } + + /* Add PHI marks. */ + for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) { + IRIns *ir = IR(ref); + if (ir->o != IR_PHI) break; + if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t); + if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t); + } + + /* Substitute snapshot maps. */ + oir[nins].prev = J->cur.nins; /* Substitution for last snapshot. 
*/ + { + SnapNo i, nsnap = J->cur.nsnap; + for (i = 0; i < nsnap; i++) { + SnapShot *snap = &J->cur.snap[i]; + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + snap->ref = snap->ref == REF_FIRST ? REF_FIRST : oir[snap->ref].prev; + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRIns *ir = &oir[snap_ref(sn)]; + if (!(LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && irref_isk(snap_ref(sn)))) + map[n] = ((sn & 0xffff0000) | ir->prev); + } + } + } +} + +/* Protected callback for split pass. */ +static TValue *cpsplit(lua_State *L, lua_CFunction dummy, void *ud) +{ + jit_State *J = (jit_State *)ud; + split_ir(J); + UNUSED(L); UNUSED(dummy); + return NULL; +} + +#if defined(LUA_USE_ASSERT) || LJ_SOFTFP +/* Slow, but sure way to check whether a SPLIT pass is needed. */ +static int split_needsplit(jit_State *J) +{ + IRIns *ir, *irend; + IRRef ref; + for (ir = IR(REF_FIRST), irend = IR(J->cur.nins); ir < irend; ir++) + if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t)) + return 1; + if (LJ_SOFTFP) { + for (ref = J->chain[IR_SLOAD]; ref; ref = IR(ref)->prev) + if ((IR(ref)->op2 & IRSLOAD_CONVERT)) + return 1; + } + for (ref = J->chain[IR_CONV]; ref; ref = IR(ref)->prev) + if ((LJ_SOFTFP && (IR(ref)->op2 & IRCONV_SRCMASK) == IRT_NUM) || + (IR(ref)->op2 & IRCONV_SRCMASK) == IRT_I64 || + (IR(ref)->op2 & IRCONV_SRCMASK) == IRT_U64) + return 1; + return 0; /* Nope. */ +} +#endif + +/* SPLIT pass. */ +void lj_opt_split(jit_State *J) +{ +#if LJ_SOFTFP + if (!J->needsplit) + J->needsplit = split_needsplit(J); +#else + lua_assert(J->needsplit >= split_needsplit(J)); /* Verify flag. */ +#endif + if (J->needsplit) { + int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit); + if (errcode) { + /* Completely reset the trace to avoid inconsistent dump on abort. */ + J->cur.nins = J->cur.nk = REF_BASE; + J->cur.nsnap = 0; + lj_err_throw(J->L, errcode); /* Propagate errors. */ + } + } +} + +#undef IR + +#endif diff --git a/src/luajit2/src/lj_parse.c b/src/luajit2/src/lj_parse.c new file mode 100644 index 0000000000..f0bb44199b --- /dev/null +++ b/src/luajit2/src/lj_parse.c @@ -0,0 +1,2511 @@ +/* +** Lua parser (source code -> bytecode). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_parse_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_state.h" +#include "lj_bc.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#include "lj_lex.h" +#include "lj_parse.h" +#include "lj_vm.h" +#include "lj_vmevent.h" + +/* -- Parser structures and definitions ----------------------------------- */ + +/* Expression kinds. 
*/ +typedef enum { + /* Constant expressions must be first and in this order: */ + VKNIL, + VKFALSE, + VKTRUE, + VKSTR, /* sval = string value */ + VKNUM, /* nval = number value */ + VKLAST = VKNUM, + VKCDATA, /* nval = cdata value, not treated as a constant expression */ + /* Non-constant expressions follow: */ + VLOCAL, /* info = local register */ + VUPVAL, /* info = upvalue index */ + VGLOBAL, /* sval = string value */ + VINDEXED, /* info = table register, aux = index reg/byte/string const */ + VJMP, /* info = instruction PC */ + VRELOCABLE, /* info = instruction PC */ + VNONRELOC, /* info = result register */ + VCALL, /* info = instruction PC, aux = base */ + VVOID +} ExpKind; + +/* Expression descriptor. */ +typedef struct ExpDesc { + union { + struct { + uint32_t info; /* Primary info. */ + uint32_t aux; /* Secondary info. */ + } s; + TValue nval; /* Number value. */ + GCstr *sval; /* String value. */ + } u; + ExpKind k; + BCPos t; /* True condition jump list. */ + BCPos f; /* False condition jump list. */ +} ExpDesc; + +/* Macros for expressions. */ +#define expr_hasjump(e) ((e)->t != (e)->f) + +#define expr_isk(e) ((e)->k <= VKLAST) +#define expr_isk_nojump(e) (expr_isk(e) && !expr_hasjump(e)) +#define expr_isnumk(e) ((e)->k == VKNUM) +#define expr_isnumk_nojump(e) (expr_isnumk(e) && !expr_hasjump(e)) +#define expr_isstrk(e) ((e)->k == VKSTR) + +#define expr_numtv(e) check_exp(expr_isnumk((e)), &(e)->u.nval) +#define expr_numberV(e) numberVnum(expr_numtv((e))) + +/* Initialize expression. */ +static LJ_AINLINE void expr_init(ExpDesc *e, ExpKind k, uint32_t info) +{ + e->k = k; + e->u.s.info = info; + e->f = e->t = NO_JMP; +} + +/* Check number constant for +-0. */ +static int expr_numiszero(ExpDesc *e) +{ + TValue *o = expr_numtv(e); + return tvisint(o) ? (intV(o) == 0) : tviszero(o); +} + +/* Per-function linked list of scope blocks. */ +typedef struct FuncScope { + struct FuncScope *prev; /* Link to outer scope. */ + BCPos breaklist; /* Jump list for loop breaks. */ + uint8_t nactvar; /* Number of active vars outside the scope. */ + uint8_t upval; /* Some variable in the scope is an upvalue. */ + uint8_t isbreakable; /* Scope is a loop and allows a break. */ +} FuncScope; + +/* Index into variable stack. */ +typedef uint16_t VarIndex; +#define LJ_MAX_VSTACK 65536 + +/* Upvalue map. */ +typedef struct UVMap { + VarIndex vidx; /* Varinfo index. */ + uint16_t slot; /* Slot or parent upvalue index. */ +} UVMap; + +/* Per-function state. */ +typedef struct FuncState { + GCtab *kt; /* Hash table for constants. */ + LexState *ls; /* Lexer state. */ + lua_State *L; /* Lua state. */ + FuncScope *bl; /* Current scope. */ + struct FuncState *prev; /* Enclosing function. */ + BCPos pc; /* Next bytecode position. */ + BCPos lasttarget; /* Bytecode position of last jump target. */ + BCPos jpc; /* Pending jump list to next bytecode. */ + BCReg freereg; /* First free register. */ + BCReg nactvar; /* Number of active local variables. */ + BCReg nkn, nkgc; /* Number of lua_Number/GCobj constants */ + BCLine linedefined; /* First line of the function definition. */ + BCInsLine *bcbase; /* Base of bytecode stack. */ + BCPos bclim; /* Limit of bytecode stack. */ + MSize vbase; /* Base of variable stack for this function. */ + uint8_t flags; /* Prototype flags. */ + uint8_t numparams; /* Number of parameters. */ + uint8_t framesize; /* Fixed frame size. */ + uint8_t nuv; /* Number of upvalues */ + VarIndex varmap[LJ_MAX_LOCVAR]; /* Map from register to variable idx. 
*/ + UVMap uvloc[LJ_MAX_UPVAL]; /* Map from upvalue to variable idx and slot. */ +} FuncState; + +/* Binary and unary operators. ORDER OPR */ +typedef enum BinOpr { + OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, /* ORDER ARITH */ + OPR_CONCAT, + OPR_NE, OPR_EQ, + OPR_LT, OPR_GE, OPR_LE, OPR_GT, + OPR_AND, OPR_OR, + OPR_NOBINOPR +} BinOpr; + +LJ_STATIC_ASSERT((int)BC_ISGE-(int)BC_ISLT == (int)OPR_GE-(int)OPR_LT); +LJ_STATIC_ASSERT((int)BC_ISLE-(int)BC_ISLT == (int)OPR_LE-(int)OPR_LT); +LJ_STATIC_ASSERT((int)BC_ISGT-(int)BC_ISLT == (int)OPR_GT-(int)OPR_LT); +LJ_STATIC_ASSERT((int)BC_SUBVV-(int)BC_ADDVV == (int)OPR_SUB-(int)OPR_ADD); +LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD); +LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD); +LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD); + +/* -- Error handling ------------------------------------------------------ */ + +LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em) +{ + lj_lex_error(ls, ls->token, em); +} + +LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken token) +{ + lj_lex_error(ls, ls->token, LJ_ERR_XTOKEN, lj_lex_token2str(ls, token)); +} + +LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what) +{ + if (fs->linedefined == 0) + lj_lex_error(fs->ls, 0, LJ_ERR_XLIMM, limit, what); + else + lj_lex_error(fs->ls, 0, LJ_ERR_XLIMF, fs->linedefined, limit, what); +} + +#define checklimit(fs, v, l, m) if ((v) >= (l)) err_limit(fs, l, m) +#define checklimitgt(fs, v, l, m) if ((v) > (l)) err_limit(fs, l, m) +#define checkcond(ls, c, em) { if (!(c)) err_syntax(ls, em); } + +/* -- Management of constants --------------------------------------------- */ + +/* Return bytecode encoding for primitive constant. */ +#define const_pri(e) check_exp((e)->k <= VKTRUE, (e)->k) + +#define tvhaskslot(o) ((o)->u32.hi == 0) +#define tvkslot(o) ((o)->u32.lo) + +/* Add a number constant. */ +static BCReg const_num(FuncState *fs, ExpDesc *e) +{ + lua_State *L = fs->L; + TValue *o; + lua_assert(expr_isnumk(e)); + o = lj_tab_set(L, fs->kt, &e->u.nval); + if (tvhaskslot(o)) + return tvkslot(o); + o->u64 = fs->nkn; + return fs->nkn++; +} + +/* Add a GC object constant. */ +static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype) +{ + lua_State *L = fs->L; + TValue key, *o; + setgcV(L, &key, gc, itype); + /* NOBARRIER: the key is new or kept alive. */ + o = lj_tab_set(L, fs->kt, &key); + if (tvhaskslot(o)) + return tvkslot(o); + o->u64 = fs->nkgc; + return fs->nkgc++; +} + +/* Add a string constant. */ +static BCReg const_str(FuncState *fs, ExpDesc *e) +{ + lua_assert(expr_isstrk(e) || e->k == VGLOBAL); + return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR); +} + +/* Anchor string constant to avoid GC. */ +GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t len) +{ + /* NOBARRIER: the key is new or kept alive. */ + lua_State *L = ls->L; + GCstr *s = lj_str_new(L, str, len); + TValue *tv = lj_tab_setstr(L, ls->fs->kt, s); + if (tvisnil(tv)) setboolV(tv, 1); + lj_gc_check(L); + return s; +} + +#if LJ_HASFFI +/* Anchor cdata to avoid GC. */ +void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd) +{ + /* NOBARRIER: the key is new or kept alive. */ + lua_State *L = ls->L; + setcdataV(L, tv, cd); + setboolV(lj_tab_set(L, ls->fs->kt, tv), 1); +} +#endif + +/* -- Jump list handling -------------------------------------------------- */ + +/* Get next element in jump list. 
*/ +static BCPos jmp_next(FuncState *fs, BCPos pc) +{ + ptrdiff_t delta = bc_j(fs->bcbase[pc].ins); + if ((BCPos)delta == NO_JMP) + return NO_JMP; + else + return (BCPos)(((ptrdiff_t)pc+1)+delta); +} + +/* Check if any of the instructions on the jump list produce no value. */ +static int jmp_novalue(FuncState *fs, BCPos list) +{ + for (; list != NO_JMP; list = jmp_next(fs, list)) { + BCIns p = fs->bcbase[list >= 1 ? list-1 : list].ins; + if (!(bc_op(p) == BC_ISTC || bc_op(p) == BC_ISFC || bc_a(p) == NO_REG)) + return 1; + } + return 0; +} + +/* Patch register of test instructions. */ +static int jmp_patchtestreg(FuncState *fs, BCPos pc, BCReg reg) +{ + BCIns *ip = &fs->bcbase[pc >= 1 ? pc-1 : pc].ins; + BCOp op = bc_op(*ip); + if (op == BC_ISTC || op == BC_ISFC) { + if (reg != NO_REG && reg != bc_d(*ip)) { + setbc_a(ip, reg); + } else { /* Nothing to store or already in the right register. */ + setbc_op(ip, op+(BC_IST-BC_ISTC)); + setbc_a(ip, 0); + } + } else if (bc_a(*ip) == NO_REG) { + if (reg == NO_REG) + *ip = BCINS_AJ(BC_JMP, bc_a(fs->bcbase[pc].ins), 0); + else + setbc_a(ip, reg); + } else { + return 0; /* Cannot patch other instructions. */ + } + return 1; +} + +/* Drop values for all instructions on jump list. */ +static void jmp_dropval(FuncState *fs, BCPos list) +{ + for (; list != NO_JMP; list = jmp_next(fs, list)) + jmp_patchtestreg(fs, list, NO_REG); +} + +/* Patch jump instruction to target. */ +static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest) +{ + BCIns *jmp = &fs->bcbase[pc].ins; + BCPos offset = dest-(pc+1)+BCBIAS_J; + lua_assert(dest != NO_JMP); + if (offset > BCMAX_D) + err_syntax(fs->ls, LJ_ERR_XJUMP); + setbc_d(jmp, offset); +} + +/* Append to jump list. */ +static void jmp_append(FuncState *fs, BCPos *l1, BCPos l2) +{ + if (l2 == NO_JMP) { + return; + } else if (*l1 == NO_JMP) { + *l1 = l2; + } else { + BCPos list = *l1; + BCPos next; + while ((next = jmp_next(fs, list)) != NO_JMP) /* Find last element. */ + list = next; + jmp_patchins(fs, list, l2); + } +} + +/* Patch jump list and preserve produced values. */ +static void jmp_patchval(FuncState *fs, BCPos list, BCPos vtarget, + BCReg reg, BCPos dtarget) +{ + while (list != NO_JMP) { + BCPos next = jmp_next(fs, list); + if (jmp_patchtestreg(fs, list, reg)) + jmp_patchins(fs, list, vtarget); /* Jump to target with value. */ + else + jmp_patchins(fs, list, dtarget); /* Jump to default target. */ + list = next; + } +} + +/* Jump to following instruction. Append to list of pending jumps. */ +static void jmp_tohere(FuncState *fs, BCPos list) +{ + fs->lasttarget = fs->pc; + jmp_append(fs, &fs->jpc, list); +} + +/* Patch jump list to target. */ +static void jmp_patch(FuncState *fs, BCPos list, BCPos target) +{ + if (target == fs->pc) { + jmp_tohere(fs, list); + } else { + lua_assert(target < fs->pc); + jmp_patchval(fs, list, target, NO_REG, target); + } +} + +/* -- Bytecode register allocator ----------------------------------------- */ + +/* Bump frame size. */ +static void bcreg_bump(FuncState *fs, BCReg n) +{ + BCReg sz = fs->freereg + n; + if (sz > fs->framesize) { + if (sz >= LJ_MAX_SLOTS) + err_syntax(fs->ls, LJ_ERR_XSLOTS); + fs->framesize = (uint8_t)sz; + } +} + +/* Reserve registers. */ +static void bcreg_reserve(FuncState *fs, BCReg n) +{ + bcreg_bump(fs, n); + fs->freereg += n; +} + +/* Free register. */ +static void bcreg_free(FuncState *fs, BCReg reg) +{ + if (reg >= fs->nactvar) { + fs->freereg--; + lua_assert(reg == fs->freereg); + } +} + +/* Free register for expression. 
*/ +static void expr_free(FuncState *fs, ExpDesc *e) +{ + if (e->k == VNONRELOC) + bcreg_free(fs, e->u.s.info); +} + +/* -- Bytecode emitter ---------------------------------------------------- */ + +/* Emit bytecode instruction. */ +static BCPos bcemit_INS(FuncState *fs, BCIns ins) +{ + BCPos pc = fs->pc; + LexState *ls = fs->ls; + jmp_patchval(fs, fs->jpc, pc, NO_REG, pc); + fs->jpc = NO_JMP; + if (LJ_UNLIKELY(pc >= fs->bclim)) { + ptrdiff_t base = fs->bcbase - ls->bcstack; + checklimit(fs, ls->sizebcstack, LJ_MAX_BCINS, "bytecode instructions"); + lj_mem_growvec(fs->L, ls->bcstack, ls->sizebcstack, LJ_MAX_BCINS,BCInsLine); + fs->bclim = (BCPos)(ls->sizebcstack - base); + fs->bcbase = ls->bcstack + base; + } + fs->bcbase[pc].ins = ins; + fs->bcbase[pc].line = ls->lastline; + fs->pc = pc+1; + return pc; +} + +#define bcemit_ABC(fs, o, a, b, c) bcemit_INS(fs, BCINS_ABC(o, a, b, c)) +#define bcemit_AD(fs, o, a, d) bcemit_INS(fs, BCINS_AD(o, a, d)) +#define bcemit_AJ(fs, o, a, j) bcemit_INS(fs, BCINS_AJ(o, a, j)) + +#define bcptr(fs, e) (&(fs)->bcbase[(e)->u.s.info].ins) + +/* -- Bytecode emitter for expressions ------------------------------------ */ + +/* Discharge non-constant expression to any register. */ +static void expr_discharge(FuncState *fs, ExpDesc *e) +{ + BCIns ins; + if (e->k == VUPVAL) { + ins = BCINS_AD(BC_UGET, 0, e->u.s.info); + } else if (e->k == VGLOBAL) { + ins = BCINS_AD(BC_GGET, 0, const_str(fs, e)); + } else if (e->k == VINDEXED) { + BCReg rc = e->u.s.aux; + if ((int32_t)rc < 0) { + ins = BCINS_ABC(BC_TGETS, 0, e->u.s.info, ~rc); + } else if (rc > BCMAX_C) { + ins = BCINS_ABC(BC_TGETB, 0, e->u.s.info, rc-(BCMAX_C+1)); + } else { + bcreg_free(fs, rc); + ins = BCINS_ABC(BC_TGETV, 0, e->u.s.info, rc); + } + bcreg_free(fs, e->u.s.info); + } else if (e->k == VCALL) { + e->u.s.info = e->u.s.aux; + e->k = VNONRELOC; + return; + } else if (e->k == VLOCAL) { + e->k = VNONRELOC; + return; + } else { + return; + } + e->u.s.info = bcemit_INS(fs, ins); + e->k = VRELOCABLE; +} + +/* Emit bytecode to set a range of registers to nil. */ +static void bcemit_nil(FuncState *fs, BCReg from, BCReg n) +{ + if (fs->pc > fs->lasttarget) { /* No jumps to current position? */ + BCIns *ip = &fs->bcbase[fs->pc-1].ins; + BCReg pto, pfrom = bc_a(*ip); + switch (bc_op(*ip)) { /* Try to merge with the previous instruction. */ + case BC_KPRI: + if (bc_d(*ip) != ~LJ_TNIL) break; + if (from == pfrom) { + if (n == 1) return; + } else if (from == pfrom+1) { + from = pfrom; + n++; + } else { + break; + } + fs->pc--; /* Drop KPRI. */ + break; + case BC_KNIL: + pto = bc_d(*ip); + if (pfrom <= from && from <= pto+1) { /* Can we connect both ranges? */ + if (from+n-1 > pto) + setbc_d(ip, from+n-1); /* Patch previous instruction range. */ + return; + } + break; + default: + break; + } + } + /* Emit new instruction or replace old instruction. */ + bcemit_INS(fs, n == 1 ? BCINS_AD(BC_KPRI, from, VKNIL) : + BCINS_AD(BC_KNIL, from, from+n-1)); +} + +/* Discharge an expression to a specific register. Ignore branches. 
*/ +static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg) +{ + BCIns ins; + expr_discharge(fs, e); + if (e->k == VKSTR) { + ins = BCINS_AD(BC_KSTR, reg, const_str(fs, e)); + } else if (e->k == VKNUM) { +#if LJ_DUALNUM + cTValue *tv = expr_numtv(e); + if (tvisint(tv) && checki16(intV(tv))) + ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)intV(tv)); + else +#else + lua_Number n = expr_numberV(e); + int32_t k = lj_num2int(n); + if (checki16(k) && n == (lua_Number)k) + ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)k); + else +#endif + ins = BCINS_AD(BC_KNUM, reg, const_num(fs, e)); +#if LJ_HASFFI + } else if (e->k == VKCDATA) { + fs->flags |= PROTO_FFI; + ins = BCINS_AD(BC_KCDATA, reg, + const_gc(fs, obj2gco(cdataV(&e->u.nval)), LJ_TCDATA)); +#endif + } else if (e->k == VRELOCABLE) { + setbc_a(bcptr(fs, e), reg); + goto noins; + } else if (e->k == VNONRELOC) { + if (reg == e->u.s.info) + goto noins; + ins = BCINS_AD(BC_MOV, reg, e->u.s.info); + } else if (e->k == VKNIL) { + bcemit_nil(fs, reg, 1); + goto noins; + } else if (e->k <= VKTRUE) { + ins = BCINS_AD(BC_KPRI, reg, const_pri(e)); + } else { + lua_assert(e->k == VVOID || e->k == VJMP); + return; + } + bcemit_INS(fs, ins); +noins: + e->u.s.info = reg; + e->k = VNONRELOC; +} + +/* Forward declaration. */ +static BCPos bcemit_jmp(FuncState *fs); + +/* Discharge an expression to a specific register. */ +static void expr_toreg(FuncState *fs, ExpDesc *e, BCReg reg) +{ + expr_toreg_nobranch(fs, e, reg); + if (e->k == VJMP) + jmp_append(fs, &e->t, e->u.s.info); /* Add it to the true jump list. */ + if (expr_hasjump(e)) { /* Discharge expression with branches. */ + BCPos jend, jfalse = NO_JMP, jtrue = NO_JMP; + if (jmp_novalue(fs, e->t) || jmp_novalue(fs, e->f)) { + BCPos jval = (e->k == VJMP) ? NO_JMP : bcemit_jmp(fs); + jfalse = bcemit_AD(fs, BC_KPRI, reg, VKFALSE); + bcemit_AJ(fs, BC_JMP, fs->freereg, 1); + jtrue = bcemit_AD(fs, BC_KPRI, reg, VKTRUE); + jmp_tohere(fs, jval); + } + jend = fs->pc; + fs->lasttarget = jend; + jmp_patchval(fs, e->f, jend, reg, jfalse); + jmp_patchval(fs, e->t, jend, reg, jtrue); + } + e->f = e->t = NO_JMP; + e->u.s.info = reg; + e->k = VNONRELOC; +} + +/* Discharge an expression to the next free register. */ +static void expr_tonextreg(FuncState *fs, ExpDesc *e) +{ + expr_discharge(fs, e); + expr_free(fs, e); + bcreg_reserve(fs, 1); + expr_toreg(fs, e, fs->freereg - 1); +} + +/* Discharge an expression to any register. */ +static BCReg expr_toanyreg(FuncState *fs, ExpDesc *e) +{ + expr_discharge(fs, e); + if (e->k == VNONRELOC) { + if (!expr_hasjump(e)) return e->u.s.info; /* Already in a register. */ + if (e->u.s.info >= fs->nactvar) { + expr_toreg(fs, e, e->u.s.info); /* Discharge to temp. register. */ + return e->u.s.info; + } + } + expr_tonextreg(fs, e); /* Discharge to next register. */ + return e->u.s.info; +} + +/* Partially discharge expression to a value. */ +static void expr_toval(FuncState *fs, ExpDesc *e) +{ + if (expr_hasjump(e)) + expr_toanyreg(fs, e); + else + expr_discharge(fs, e); +} + +/* Emit store for LHS expression. 
*/ +static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e) +{ + BCIns ins; + if (var->k == VLOCAL) { + expr_free(fs, e); + expr_toreg(fs, e, var->u.s.info); + return; + } else if (var->k == VUPVAL) { + expr_toval(fs, e); + if (e->k <= VKTRUE) + ins = BCINS_AD(BC_USETP, var->u.s.info, const_pri(e)); + else if (e->k == VKSTR) + ins = BCINS_AD(BC_USETS, var->u.s.info, const_str(fs, e)); + else if (e->k == VKNUM) + ins = BCINS_AD(BC_USETN, var->u.s.info, const_num(fs, e)); + else + ins = BCINS_AD(BC_USETV, var->u.s.info, expr_toanyreg(fs, e)); + } else if (var->k == VGLOBAL) { + BCReg ra = expr_toanyreg(fs, e); + ins = BCINS_AD(BC_GSET, ra, const_str(fs, var)); + } else { + BCReg ra, rc; + lua_assert(var->k == VINDEXED); + ra = expr_toanyreg(fs, e); + rc = var->u.s.aux; + if ((int32_t)rc < 0) { + ins = BCINS_ABC(BC_TSETS, ra, var->u.s.info, ~rc); + } else if (rc > BCMAX_C) { + ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1)); + } else { + /* Free late alloced key reg to avoid assert on free of value reg. */ + /* This can only happen when called from expr_table(). */ + lua_assert(e->k != VNONRELOC || ra < fs->nactvar || + rc < ra || (bcreg_free(fs, rc),1)); + ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc); + } + } + bcemit_INS(fs, ins); + expr_free(fs, e); +} + +/* Emit method lookup expression. */ +static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key) +{ + BCReg idx, func, obj = expr_toanyreg(fs, e); + expr_free(fs, e); + func = fs->freereg; + bcemit_AD(fs, BC_MOV, func+1, obj); /* Copy object to first argument. */ + lua_assert(expr_isstrk(key)); + idx = const_str(fs, key); + if (idx <= BCMAX_C) { + bcreg_reserve(fs, 2); + bcemit_ABC(fs, BC_TGETS, func, obj, idx); + } else { + bcreg_reserve(fs, 3); + bcemit_AD(fs, BC_KSTR, func+2, idx); + bcemit_ABC(fs, BC_TGETV, func, obj, func+2); + fs->freereg--; + } + e->u.s.info = func; + e->k = VNONRELOC; +} + +/* -- Bytecode emitter for branches --------------------------------------- */ + +/* Emit unconditional branch. */ +static BCPos bcemit_jmp(FuncState *fs) +{ + BCPos jpc = fs->jpc; + BCPos j = fs->pc - 1; + BCIns *ip = &fs->bcbase[j].ins; + fs->jpc = NO_JMP; + if ((int32_t)j >= (int32_t)fs->lasttarget && + bc_op(*ip) == BC_UCLO) + setbc_j(ip, NO_JMP); + else + j = bcemit_AJ(fs, BC_JMP, fs->freereg, NO_JMP); + jmp_append(fs, &j, jpc); + return j; +} + +/* Invert branch condition of bytecode instruction. */ +static void invertcond(FuncState *fs, ExpDesc *e) +{ + BCIns *ip = &fs->bcbase[e->u.s.info - 1].ins; + setbc_op(ip, bc_op(*ip)^1); +} + +/* Emit conditional branch. */ +static BCPos bcemit_branch(FuncState *fs, ExpDesc *e, int cond) +{ + BCPos pc; + if (e->k == VRELOCABLE) { + BCIns *ip = bcptr(fs, e); + if (bc_op(*ip) == BC_NOT) { + *ip = BCINS_AD(cond ? BC_ISF : BC_IST, 0, bc_d(*ip)); + return bcemit_jmp(fs); + } + } + if (e->k != VNONRELOC) { + bcreg_reserve(fs, 1); + expr_toreg_nobranch(fs, e, fs->freereg-1); + } + bcemit_AD(fs, cond ? BC_ISTC : BC_ISFC, NO_REG, e->u.s.info); + pc = bcemit_jmp(fs); + expr_free(fs, e); + return pc; +} + +/* Emit branch on true condition. */ +static void bcemit_branch_t(FuncState *fs, ExpDesc *e) +{ + BCPos pc; + expr_discharge(fs, e); + if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE) + pc = NO_JMP; /* Never jump. 
*/ + else if (e->k == VJMP) + invertcond(fs, e), pc = e->u.s.info; + else if (e->k == VKFALSE || e->k == VKNIL) + expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs); + else + pc = bcemit_branch(fs, e, 0); + jmp_append(fs, &e->f, pc); + jmp_tohere(fs, e->t); + e->t = NO_JMP; +} + +/* Emit branch on false condition. */ +static void bcemit_branch_f(FuncState *fs, ExpDesc *e) +{ + BCPos pc; + expr_discharge(fs, e); + if (e->k == VKNIL || e->k == VKFALSE) + pc = NO_JMP; /* Never jump. */ + else if (e->k == VJMP) + pc = e->u.s.info; + else if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE) + expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs); + else + pc = bcemit_branch(fs, e, 1); + jmp_append(fs, &e->t, pc); + jmp_tohere(fs, e->f); + e->f = NO_JMP; +} + +/* -- Bytecode emitter for operators -------------------------------------- */ + +/* Try constant-folding of arithmetic operators. */ +static int foldarith(BinOpr opr, ExpDesc *e1, ExpDesc *e2) +{ + TValue o; + lua_Number n; + if (!expr_isnumk_nojump(e1) || !expr_isnumk_nojump(e2)) return 0; + n = lj_vm_foldarith(expr_numberV(e1), expr_numberV(e2), (int)opr-OPR_ADD); + setnumV(&o, n); + if (tvisnan(&o) || tvismzero(&o)) return 0; /* Avoid NaN and -0 as consts. */ + if (LJ_DUALNUM) { + int32_t k = lj_num2int(n); + if ((lua_Number)k == n) { + setintV(&e1->u.nval, k); + return 1; + } + } + setnumV(&e1->u.nval, n); + return 1; +} + +/* Emit arithmetic operator. */ +static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) +{ + BCReg rb, rc, t; + uint32_t op; + if (foldarith(opr, e1, e2)) + return; + if (opr == OPR_POW) { + op = BC_POW; + rc = expr_toanyreg(fs, e2); + rb = expr_toanyreg(fs, e1); + } else { + op = opr-OPR_ADD+BC_ADDVV; + /* Must discharge 2nd operand first since VINDEXED might free regs. */ + expr_toval(fs, e2); + if (expr_isnumk(e2) && (rc = const_num(fs, e2)) <= BCMAX_C) + op -= BC_ADDVV-BC_ADDVN; + else + rc = expr_toanyreg(fs, e2); + /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */ + lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC); + expr_toval(fs, e1); + /* Avoid two consts to satisfy bytecode constraints. */ + if (expr_isnumk(e1) && !expr_isnumk(e2) && + (t = const_num(fs, e1)) <= BCMAX_B) { + rb = rc; rc = t; op -= BC_ADDVV-BC_ADDNV; + } else { + rb = expr_toanyreg(fs, e1); + } + } + /* Using expr_free might cause asserts if the order is wrong. */ + if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--; + if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--; + e1->u.s.info = bcemit_ABC(fs, op, 0, rb, rc); + e1->k = VRELOCABLE; +} + +/* Emit comparison operator. */ +static void bcemit_comp(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) +{ + ExpDesc *eret = e1; + BCIns ins; + expr_toval(fs, e1); + if (opr == OPR_EQ || opr == OPR_NE) { + BCOp op = opr == OPR_EQ ? BC_ISEQV : BC_ISNEV; + BCReg ra; + if (expr_isk(e1)) { e1 = e2; e2 = eret; } /* Need constant in 2nd arg. */ + ra = expr_toanyreg(fs, e1); /* First arg must be in a reg. 
*/ + expr_toval(fs, e2); + switch (e2->k) { + case VKNIL: case VKFALSE: case VKTRUE: + ins = BCINS_AD(op+(BC_ISEQP-BC_ISEQV), ra, const_pri(e2)); + break; + case VKSTR: + ins = BCINS_AD(op+(BC_ISEQS-BC_ISEQV), ra, const_str(fs, e2)); + break; + case VKNUM: + ins = BCINS_AD(op+(BC_ISEQN-BC_ISEQV), ra, const_num(fs, e2)); + break; + default: + ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2)); + break; + } + } else { + uint32_t op = opr-OPR_LT+BC_ISLT; + BCReg ra; + if ((op-BC_ISLT) & 1) { /* GT -> LT, GE -> LE */ + e1 = e2; e2 = eret; /* Swap operands. */ + op = ((op-BC_ISLT)^3)+BC_ISLT; + } + ra = expr_toanyreg(fs, e1); + ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2)); + } + /* Using expr_free might cause asserts if the order is wrong. */ + if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--; + if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--; + bcemit_INS(fs, ins); + eret->u.s.info = bcemit_jmp(fs); + eret->k = VJMP; +} + +/* Fixup left side of binary operator. */ +static void bcemit_binop_left(FuncState *fs, BinOpr op, ExpDesc *e) +{ + if (op == OPR_AND) { + bcemit_branch_t(fs, e); + } else if (op == OPR_OR) { + bcemit_branch_f(fs, e); + } else if (op == OPR_CONCAT) { + expr_tonextreg(fs, e); + } else if (op == OPR_EQ || op == OPR_NE) { + if (!expr_isk_nojump(e)) expr_toanyreg(fs, e); + } else { + if (!expr_isnumk_nojump(e)) expr_toanyreg(fs, e); + } +} + +/* Emit binary operator. */ +static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2) +{ + if (op <= OPR_POW) { + bcemit_arith(fs, op, e1, e2); + } else if (op == OPR_AND) { + lua_assert(e1->t == NO_JMP); /* List must be closed. */ + expr_discharge(fs, e2); + jmp_append(fs, &e2->f, e1->f); + *e1 = *e2; + } else if (op == OPR_OR) { + lua_assert(e1->f == NO_JMP); /* List must be closed. */ + expr_discharge(fs, e2); + jmp_append(fs, &e2->t, e1->t); + *e1 = *e2; + } else if (op == OPR_CONCAT) { + expr_toval(fs, e2); + if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) { + lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1); + expr_free(fs, e1); + setbc_b(bcptr(fs, e2), e1->u.s.info); + e1->u.s.info = e2->u.s.info; + } else { + expr_tonextreg(fs, e2); + expr_free(fs, e2); + expr_free(fs, e1); + e1->u.s.info = bcemit_ABC(fs, BC_CAT, 0, e1->u.s.info, e2->u.s.info); + } + e1->k = VRELOCABLE; + } else { + lua_assert(op == OPR_NE || op == OPR_EQ || + op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT); + bcemit_comp(fs, op, e1, e2); + } +} + +/* Emit unary operator. */ +static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e) +{ + if (op == BC_NOT) { + /* Swap true and false lists. */ + { BCPos temp = e->f; e->f = e->t; e->t = temp; } + jmp_dropval(fs, e->f); + jmp_dropval(fs, e->t); + expr_discharge(fs, e); + if (e->k == VKNIL || e->k == VKFALSE) { + e->k = VKTRUE; + return; + } else if (expr_isk(e) || (LJ_HASFFI && e->k == VKCDATA)) { + e->k = VKFALSE; + return; + } else if (e->k == VJMP) { + invertcond(fs, e); + return; + } else if (e->k == VRELOCABLE) { + bcreg_reserve(fs, 1); + setbc_a(bcptr(fs, e), fs->freereg-1); + e->u.s.info = fs->freereg-1; + e->k = VNONRELOC; + } else { + lua_assert(e->k == VNONRELOC); + } + } else { + lua_assert(op == BC_UNM || op == BC_LEN); + if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */ +#if LJ_HASFFI + if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. 
*/ + GCcdata *cd = cdataV(&e->u.nval); + int64_t *p = (int64_t *)cdataptr(cd); + if (cd->typeid == CTID_COMPLEX_DOUBLE) + p[1] ^= (int64_t)U64x(80000000,00000000); + else + *p = -*p; + return; + } else +#endif + if (expr_isnumk(e) && !expr_numiszero(e)) { /* Avoid folding to -0. */ + TValue *o = expr_numtv(e); + if (tvisint(o)) { + int32_t k = intV(o); + if (k == -k) + setnumV(o, -(lua_Number)k); + else + setintV(o, -k); + return; + } else { + o->u64 ^= U64x(80000000,00000000); + return; + } + } + } + expr_toanyreg(fs, e); + } + expr_free(fs, e); + e->u.s.info = bcemit_AD(fs, op, 0, e->u.s.info); + e->k = VRELOCABLE; +} + +/* -- Lexer support ------------------------------------------------------- */ + +/* Check and consume optional token. */ +static int lex_opt(LexState *ls, LexToken tok) +{ + if (ls->token == tok) { + lj_lex_next(ls); + return 1; + } + return 0; +} + +/* Check and consume token. */ +static void lex_check(LexState *ls, LexToken tok) +{ + if (ls->token != tok) + err_token(ls, tok); + lj_lex_next(ls); +} + +/* Check for matching token. */ +static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line) +{ + if (!lex_opt(ls, what)) { + if (line == ls->linenumber) { + err_token(ls, what); + } else { + const char *swhat = lj_lex_token2str(ls, what); + const char *swho = lj_lex_token2str(ls, who); + lj_lex_error(ls, ls->token, LJ_ERR_XMATCH, swhat, swho, line); + } + } +} + +/* Check for string token. */ +static GCstr *lex_str(LexState *ls) +{ + GCstr *s; + if (ls->token != TK_name) + err_token(ls, TK_name); + s = strV(&ls->tokenval); + lj_lex_next(ls); + return s; +} + +/* -- Variable handling --------------------------------------------------- */ + +#define var_get(ls, fs, i) ((ls)->vstack[(fs)->varmap[(i)]]) + +/* Define a new local variable. */ +static void var_new(LexState *ls, BCReg n, GCstr *name) +{ + FuncState *fs = ls->fs; + MSize vtop = ls->vtop; + checklimit(fs, fs->nactvar+n, LJ_MAX_LOCVAR, "local variables"); + if (LJ_UNLIKELY(vtop >= ls->sizevstack)) { + if (ls->sizevstack >= LJ_MAX_VSTACK) + lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); + lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); + } + lua_assert((uintptr_t)name < VARNAME__MAX || + lj_tab_getstr(fs->kt, name) != NULL); + /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ + setgcref(ls->vstack[vtop].name, obj2gco(name)); + fs->varmap[fs->nactvar+n] = (uint16_t)vtop; + ls->vtop = vtop+1; +} + +#define var_new_lit(ls, n, v) \ + var_new(ls, (n), lj_parse_keepstr(ls, "" v, sizeof(v)-1)) + +#define var_new_fixed(ls, n, vn) \ + var_new(ls, (n), (GCstr *)(uintptr_t)(vn)) + +/* Add local variables. */ +static void var_add(LexState *ls, BCReg nvars) +{ + FuncState *fs = ls->fs; + fs->nactvar = (uint8_t)(fs->nactvar + nvars); + for (; nvars; nvars--) + var_get(ls, fs, fs->nactvar - nvars).startpc = fs->pc; +} + +/* Remove local variables. */ +static void var_remove(LexState *ls, BCReg tolevel) +{ + FuncState *fs = ls->fs; + while (fs->nactvar > tolevel) + var_get(ls, fs, --fs->nactvar).endpc = fs->pc; +} + +/* Lookup local variable name. */ +static BCReg var_lookup_local(FuncState *fs, GCstr *n) +{ + int i; + for (i = fs->nactvar-1; i >= 0; i--) { + if (n == strref(var_get(fs->ls, fs, i).name)) + return (BCReg)i; + } + return (BCReg)-1; /* Not found. */ +} + +/* Lookup or add upvalue index. 
*/ +static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e) +{ + MSize i, n = fs->nuv; + for (i = 0; i < n; i++) + if (fs->uvloc[i].vidx == vidx) + return i; /* Already exists. */ + /* Otherwise create a new one. */ + checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues"); + lua_assert(e->k == VLOCAL || e->k == VUPVAL); + fs->uvloc[n].vidx = (uint16_t)vidx; + fs->uvloc[n].slot = (uint16_t)(e->u.s.info | (e->k == VLOCAL ? 0x8000 : 0)); + fs->nuv = n+1; + return n; +} + +/* Forward declaration. */ +static void scope_uvmark(FuncState *fs, BCReg level); + +/* Recursively lookup variables in enclosing functions. */ +static MSize var_lookup_(FuncState *fs, GCstr *name, ExpDesc *e, int first) +{ + if (fs) { + BCReg reg = var_lookup_local(fs, name); + if ((int32_t)reg >= 0) { /* Local in this function? */ + expr_init(e, VLOCAL, reg); + if (!first) + scope_uvmark(fs, reg); /* Scope now has an upvalue. */ + return (MSize)fs->varmap[reg]; + } else { + MSize vidx = var_lookup_(fs->prev, name, e, 0); /* Var in outer func? */ + if ((int32_t)vidx >= 0) { /* Yes, make it an upvalue here. */ + e->u.s.info = (uint8_t)var_lookup_uv(fs, vidx, e); + e->k = VUPVAL; + return vidx; + } + } + } else { /* Not found in any function, must be a global. */ + expr_init(e, VGLOBAL, 0); + e->u.sval = name; + } + return (MSize)-1; /* Global. */ +} + +/* Lookup variable name. */ +#define var_lookup(ls, e) \ + var_lookup_((ls)->fs, lex_str(ls), (e), 1) + +/* -- Function state management ------------------------------------------- */ + +/* Fixup bytecode for prototype. */ +static void fs_fixup_bc(FuncState *fs, GCproto *pt, BCIns *bc, MSize n) +{ + BCInsLine *base = fs->bcbase; + MSize i; + pt->sizebc = n; + bc[0] = BCINS_AD((fs->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF, + fs->framesize, 0); + for (i = 1; i < n; i++) + bc[i] = base[i].ins; +} + +/* Fixup constants for prototype. */ +static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr) +{ + GCtab *kt; + TValue *array; + Node *node; + MSize i, hmask; + checklimitgt(fs, fs->nkn, BCMAX_D+1, "constants"); + checklimitgt(fs, fs->nkgc, BCMAX_D+1, "constants"); + setmref(pt->k, kptr); + pt->sizekn = fs->nkn; + pt->sizekgc = fs->nkgc; + kt = fs->kt; + array = tvref(kt->array); + for (i = 0; i < kt->asize; i++) + if (tvhaskslot(&array[i])) { + TValue *tv = &((TValue *)kptr)[tvkslot(&array[i])]; + if (LJ_DUALNUM) + setintV(tv, (int32_t)i); + else + setnumV(tv, (lua_Number)i); + } + node = noderef(kt->node); + hmask = kt->hmask; + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + if (tvhaskslot(&n->val)) { + ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val); + lua_assert(!tvisint(&n->key)); + if (tvisnum(&n->key)) { + TValue *tv = &((TValue *)kptr)[kidx]; + if (LJ_DUALNUM) { + lua_Number nn = numV(&n->key); + int32_t k = lj_num2int(nn); + lua_assert(!tvismzero(&n->key)); + if ((lua_Number)k == nn) + setintV(tv, k); + else + *tv = n->key; + } else { + *tv = n->key; + } + } else { + GCobj *o = gcV(&n->key); + setgcref(((GCRef *)kptr)[~kidx], o); + lj_gc_objbarrier(fs->L, pt, o); + } + } + } +} + +/* Fixup upvalues for prototype. */ +static void fs_fixup_uv(FuncState *fs, GCproto *pt, uint16_t *uv) +{ + MSize i, n = fs->nuv; + setmref(pt->uv, uv); + pt->sizeuv = n; + for (i = 0; i < n; i++) + uv[i] = fs->uvloc[i].slot; +} + +#ifndef LUAJIT_DISABLE_DEBUGINFO +/* Prepare lineinfo for prototype. */ +static size_t fs_prep_line(FuncState *fs, BCLine numline) +{ + return (fs->pc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2); +} + +/* Fixup lineinfo for prototype. 
*/ +static void fs_fixup_line(FuncState *fs, GCproto *pt, + void *lineinfo, BCLine numline) +{ + BCInsLine *base = fs->bcbase + 1; + BCLine first = fs->linedefined; + MSize i = 0, n = fs->pc-1; + pt->firstline = fs->linedefined; + pt->numline = numline; + setmref(pt->lineinfo, lineinfo); + if (LJ_LIKELY(numline < 256)) { + uint8_t *li = (uint8_t *)lineinfo; + do { + BCLine delta = base[i].line - first; + lua_assert(delta >= 0 && delta < 256); + li[i] = (uint8_t)delta; + } while (++i < n); + } else if (LJ_LIKELY(numline < 65536)) { + uint16_t *li = (uint16_t *)lineinfo; + do { + BCLine delta = base[i].line - first; + lua_assert(delta >= 0 && delta < 65536); + li[i] = (uint16_t)delta; + } while (++i < n); + } else { + uint32_t *li = (uint32_t *)lineinfo; + do { + BCLine delta = base[i].line - first; + lua_assert(delta >= 0); + li[i] = (uint32_t)delta; + } while (++i < n); + } +} + +/* Resize buffer if needed. */ +static LJ_NOINLINE void fs_buf_resize(LexState *ls, MSize len) +{ + MSize sz = ls->sb.sz * 2; + while (ls->sb.n + len > sz) sz = sz * 2; + lj_str_resizebuf(ls->L, &ls->sb, sz); +} + +static LJ_AINLINE void fs_buf_need(LexState *ls, MSize len) +{ + if (LJ_UNLIKELY(ls->sb.n + len > ls->sb.sz)) + fs_buf_resize(ls, len); +} + +/* Add string to buffer. */ +static void fs_buf_str(LexState *ls, const char *str, MSize len) +{ + char *p = ls->sb.buf + ls->sb.n; + MSize i; + ls->sb.n += len; + for (i = 0; i < len; i++) p[i] = str[i]; +} + +/* Add ULEB128 value to buffer. */ +static void fs_buf_uleb128(LexState *ls, uint32_t v) +{ + MSize n = ls->sb.n; + uint8_t *p = (uint8_t *)ls->sb.buf; + for (; v >= 0x80; v >>= 7) + p[n++] = (uint8_t)((v & 0x7f) | 0x80); + p[n++] = (uint8_t)v; + ls->sb.n = n; +} + +/* Prepare variable info for prototype. */ +static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar) +{ + VarInfo *vstack = fs->ls->vstack; + MSize i, n; + BCPos lastpc; + lj_str_resetbuf(&ls->sb); /* Copy to temp. string buffer. */ + /* Store upvalue names. */ + for (i = 0, n = fs->nuv; i < n; i++) { + GCstr *s = strref(vstack[fs->uvloc[i].vidx].name); + MSize len = s->len+1; + fs_buf_need(ls, len); + fs_buf_str(ls, strdata(s), len); + } + *ofsvar = ls->sb.n; + vstack += fs->vbase; + lastpc = 0; + /* Store local variable names and compressed ranges. */ + for (i = 0, n = ls->vtop - fs->vbase; i < n; i++) { + GCstr *s = strref(vstack[i].name); + BCPos startpc = vstack[i].startpc, endpc = vstack[i].endpc; + if ((uintptr_t)s < VARNAME__MAX) { + fs_buf_need(ls, 1 + 2*5); + ls->sb.buf[ls->sb.n++] = (uint8_t)(uintptr_t)s; + } else { + MSize len = s->len+1; + fs_buf_need(ls, len + 2*5); + fs_buf_str(ls, strdata(s), len); + } + fs_buf_uleb128(ls, startpc-lastpc); + fs_buf_uleb128(ls, endpc-startpc); + lastpc = startpc; + } + fs_buf_need(ls, 1); + ls->sb.buf[ls->sb.n++] = '\0'; /* Terminator for varinfo. */ + return ls->sb.n; +} + +/* Fixup variable info for prototype. */ +static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar) +{ + setmref(pt->uvinfo, p); + setmref(pt->varinfo, (char *)p + ofsvar); + memcpy(p, ls->sb.buf, ls->sb.n); /* Copy from temp. string buffer. */ +} +#else + +/* Initialize with empty debug info, if disabled. 
*/ +#define fs_prep_line(fs, numline) (UNUSED(numline), 0) +#define fs_fixup_line(fs, pt, li, numline) \ + pt->firstline = pt->numline = 0, setmref((pt)->lineinfo, NULL) +#define fs_prep_var(ls, fs, ofsvar) (UNUSED(ofsvar), 0) +#define fs_fixup_var(ls, pt, p, ofsvar) \ + setmref((pt)->uvinfo, NULL), setmref((pt)->varinfo, NULL) + +#endif + +/* Check if bytecode op returns. */ +static int bcopisret(BCOp op) +{ + switch (op) { + case BC_CALLMT: case BC_CALLT: + case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1: + return 1; + default: + return 0; + } +} + +/* Fixup return instruction for prototype. */ +static void fs_fixup_ret(FuncState *fs) +{ + BCPos lastpc = fs->pc; + if (lastpc <= fs->lasttarget || !bcopisret(bc_op(fs->bcbase[lastpc-1].ins))) { + if (fs->flags & PROTO_CHILD) + bcemit_AJ(fs, BC_UCLO, 0, 0); + bcemit_AD(fs, BC_RET0, 0, 1); /* Need final return. */ + } + /* May need to fixup returns encoded before first function was created. */ + if (fs->flags & PROTO_FIXUP_RETURN) { + BCPos pc; + for (pc = 0; pc < lastpc; pc++) { + BCIns ins = fs->bcbase[pc].ins; + BCPos offset; + switch (bc_op(ins)) { + case BC_CALLMT: case BC_CALLT: + case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1: + offset = bcemit_INS(fs, ins)-(pc+1)+BCBIAS_J; /* Copy return ins. */ + if (offset > BCMAX_D) + err_syntax(fs->ls, LJ_ERR_XFIXUP); + /* Replace with UCLO plus branch. */ + fs->bcbase[pc].ins = BCINS_AD(BC_UCLO, 0, offset); + break; + case BC_UCLO: + return; /* We're done. */ + default: + break; + } + } + } +} + +/* Finish a FuncState and return the new prototype. */ +static GCproto *fs_finish(LexState *ls, BCLine line) +{ + lua_State *L = ls->L; + FuncState *fs = ls->fs; + BCLine numline = line - fs->linedefined; + size_t sizept, ofsk, ofsuv, ofsli, ofsdbg, ofsvar; + GCproto *pt; + + /* Apply final fixups. */ + lua_assert(fs->bl == NULL); + fs_fixup_ret(fs); + var_remove(ls, 0); + + /* Calculate total size of prototype including all colocated arrays. */ + sizept = sizeof(GCproto) + fs->pc*sizeof(BCIns) + fs->nkgc*sizeof(GCRef); + sizept = (sizept + sizeof(TValue)-1) & ~(sizeof(TValue)-1); + ofsk = sizept; sizept += fs->nkn*sizeof(TValue); + ofsuv = sizept; sizept += ((fs->nuv+1)&~1)*2; + ofsli = sizept; sizept += fs_prep_line(fs, numline); + ofsdbg = sizept; sizept += fs_prep_var(ls, fs, &ofsvar); + + /* Allocate prototype and initialize its fields. */ + pt = (GCproto *)lj_mem_newgco(L, (MSize)sizept); + pt->gct = ~LJ_TPROTO; + pt->sizept = (MSize)sizept; + pt->trace = 0; + pt->flags = fs->flags; + pt->numparams = fs->numparams; + pt->framesize = fs->framesize; + setgcref(pt->chunkname, obj2gco(ls->chunkname)); + + /* Close potentially uninitialized gap between bc and kgc. */ + *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(fs->nkgc+1)) = 0; + fs_fixup_bc(fs, pt, (BCIns *)((char *)pt + sizeof(GCproto)), fs->pc); + fs_fixup_k(fs, pt, (void *)((char *)pt + ofsk)); + fs_fixup_uv(fs, pt, (uint16_t *)((char *)pt + ofsuv)); + fs_fixup_line(fs, pt, (void *)((char *)pt + ofsli), numline); + fs_fixup_var(ls, pt, (uint8_t *)((char *)pt + ofsdbg), ofsvar); + + lj_vmevent_send(L, BC, + setprotoV(L, L->top++, pt); + ); + + L->top--; /* Pop table of constants. */ + ls->vtop = fs->vbase; /* Reset variable stack. */ + ls->fs = fs->prev; + lua_assert(ls->fs != NULL || ls->token == TK_eof); + return pt; +} + +/* Initialize a new FuncState. */ +static void fs_init(LexState *ls, FuncState *fs) +{ + lua_State *L = ls->L; + fs->prev = ls->fs; ls->fs = fs; /* Append to list. 
*/ + fs->ls = ls; + fs->vbase = ls->vtop; + fs->L = L; + fs->pc = 0; + fs->lasttarget = 0; + fs->jpc = NO_JMP; + fs->freereg = 0; + fs->nkgc = 0; + fs->nkn = 0; + fs->nactvar = 0; + fs->nuv = 0; + fs->bl = NULL; + fs->flags = 0; + fs->framesize = 1; /* Minimum frame size. */ + fs->kt = lj_tab_new(L, 0, 0); + /* Anchor table of constants in stack to avoid being collected. */ + settabV(L, L->top, fs->kt); + incr_top(L); +} + +/* -- Expressions --------------------------------------------------------- */ + +/* Forward declaration. */ +static void expr(LexState *ls, ExpDesc *v); + +/* Return string expression. */ +static void expr_str(LexState *ls, ExpDesc *e) +{ + expr_init(e, VKSTR, 0); + e->u.sval = lex_str(ls); +} + +/* Return index expression. */ +static void expr_index(FuncState *fs, ExpDesc *t, ExpDesc *e) +{ + /* Already called: expr_toval(fs, e). */ + t->k = VINDEXED; + if (expr_isnumk(e)) { +#if LJ_DUALNUM + if (tvisint(expr_numtv(e))) { + int32_t k = intV(expr_numtv(e)); + if (checku8(k)) { + t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */ + return; + } + } +#else + lua_Number n = expr_numberV(e); + int32_t k = lj_num2int(n); + if (checku8(k) && n == (lua_Number)k) { + t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */ + return; + } +#endif + } else if (expr_isstrk(e)) { + BCReg idx = const_str(fs, e); + if (idx <= BCMAX_C) { + t->u.s.aux = ~idx; /* -256..-1: const string key */ + return; + } + } + t->u.s.aux = expr_toanyreg(fs, e); /* 0..255: register */ +} + +/* Parse index expression with named field. */ +static void expr_field(LexState *ls, ExpDesc *v) +{ + FuncState *fs = ls->fs; + ExpDesc key; + expr_toanyreg(fs, v); + lj_lex_next(ls); /* Skip dot or colon. */ + expr_str(ls, &key); + expr_index(fs, v, &key); +} + +/* Parse index expression with brackets. */ +static void expr_bracket(LexState *ls, ExpDesc *v) +{ + lj_lex_next(ls); /* Skip '['. */ + expr(ls, v); + expr_toval(ls->fs, v); + lex_check(ls, ']'); +} + +/* Get value of constant expression. */ +static void expr_kvalue(TValue *v, ExpDesc *e) +{ + if (e->k <= VKTRUE) { + setitype(v, ~(uint32_t)e->k); + } else if (e->k == VKSTR) { + setgcref(v->gcr, obj2gco(e->u.sval)); + setitype(v, LJ_TSTR); + } else { + lua_assert(tvisnumber(expr_numtv(e))); + *v = *expr_numtv(e); + } +} + +/* Parse table constructor expression. */ +static void expr_table(LexState *ls, ExpDesc *e) +{ + FuncState *fs = ls->fs; + BCLine line = ls->linenumber; + GCtab *t = NULL; + int vcall = 0, needarr = 0; + int32_t narr = 1; /* First array index. */ + uint32_t nhash = 0; /* Number of hash entries. */ + BCReg freg = fs->freereg; + BCPos pc = bcemit_AD(fs, BC_TNEW, freg, 0); + expr_init(e, VNONRELOC, freg); + bcreg_reserve(fs, 1); + freg++; + lex_check(ls, '{'); + while (ls->token != '}') { + ExpDesc key, val; + vcall = 0; + if (ls->token == '[') { + expr_bracket(ls, &key); /* Already calls expr_toval. */ + if (!expr_isk(&key)) expr_index(fs, e, &key); + if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++; + lex_check(ls, '='); + } else if (ls->token == TK_name && lj_lex_lookahead(ls) == '=') { + expr_str(ls, &key); + lex_check(ls, '='); + nhash++; + } else { + expr_init(&key, VKNUM, 0); + setintV(&key.u.nval, narr); + narr++; + needarr = vcall = 1; + } + expr(ls, &val); + if (expr_isk_nojump(&val) && expr_isk(&key) && key.k != VKNIL) { + TValue k; + if (!t) { /* Create template table on demand. 
*/ + BCReg kidx; + t = lj_tab_new(fs->L, 0, 0); + kidx = const_gc(fs, obj2gco(t), LJ_TTAB); + fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx); + } + vcall = 0; + expr_kvalue(&k, &key); + expr_kvalue(lj_tab_set(fs->L, t, &k), &val); + lj_gc_anybarriert(fs->L, t); + } else { + if (val.k != VCALL) { expr_toanyreg(fs, &val); vcall = 0; } + if (expr_isk(&key)) expr_index(fs, e, &key); + bcemit_store(fs, e, &val); + } + fs->freereg = freg; + if (!lex_opt(ls, ',') && !lex_opt(ls, ';')) break; + } + lex_match(ls, '}', '{', line); + if (vcall) { + BCInsLine *ilp = &fs->bcbase[fs->pc-1]; + ExpDesc en; + lua_assert(bc_a(ilp->ins) == freg && + bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB)); + expr_init(&en, VKNUM, 0); + en.u.nval.u32.lo = narr-1; + en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */ + if (narr > 256) { fs->pc--; ilp--; } + ilp->ins = BCINS_AD(BC_TSETM, freg, const_num(fs, &en)); + setbc_b(&ilp[-1].ins, 0); + } + if (pc == fs->pc-1) { /* Make expr relocable if possible. */ + e->u.s.info = pc; + fs->freereg--; + e->k = VRELOCABLE; + } else { + e->k = VNONRELOC; /* May have been changed by expr_index. */ + } + if (!t) { /* Construct TNEW RD: hhhhhaaaaaaaaaaa. */ + BCIns *ip = &fs->bcbase[pc].ins; + if (!needarr) narr = 0; + else if (narr < 3) narr = 3; + else if (narr > 0x7ff) narr = 0x7ff; + setbc_d(ip, (uint32_t)narr|(hsize2hbits(nhash)<<11)); + } +} + +/* Parse function parameters. */ +static BCReg parse_params(LexState *ls, int needself) +{ + FuncState *fs = ls->fs; + BCReg nparams = 0; + lex_check(ls, '('); + if (needself) + var_new_lit(ls, nparams++, "self"); + if (ls->token != ')') { + do { + if (ls->token == TK_name) { + var_new(ls, nparams++, lex_str(ls)); + } else if (ls->token == TK_dots) { + lj_lex_next(ls); + fs->flags |= PROTO_VARARG; + break; + } else { + err_syntax(ls, LJ_ERR_XPARAM); + } + } while (lex_opt(ls, ',')); + } + var_add(ls, nparams); + lua_assert(fs->nactvar == nparams); + bcreg_reserve(fs, nparams); + lex_check(ls, ')'); + return nparams; +} + +/* Forward declaration. */ +static void parse_chunk(LexState *ls); + +/* Parse body of a function. */ +static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line) +{ + FuncState fs, *pfs = ls->fs; + GCproto *pt; + ptrdiff_t oldbase = pfs->bcbase - ls->bcstack; + fs_init(ls, &fs); + fs.linedefined = line; + fs.numparams = (uint8_t)parse_params(ls, needself); + fs.bcbase = pfs->bcbase + pfs->pc; + fs.bclim = pfs->bclim - pfs->pc; + bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */ + parse_chunk(ls); + if (ls->token != TK_end) lex_match(ls, TK_end, TK_function, line); + pt = fs_finish(ls, (ls->lastline = ls->linenumber)); + pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */ + pfs->bclim = (BCPos)(ls->sizebcstack - oldbase); + /* Store new prototype in the constant array of the parent. */ + expr_init(e, VRELOCABLE, + bcemit_AD(pfs, BC_FNEW, 0, const_gc(pfs, obj2gco(pt), LJ_TPROTO))); +#if LJ_HASFFI + pfs->flags |= (fs.flags & PROTO_FFI); +#endif + if (!(pfs->flags & PROTO_CHILD)) { + if (pfs->flags & PROTO_HAS_RETURN) + pfs->flags |= PROTO_FIXUP_RETURN; + pfs->flags |= PROTO_CHILD; + } + lj_lex_next(ls); +} + +/* Parse expression list. Last expression is left open. */ +static BCReg expr_list(LexState *ls, ExpDesc *v) +{ + BCReg n = 1; + expr(ls, v); + while (lex_opt(ls, ',')) { + expr_tonextreg(ls->fs, v); + expr(ls, v); + n++; + } + return n; +} + +/* Parse function argument list. 
*/ +static void parse_args(LexState *ls, ExpDesc *e) +{ + FuncState *fs = ls->fs; + ExpDesc args; + BCIns ins; + BCReg base; + BCLine line = ls->linenumber; + if (ls->token == '(') { + if (line != ls->lastline) + err_syntax(ls, LJ_ERR_XAMBIG); + lj_lex_next(ls); + if (ls->token == ')') { /* f(). */ + args.k = VVOID; + } else { + expr_list(ls, &args); + if (args.k == VCALL) /* f(a, b, g()) or f(a, b, ...). */ + setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */ + } + lex_match(ls, ')', '(', line); + } else if (ls->token == '{') { + expr_table(ls, &args); + } else if (ls->token == TK_string) { + expr_init(&args, VKSTR, 0); + args.u.sval = strV(&ls->tokenval); + lj_lex_next(ls); + } else { + err_syntax(ls, LJ_ERR_XFUNARG); + return; /* Silence compiler. */ + } + lua_assert(e->k == VNONRELOC); + base = e->u.s.info; /* Base register for call. */ + if (args.k == VCALL) { + ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1); + } else { + if (args.k != VVOID) + expr_tonextreg(fs, &args); + ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base); + } + expr_init(e, VCALL, bcemit_INS(fs, ins)); + e->u.s.aux = base; + fs->bcbase[fs->pc - 1].line = line; + fs->freereg = base+1; /* Leave one result by default. */ +} + +/* Parse primary expression. */ +static void expr_primary(LexState *ls, ExpDesc *v) +{ + FuncState *fs = ls->fs; + /* Parse prefix expression. */ + if (ls->token == '(') { + BCLine line = ls->linenumber; + lj_lex_next(ls); + expr(ls, v); + lex_match(ls, ')', '(', line); + expr_discharge(ls->fs, v); + } else if (ls->token == TK_name) { + var_lookup(ls, v); + } else { + err_syntax(ls, LJ_ERR_XSYMBOL); + } + for (;;) { /* Parse multiple expression suffixes. */ + if (ls->token == '.') { + expr_field(ls, v); + } else if (ls->token == '[') { + ExpDesc key; + expr_toanyreg(fs, v); + expr_bracket(ls, &key); + expr_index(fs, v, &key); + } else if (ls->token == ':') { + ExpDesc key; + lj_lex_next(ls); + expr_str(ls, &key); + bcemit_method(fs, v, &key); + parse_args(ls, v); + } else if (ls->token == '(' || ls->token == TK_string || ls->token == '{') { + expr_tonextreg(fs, v); + parse_args(ls, v); + } else { + break; + } + } +} + +/* Parse simple expression. */ +static void expr_simple(LexState *ls, ExpDesc *v) +{ + switch (ls->token) { + case TK_number: + expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokenval)) ? VKCDATA : VKNUM, 0); + copyTV(ls->L, &v->u.nval, &ls->tokenval); + break; + case TK_string: + expr_init(v, VKSTR, 0); + v->u.sval = strV(&ls->tokenval); + break; + case TK_nil: + expr_init(v, VKNIL, 0); + break; + case TK_true: + expr_init(v, VKTRUE, 0); + break; + case TK_false: + expr_init(v, VKFALSE, 0); + break; + case TK_dots: { /* Vararg. */ + FuncState *fs = ls->fs; + BCReg base; + checkcond(ls, fs->flags & PROTO_VARARG, LJ_ERR_XDOTS); + bcreg_reserve(fs, 1); + base = fs->freereg-1; + expr_init(v, VCALL, bcemit_ABC(fs, BC_VARG, base, 2, fs->numparams)); + v->u.s.aux = base; + break; + } + case '{': /* Table constructor. */ + expr_table(ls, v); + return; + case TK_function: + lj_lex_next(ls); + parse_body(ls, v, 0, ls->linenumber); + return; + default: + expr_primary(ls, v); + return; + } + lj_lex_next(ls); +} + +/* Manage syntactic levels to avoid blowing up the stack. */ +static void synlevel_begin(LexState *ls) +{ + if (++ls->level >= LJ_MAX_XLEVEL) + lj_lex_error(ls, 0, LJ_ERR_XLEVELS); +} + +#define synlevel_end(ls) ((ls)->level--) + +/* Convert token to binary operator. 
*/ +static BinOpr token2binop(LexToken tok) +{ + switch (tok) { + case '+': return OPR_ADD; + case '-': return OPR_SUB; + case '*': return OPR_MUL; + case '/': return OPR_DIV; + case '%': return OPR_MOD; + case '^': return OPR_POW; + case TK_concat: return OPR_CONCAT; + case TK_ne: return OPR_NE; + case TK_eq: return OPR_EQ; + case '<': return OPR_LT; + case TK_le: return OPR_LE; + case '>': return OPR_GT; + case TK_ge: return OPR_GE; + case TK_and: return OPR_AND; + case TK_or: return OPR_OR; + default: return OPR_NOBINOPR; + } +} + +/* Priorities for each binary operator. ORDER OPR. */ +static const struct { + uint8_t left; /* Left priority. */ + uint8_t right; /* Right priority. */ +} priority[] = { + {6,6}, {6,6}, {7,7}, {7,7}, {7,7}, /* ADD SUB MUL DIV MOD */ + {10,9}, {5,4}, /* POW CONCAT (right associative) */ + {3,3}, {3,3}, /* EQ NE */ + {3,3}, {3,3}, {3,3}, {3,3}, /* LT GE GT LE */ + {2,2}, {1,1} /* AND OR */ +}; + +#define UNARY_PRIORITY 8 /* Priority for unary operators. */ + +/* Forward declaration. */ +static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit); + +/* Parse unary expression. */ +static void expr_unop(LexState *ls, ExpDesc *v) +{ + BCOp op; + if (ls->token == TK_not) { + op = BC_NOT; + } else if (ls->token == '-') { + op = BC_UNM; + } else if (ls->token == '#') { + op = BC_LEN; + } else { + expr_simple(ls, v); + return; + } + lj_lex_next(ls); + expr_binop(ls, v, UNARY_PRIORITY); + bcemit_unop(ls->fs, op, v); +} + +/* Parse binary expressions with priority higher than the limit. */ +static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit) +{ + BinOpr op; + synlevel_begin(ls); + expr_unop(ls, v); + op = token2binop(ls->token); + while (op != OPR_NOBINOPR && priority[op].left > limit) { + ExpDesc v2; + BinOpr nextop; + lj_lex_next(ls); + bcemit_binop_left(ls->fs, op, v); + /* Parse binary expression with higher priority. */ + nextop = expr_binop(ls, &v2, priority[op].right); + bcemit_binop(ls->fs, op, v, &v2); + op = nextop; + } + synlevel_end(ls); + return op; /* Return unconsumed binary operator (if any). */ +} + +/* Parse expression. */ +static void expr(LexState *ls, ExpDesc *v) +{ + expr_binop(ls, v, 0); /* Priority 0: parse whole expression. */ +} + +/* Assign expression to the next register. */ +static void expr_next(LexState *ls) +{ + ExpDesc e; + expr(ls, &e); + expr_tonextreg(ls->fs, &e); +} + +/* Parse conditional expression. */ +static BCPos expr_cond(LexState *ls) +{ + ExpDesc v; + expr(ls, &v); + if (v.k == VKNIL) v.k = VKFALSE; + bcemit_branch_t(ls->fs, &v); + return v.f; +} + +/* -- Scope handling ------------------------------------------------------ */ + +/* Begin a scope. */ +static void scope_begin(FuncState *fs, FuncScope *bl, int isbreakable) +{ + bl->breaklist = NO_JMP; + bl->isbreakable = (uint8_t)isbreakable; + bl->nactvar = (uint8_t)fs->nactvar; + bl->upval = 0; + bl->prev = fs->bl; + fs->bl = bl; + lua_assert(fs->freereg == fs->nactvar); +} + +/* End a scope. */ +static void scope_end(FuncState *fs) +{ + FuncScope *bl = fs->bl; + fs->bl = bl->prev; + var_remove(fs->ls, bl->nactvar); + fs->freereg = fs->nactvar; + lua_assert(bl->nactvar == fs->nactvar); + /* A scope is either breakable or has upvalues. */ + lua_assert(!bl->isbreakable || !bl->upval); + if (bl->upval) + bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0); + else /* Avoid in upval case, it clears lasttarget and kills UCLO+JMP join. */ + jmp_tohere(fs, bl->breaklist); +} + +/* Mark scope as having an upvalue. 
*/ +static void scope_uvmark(FuncState *fs, BCReg level) +{ + FuncScope *bl; + for (bl = fs->bl; bl && bl->nactvar > level; bl = bl->prev) + ; + if (bl) + bl->upval = 1; +} + +/* Parse 'break' statement. */ +static void parse_break(LexState *ls) +{ + FuncState *fs = ls->fs; + FuncScope *bl; + BCReg savefr; + int upval = 0; + for (bl = fs->bl; bl && !bl->isbreakable; bl = bl->prev) + upval |= bl->upval; /* Collect upvalues in intervening scopes. */ + if (!bl) /* Error if no breakable scope found. */ + err_syntax(ls, LJ_ERR_XBREAK); + savefr = fs->freereg; + fs->freereg = bl->nactvar; /* Shrink slots to help data-flow analysis. */ + if (upval) + bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0); /* Close upvalues. */ + jmp_append(fs, &bl->breaklist, bcemit_jmp(fs)); + fs->freereg = savefr; +} + +/* Check for end of block. */ +static int endofblock(LexToken token) +{ + switch (token) { + case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof: + return 1; + default: + return 0; + } +} + +/* Parse 'return' statement. */ +static void parse_return(LexState *ls) +{ + BCIns ins; + FuncState *fs = ls->fs; + lj_lex_next(ls); /* Skip 'return'. */ + fs->flags |= PROTO_HAS_RETURN; + if (endofblock(ls->token) || ls->token == ';') { /* Bare return. */ + ins = BCINS_AD(BC_RET0, 0, 1); + } else { /* Return with one or more values. */ + ExpDesc e; /* Receives the _last_ expression in the list. */ + BCReg nret = expr_list(ls, &e); + if (nret == 1) { /* Return one result. */ + if (e.k == VCALL) { /* Check for tail call. */ + BCIns *ip = bcptr(fs, &e); + /* It doesn't pay off to add BC_VARGT just for 'return ...'. */ + if (bc_op(*ip) == BC_VARG) goto notailcall; + fs->pc--; + ins = BCINS_AD(bc_op(*ip)-BC_CALL+BC_CALLT, bc_a(*ip), bc_c(*ip)); + } else { /* Can return the result from any register. */ + ins = BCINS_AD(BC_RET1, expr_toanyreg(fs, &e), 2); + } + } else { + if (e.k == VCALL) { /* Append all results from a call. */ + notailcall: + setbc_b(bcptr(fs, &e), 0); + ins = BCINS_AD(BC_RETM, fs->nactvar, e.u.s.aux - fs->nactvar); + } else { + expr_tonextreg(fs, &e); /* Force contiguous registers. */ + ins = BCINS_AD(BC_RET, fs->nactvar, nret+1); + } + } + } + if (fs->flags & PROTO_CHILD) + bcemit_AJ(fs, BC_UCLO, 0, 0); /* May need to close upvalues first. */ + bcemit_INS(fs, ins); +} + +/* Parse a block. */ +static void parse_block(LexState *ls) +{ + FuncState *fs = ls->fs; + FuncScope bl; + scope_begin(fs, &bl, 0); + parse_chunk(ls); + lua_assert(bl.breaklist == NO_JMP); + scope_end(fs); +} + +/* -- Assignments --------------------------------------------------------- */ + +/* List of LHS variables. */ +typedef struct LHSVarList { + ExpDesc v; /* LHS variable. */ + struct LHSVarList *prev; /* Link to previous LHS variable. */ +} LHSVarList; + +/* Eliminate write-after-read hazards for local variable assignment. */ +static void assign_hazard(LexState *ls, LHSVarList *lh, const ExpDesc *v) +{ + FuncState *fs = ls->fs; + BCReg reg = v->u.s.info; /* Check against this variable. */ + BCReg tmp = fs->freereg; /* Rename to this temp. register (if needed). */ + int hazard = 0; + for (; lh; lh = lh->prev) { + if (lh->v.k == VINDEXED) { + if (lh->v.u.s.info == reg) { /* t[i], t = 1, 2 */ + hazard = 1; + lh->v.u.s.info = tmp; + } + if (lh->v.u.s.aux == reg) { /* t[i], i = 1, 2 */ + hazard = 1; + lh->v.u.s.aux = tmp; + } + } + } + if (hazard) { + bcemit_AD(fs, BC_MOV, tmp, reg); /* Rename conflicting variable. */ + bcreg_reserve(fs, 1); + } +} + +/* Adjust LHS/RHS of an assignment. 
*/ +static void assign_adjust(LexState *ls, BCReg nvars, BCReg nexps, ExpDesc *e) +{ + FuncState *fs = ls->fs; + int32_t extra = (int32_t)nvars - (int32_t)nexps; + if (e->k == VCALL) { + extra++; /* Compensate for the VCALL itself. */ + if (extra < 0) extra = 0; + setbc_b(bcptr(fs, e), extra+1); /* Fixup call results. */ + if (extra > 1) bcreg_reserve(fs, (BCReg)extra-1); + } else { + if (e->k != VVOID) + expr_tonextreg(fs, e); /* Close last expression. */ + if (extra > 0) { /* Leftover LHS are set to nil. */ + BCReg reg = fs->freereg; + bcreg_reserve(fs, (BCReg)extra); + bcemit_nil(fs, reg, (BCReg)extra); + } + } +} + +/* Recursively parse assignment statement. */ +static void parse_assignment(LexState *ls, LHSVarList *lh, BCReg nvars) +{ + ExpDesc e; + checkcond(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, LJ_ERR_XSYNTAX); + if (lex_opt(ls, ',')) { /* Collect LHS list and recurse upwards. */ + LHSVarList vl; + vl.prev = lh; + expr_primary(ls, &vl.v); + if (vl.v.k == VLOCAL) + assign_hazard(ls, lh, &vl.v); + checklimit(ls->fs, ls->level + nvars, LJ_MAX_XLEVEL, "variable names"); + parse_assignment(ls, &vl, nvars+1); + } else { /* Parse RHS. */ + BCReg nexps; + lex_check(ls, '='); + nexps = expr_list(ls, &e); + if (nexps == nvars) { + if (e.k == VCALL) { + if (bc_op(*bcptr(ls->fs, &e)) == BC_VARG) { /* Vararg assignment. */ + ls->fs->freereg--; + e.k = VRELOCABLE; + } else { /* Multiple call results. */ + e.u.s.info = e.u.s.aux; /* Base of call is not relocatable. */ + e.k = VNONRELOC; + } + } + bcemit_store(ls->fs, &lh->v, &e); + return; + } + assign_adjust(ls, nvars, nexps, &e); + if (nexps > nvars) + ls->fs->freereg -= nexps - nvars; /* Drop leftover regs. */ + } + /* Assign RHS to LHS and recurse downwards. */ + expr_init(&e, VNONRELOC, ls->fs->freereg-1); + bcemit_store(ls->fs, &lh->v, &e); +} + +/* Parse call statement or assignment. */ +static void parse_call_assign(LexState *ls) +{ + FuncState *fs = ls->fs; + LHSVarList vl; + expr_primary(ls, &vl.v); + if (vl.v.k == VCALL) { /* Function call statement. */ + setbc_b(bcptr(fs, &vl.v), 1); /* No results. */ + } else { /* Start of an assignment. */ + vl.prev = NULL; + parse_assignment(ls, &vl, 1); + } +} + +/* Parse 'local' statement. */ +static void parse_local(LexState *ls) +{ + if (lex_opt(ls, TK_function)) { /* Local function declaration. */ + ExpDesc v, b; + FuncState *fs = ls->fs; + var_new(ls, 0, lex_str(ls)); + expr_init(&v, VLOCAL, fs->freereg); + bcreg_reserve(fs, 1); + var_add(ls, 1); + parse_body(ls, &b, 0, ls->linenumber); + bcemit_store(fs, &v, &b); + /* The upvalue is in scope, but the local is only valid after the store. */ + var_get(ls, fs, fs->nactvar - 1).startpc = fs->pc; + } else { /* Local variable declaration. */ + ExpDesc e; + BCReg nexps, nvars = 0; + do { /* Collect LHS. */ + var_new(ls, nvars++, lex_str(ls)); + } while (lex_opt(ls, ',')); + if (lex_opt(ls, '=')) { /* Optional RHS. */ + nexps = expr_list(ls, &e); + } else { /* Or implicitly set to nil. */ + e.k = VVOID; + nexps = 0; + } + assign_adjust(ls, nvars, nexps, &e); + var_add(ls, nvars); + } +} + +/* Parse 'function' statement. */ +static void parse_func(LexState *ls, BCLine line) +{ + FuncState *fs; + ExpDesc v, b; + int needself = 0; + lj_lex_next(ls); /* Skip 'function'. */ + /* Parse function name. */ + var_lookup(ls, &v); + while (ls->token == '.') /* Multiple dot-separated fields. */ + expr_field(ls, &v); + if (ls->token == ':') { /* Optional colon to signify method call. 
*/ + needself = 1; + expr_field(ls, &v); + } + parse_body(ls, &b, needself, line); + fs = ls->fs; + bcemit_store(fs, &v, &b); + fs->bcbase[fs->pc - 1].line = line; /* Set line for the store. */ +} + +/* -- Loop and conditional statements ------------------------------------- */ + +/* Parse 'while' statement. */ +static void parse_while(LexState *ls, BCLine line) +{ + FuncState *fs = ls->fs; + BCPos start, loop, condexit; + FuncScope bl; + lj_lex_next(ls); /* Skip 'while'. */ + start = fs->lasttarget = fs->pc; + condexit = expr_cond(ls); + scope_begin(fs, &bl, 1); + lex_check(ls, TK_do); + loop = bcemit_AD(fs, BC_LOOP, fs->nactvar, 0); + parse_block(ls); + jmp_patch(fs, bcemit_jmp(fs), start); + lex_match(ls, TK_end, TK_while, line); + scope_end(fs); + jmp_tohere(fs, condexit); + jmp_patchins(fs, loop, fs->pc); +} + +/* Parse 'repeat' statement. */ +static void parse_repeat(LexState *ls, BCLine line) +{ + FuncState *fs = ls->fs; + BCPos loop = fs->lasttarget = fs->pc; + BCPos condexit; + FuncScope bl1, bl2; + scope_begin(fs, &bl1, 1); /* Breakable loop scope. */ + scope_begin(fs, &bl2, 0); /* Inner scope. */ + lj_lex_next(ls); /* Skip 'repeat'. */ + bcemit_AD(fs, BC_LOOP, fs->nactvar, 0); + parse_chunk(ls); + lex_match(ls, TK_until, TK_repeat, line); + condexit = expr_cond(ls); /* Parse condition (still inside inner scope). */ + if (!bl2.upval) { /* No upvalues? Just end inner scope. */ + scope_end(fs); + } else { /* Otherwise generate: cond: UCLO+JMP out, !cond: UCLO+JMP loop. */ + parse_break(ls); /* Break from loop and close upvalues. */ + jmp_tohere(fs, condexit); + scope_end(fs); /* End inner scope and close upvalues. */ + condexit = bcemit_jmp(fs); + } + jmp_patch(fs, condexit, loop); /* Jump backwards if !cond. */ + jmp_patchins(fs, loop, fs->pc); + scope_end(fs); /* End loop scope. */ +} + +/* Parse numeric 'for'. */ +static void parse_for_num(LexState *ls, GCstr *varname, BCLine line) +{ + FuncState *fs = ls->fs; + BCReg base = fs->freereg; + FuncScope bl; + BCPos loop, loopend; + /* Hidden control variables. */ + var_new_fixed(ls, FORL_IDX, VARNAME_FOR_IDX); + var_new_fixed(ls, FORL_STOP, VARNAME_FOR_STOP); + var_new_fixed(ls, FORL_STEP, VARNAME_FOR_STEP); + /* Visible copy of index variable. */ + var_new(ls, FORL_EXT, varname); + lex_check(ls, '='); + expr_next(ls); + lex_check(ls, ','); + expr_next(ls); + if (lex_opt(ls, ',')) { + expr_next(ls); + } else { + bcemit_AD(fs, BC_KSHORT, fs->freereg, 1); /* Default step is 1. */ + bcreg_reserve(fs, 1); + } + var_add(ls, 3); /* Hidden control variables. */ + lex_check(ls, TK_do); + loop = bcemit_AJ(fs, BC_FORI, base, NO_JMP); + scope_begin(fs, &bl, 0); /* Scope for visible variables. */ + var_add(ls, 1); + bcreg_reserve(fs, 1); + parse_block(ls); + scope_end(fs); + /* Perform loop inversion. Loop control instructions are at the end. */ + loopend = bcemit_AJ(fs, BC_FORL, base, NO_JMP); + fs->bcbase[loopend].line = line; /* Fix line for control ins. */ + jmp_patchins(fs, loopend, loop+1); + jmp_patchins(fs, loop, fs->pc); +} + +/* Try to predict whether the iterator is next() and specialize the bytecode. +** Detecting next() and pairs() by name is simplistic, but quite effective. +** The interpreter backs off if the check for the closure fails at runtime. 
+*/ +static int predict_next(LexState *ls, FuncState *fs, BCPos pc) +{ + BCIns ins = fs->bcbase[pc].ins; + GCstr *name; + cTValue *o; + switch (bc_op(ins)) { + case BC_MOV: + name = gco2str(gcref(var_get(ls, fs, bc_d(ins)).name)); + break; + case BC_UGET: + name = gco2str(gcref(ls->vstack[fs->uvloc[bc_d(ins)].vidx].name)); + break; + case BC_GGET: + /* There's no inverse index (yet), so lookup the strings. */ + o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "pairs")); + if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins)) + return 1; + o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "next")); + if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins)) + return 1; + return 0; + default: + return 0; + } + return (name->len == 5 && !strcmp(strdata(name), "pairs")) || + (name->len == 4 && !strcmp(strdata(name), "next")); +} + +/* Parse 'for' iterator. */ +static void parse_for_iter(LexState *ls, GCstr *indexname) +{ + FuncState *fs = ls->fs; + ExpDesc e; + BCReg nvars = 0; + BCLine line; + BCReg base = fs->freereg + 3; + BCPos loop, loopend, exprpc = fs->pc; + FuncScope bl; + int isnext; + /* Hidden control variables. */ + var_new_fixed(ls, nvars++, VARNAME_FOR_GEN); + var_new_fixed(ls, nvars++, VARNAME_FOR_STATE); + var_new_fixed(ls, nvars++, VARNAME_FOR_CTL); + /* Visible variables returned from iterator. */ + var_new(ls, nvars++, indexname); + while (lex_opt(ls, ',')) + var_new(ls, nvars++, lex_str(ls)); + lex_check(ls, TK_in); + line = ls->linenumber; + assign_adjust(ls, 3, expr_list(ls, &e), &e); + bcreg_bump(fs, 3); /* The iterator needs another 3 slots (func + 2 args). */ + isnext = (nvars <= 5 && predict_next(ls, fs, exprpc)); + var_add(ls, 3); /* Hidden control variables. */ + lex_check(ls, TK_do); + loop = bcemit_AJ(fs, isnext ? BC_ISNEXT : BC_JMP, base, NO_JMP); + scope_begin(fs, &bl, 0); /* Scope for visible variables. */ + var_add(ls, nvars-3); + bcreg_reserve(fs, nvars-3); + parse_block(ls); + scope_end(fs); + /* Perform loop inversion. Loop control instructions are at the end. */ + jmp_patchins(fs, loop, fs->pc); + bcemit_ABC(fs, isnext ? BC_ITERN : BC_ITERC, base, nvars-3+1, 2+1); + loopend = bcemit_AJ(fs, BC_ITERL, base, NO_JMP); + fs->bcbase[loopend-1].line = line; /* Fix line for control ins. */ + fs->bcbase[loopend].line = line; + jmp_patchins(fs, loopend, loop+1); +} + +/* Parse 'for' statement. */ +static void parse_for(LexState *ls, BCLine line) +{ + FuncState *fs = ls->fs; + GCstr *varname; + FuncScope bl; + scope_begin(fs, &bl, 1); /* Breakable loop scope. */ + lj_lex_next(ls); /* Skip 'for'. */ + varname = lex_str(ls); /* Get first variable name. */ + if (ls->token == '=') + parse_for_num(ls, varname, line); + else if (ls->token == ',' || ls->token == TK_in) + parse_for_iter(ls, varname); + else + err_syntax(ls, LJ_ERR_XFOR); + lex_match(ls, TK_end, TK_for, line); + scope_end(fs); /* Resolve break list. */ +} + +/* Parse condition and 'then' block. */ +static BCPos parse_then(LexState *ls) +{ + BCPos condexit; + lj_lex_next(ls); /* Skip 'if' or 'elseif'. */ + condexit = expr_cond(ls); + lex_check(ls, TK_then); + parse_block(ls); + return condexit; +} + +/* Parse 'if' statement. */ +static void parse_if(LexState *ls, BCLine line) +{ + FuncState *fs = ls->fs; + BCPos flist; + BCPos escapelist = NO_JMP; + flist = parse_then(ls); + while (ls->token == TK_elseif) { /* Parse multiple 'elseif' blocks. */ + jmp_append(fs, &escapelist, bcemit_jmp(fs)); + jmp_tohere(fs, flist); + flist = parse_then(ls); + } + if (ls->token == TK_else) { /* Parse optional 'else' block. 
*/ + jmp_append(fs, &escapelist, bcemit_jmp(fs)); + jmp_tohere(fs, flist); + lj_lex_next(ls); /* Skip 'else'. */ + parse_block(ls); + } else { + jmp_append(fs, &escapelist, flist); + } + jmp_tohere(fs, escapelist); + lex_match(ls, TK_end, TK_if, line); +} + +/* -- Parse statements ---------------------------------------------------- */ + +/* Parse a statement. Returns 1 if it must be the last one in a chunk. */ +static int parse_stmt(LexState *ls) +{ + BCLine line = ls->linenumber; + switch (ls->token) { + case TK_if: + parse_if(ls, line); + break; + case TK_while: + parse_while(ls, line); + break; + case TK_do: + lj_lex_next(ls); + parse_block(ls); + lex_match(ls, TK_end, TK_do, line); + break; + case TK_for: + parse_for(ls, line); + break; + case TK_repeat: + parse_repeat(ls, line); + break; + case TK_function: + parse_func(ls, line); + break; + case TK_local: + lj_lex_next(ls); + parse_local(ls); + break; + case TK_return: + parse_return(ls); + return 1; /* Must be last. */ + case TK_break: + lj_lex_next(ls); + parse_break(ls); + return 1; /* Must be last. */ +#ifdef LUAJIT_ENABLE_LUA52COMPAT + case ';': + lj_lex_next(ls); + break; +#endif + default: + parse_call_assign(ls); + break; + } + return 0; +} + +/* A chunk is a list of statements optionally separated by semicolons. */ +static void parse_chunk(LexState *ls) +{ + int islast = 0; + synlevel_begin(ls); + while (!islast && !endofblock(ls->token)) { + islast = parse_stmt(ls); + lex_opt(ls, ';'); + lua_assert(ls->fs->framesize >= ls->fs->freereg && + ls->fs->freereg >= ls->fs->nactvar); + ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */ + } + synlevel_end(ls); +} + +/* Entry point of bytecode parser. */ +GCproto *lj_parse(LexState *ls) +{ + FuncState fs; + GCproto *pt; + lua_State *L = ls->L; +#ifdef LUAJIT_DISABLE_DEBUGINFO + ls->chunkname = lj_str_newlit(L, "="); +#else + ls->chunkname = lj_str_newz(L, ls->chunkarg); +#endif + setstrV(L, L->top, ls->chunkname); /* Anchor chunkname string. */ + incr_top(L); + ls->level = 0; + fs_init(ls, &fs); + fs.linedefined = 0; + fs.numparams = 0; + fs.bcbase = NULL; + fs.bclim = 0; + fs.flags |= PROTO_VARARG; /* Main chunk is always a vararg func. */ + bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */ + lj_lex_next(ls); /* Read-ahead first token. */ + parse_chunk(ls); + if (ls->token != TK_eof) + err_token(ls, TK_eof); + pt = fs_finish(ls, ls->linenumber); + L->top--; /* Drop chunkname. */ + lua_assert(fs.prev == NULL); + lua_assert(ls->fs == NULL); + lua_assert(pt->sizeuv == 0); + return pt; +} + diff --git a/src/luajit2/src/lj_parse.h b/src/luajit2/src/lj_parse.h new file mode 100644 index 0000000000..0916535926 --- /dev/null +++ b/src/luajit2/src/lj_parse.h @@ -0,0 +1,18 @@ +/* +** Lua parser (source code -> bytecode). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_PARSE_H +#define _LJ_PARSE_H + +#include "lj_obj.h" +#include "lj_lex.h" + +LJ_FUNC GCproto *lj_parse(LexState *ls); +LJ_FUNC GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t l); +#if LJ_HASFFI +LJ_FUNC void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd); +#endif + +#endif diff --git a/src/luajit2/src/lj_record.c b/src/luajit2/src/lj_record.c new file mode 100644 index 0000000000..63d5e4c1ea --- /dev/null +++ b/src/luajit2/src/lj_record.c @@ -0,0 +1,2166 @@ +/* +** Trace recorder (bytecode -> SSA IR). +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_record_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_meta.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_ff.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_ircall.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_record.h" +#include "lj_ffrecord.h" +#include "lj_snap.h" +#include "lj_dispatch.h" +#include "lj_vm.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* Pass IR on to next optimization in chain (FOLD). */ +#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) + +/* Emit raw IR without passing through optimizations. */ +#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J)) + +/* -- Sanity checks ------------------------------------------------------- */ + +#ifdef LUA_USE_ASSERT +/* Sanity check the whole IR -- sloooow. */ +static void rec_check_ir(jit_State *J) +{ + IRRef i, nins = J->cur.nins, nk = J->cur.nk; + lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536); + for (i = nins-1; i >= nk; i--) { + IRIns *ir = IR(i); + uint32_t mode = lj_ir_mode[ir->o]; + IRRef op1 = ir->op1; + IRRef op2 = ir->op2; + switch (irm_op1(mode)) { + case IRMnone: lua_assert(op1 == 0); break; + case IRMref: lua_assert(op1 >= nk); + lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break; + case IRMlit: break; + case IRMcst: lua_assert(i < REF_BIAS); continue; + } + switch (irm_op2(mode)) { + case IRMnone: lua_assert(op2 == 0); break; + case IRMref: lua_assert(op2 >= nk); + lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break; + case IRMlit: break; + case IRMcst: lua_assert(0); break; + } + if (ir->prev) { + lua_assert(ir->prev >= nk); + lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i); + lua_assert(IR(ir->prev)->o == ir->o); + } + } +} + +/* Compare stack slots and frames of the recorder and the VM. */ +static void rec_check_slots(jit_State *J) +{ + BCReg s, nslots = J->baseslot + J->maxslot; + int32_t depth = 0; + cTValue *base = J->L->base - J->baseslot; + lua_assert(J->baseslot >= 1 && J->baseslot < LJ_MAX_JSLOTS); + lua_assert(J->baseslot == 1 || (J->slot[J->baseslot-1] & TREF_FRAME)); + lua_assert(nslots < LJ_MAX_JSLOTS); + for (s = 0; s < nslots; s++) { + TRef tr = J->slot[s]; + if (tr) { + cTValue *tv = &base[s]; + IRRef ref = tref_ref(tr); + IRIns *ir; + lua_assert(ref >= J->cur.nk && ref < J->cur.nins); + ir = IR(ref); + lua_assert(irt_t(ir->t) == tref_t(tr)); + if (s == 0) { + lua_assert(tref_isfunc(tr)); + } else if ((tr & TREF_FRAME)) { + GCfunc *fn = gco2func(frame_gc(tv)); + BCReg delta = (BCReg)(tv - frame_prev(tv)); + lua_assert(tref_isfunc(tr)); + if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir)); + lua_assert(s > delta ? (J->slot[s-delta] & TREF_FRAME) : (s == delta)); + depth++; + } else if ((tr & TREF_CONT)) { + lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void)); + lua_assert((J->slot[s+1] & TREF_FRAME)); + depth++; + } else { + if (tvisnumber(tv)) + lua_assert(tref_isnumber(tr)); /* Could be IRT_INT etc., too. */ + else + lua_assert(itype2irt(tv) == tref_type(tr)); + if (tref_isk(tr)) { /* Compare constants. 
*/ + TValue tvk; + lj_ir_kvalue(J->L, &tvk, ir); + if (!(tvisnum(&tvk) && tvisnan(&tvk))) + lua_assert(lj_obj_equal(tv, &tvk)); + else + lua_assert(tvisnum(tv) && tvisnan(tv)); + } + } + } + } + lua_assert(J->framedepth == depth); +} +#endif + +/* -- Type handling and specialization ------------------------------------ */ + +/* Note: these functions return tagged references (TRef). */ + +/* Specialize a slot to a specific type. Note: slot can be negative! */ +static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode) +{ + /* Caller may set IRT_GUARD in t. */ + TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode); + J->base[slot] = ref; + return ref; +} + +/* Specialize a slot to the runtime type. Note: slot can be negative! */ +static TRef sload(jit_State *J, int32_t slot) +{ + IRType t = itype2irt(&J->L->base[slot]); + TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot, + IRSLOAD_TYPECHECK); + if (irtype_ispri(t)) ref = TREF_PRI(t); /* Canonicalize primitive refs. */ + J->base[slot] = ref; + return ref; +} + +/* Get TRef from slot. Load slot and specialize if not done already. */ +#define getslot(J, s) (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s))) + +/* Get TRef for current function. */ +static TRef getcurrf(jit_State *J) +{ + if (J->base[-1]) + return J->base[-1]; + lua_assert(J->baseslot == 1); + return sloadt(J, -1, IRT_FUNC, IRSLOAD_READONLY); +} + +/* Compare for raw object equality. +** Returns 0 if the objects are the same. +** Returns 1 if they are different, but the same type. +** Returns 2 for two different types. +** Comparisons between primitives always return 1 -- no caller cares about it. +*/ +int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv) +{ + int diff = !lj_obj_equal(av, bv); + if (!tref_isk2(a, b)) { /* Shortcut, also handles primitives. */ + IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a); + IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b); + if (ta != tb) { + /* Widen mixed number/int comparisons to number/number comparison. */ + if (ta == IRT_INT && tb == IRT_NUM) { + a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT); + ta = IRT_NUM; + } else if (ta == IRT_NUM && tb == IRT_INT) { + b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT); + } else { + return 2; /* Two different types are never equal. */ + } + } + emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b); + } + return diff; +} + +/* -- Record loop ops ----------------------------------------------------- */ + +/* Loop event. */ +typedef enum { + LOOPEV_LEAVE, /* Loop is left or not entered. */ + LOOPEV_ENTERLO, /* Loop is entered with a low iteration count left. */ + LOOPEV_ENTER /* Loop is entered. */ +} LoopEvent; + +/* Canonicalize slots: convert integers to numbers. */ +static void canonicalize_slots(jit_State *J) +{ + BCReg s; + if (LJ_DUALNUM) return; + for (s = J->baseslot+J->maxslot-1; s >= 1; s--) { + TRef tr = J->slot[s]; + if (tref_isinteger(tr)) { + IRIns *ir = IR(tref_ref(tr)); + if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY))) + J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); + } + } +} + +/* Stop recording. */ +static void rec_stop(jit_State *J, TraceNo lnk) +{ + lj_trace_end(J); + J->cur.link = (uint16_t)lnk; + /* Looping back at the same stack level? */ + if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) { + if ((J->flags & JIT_F_OPT_LOOP)) /* Shall we try to create a loop? */ + goto nocanon; /* Do not canonicalize or we lose the narrowing. 
*/ + if (J->cur.root) /* Otherwise ensure we always link to the root trace. */ + J->cur.link = J->cur.root; + } + canonicalize_slots(J); +nocanon: + /* Note: all loop ops must set J->pc to the following instruction! */ + lj_snap_add(J); /* Add loop snapshot. */ + J->needsnap = 0; + J->mergesnap = 1; /* In case recording continues. */ +} + +/* Search bytecode backwards for a int/num constant slot initializer. */ +static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t) +{ + /* This algorithm is rather simplistic and assumes quite a bit about + ** how the bytecode is generated. It works fine for FORI initializers, + ** but it won't necessarily work in other cases (e.g. iterator arguments). + ** It doesn't do anything fancy, either (like backpropagating MOVs). + */ + const BCIns *pc, *startpc = proto_bc(J->pt); + for (pc = endpc-1; pc > startpc; pc--) { + BCIns ins = *pc; + BCOp op = bc_op(ins); + /* First try to find the last instruction that stores to this slot. */ + if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) { + return 0; /* Multiple results, e.g. from a CALL or KNIL. */ + } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) { + if (op == BC_KSHORT || op == BC_KNUM) { /* Found const. initializer. */ + /* Now try to verify there's no forward jump across it. */ + const BCIns *kpc = pc; + for (; pc > startpc; pc--) + if (bc_op(*pc) == BC_JMP) { + const BCIns *target = pc+bc_j(*pc)+1; + if (target > kpc && target <= endpc) + return 0; /* Conditional assignment. */ + } + if (op == BC_KSHORT) { + int32_t k = (int32_t)(int16_t)bc_d(ins); + return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k); + } else { + cTValue *tv = proto_knumtv(J->pt, bc_d(ins)); + if (t == IRT_INT) { + int32_t k = numberVint(tv); + if (tvisint(tv) || numV(tv) == (lua_Number)k) /* -0 is ok here. */ + return lj_ir_kint(J, k); + return 0; /* Type mismatch. */ + } else { + return lj_ir_knum(J, numberVnum(tv)); + } + } + } + return 0; /* Non-constant initializer. */ + } + } + return 0; /* No assignment to this slot found? */ +} + +/* Load and optionally convert a FORI argument from a slot. */ +static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode) +{ + int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0; + return sloadt(J, (int32_t)slot, + t + (((mode & IRSLOAD_TYPECHECK) || + (conv && t == IRT_INT && !(mode >> 16))) ? + IRT_GUARD : 0), + mode + conv); +} + +/* Peek before FORI to find a const initializer. Otherwise load from slot. */ +static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot, + IRType t, int mode) +{ + TRef tr = J->base[slot]; + if (!tr) { + tr = find_kinit(J, fori, slot, t); + if (!tr) + tr = fori_load(J, slot, t, mode); + } + return tr; +} + +/* Return the direction of the FOR loop iterator. +** It's important to exactly reproduce the semantics of the interpreter. +*/ +static int rec_for_direction(cTValue *o) +{ + return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0; +} + +/* Simulate the runtime behavior of the FOR loop iterator. */ +static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl) +{ + lua_Number stopv = numberVnum(&o[FORL_STOP]); + lua_Number idxv = numberVnum(&o[FORL_IDX]); + lua_Number stepv = numberVnum(&o[FORL_STEP]); + if (isforl) + idxv += stepv; + if (rec_for_direction(&o[FORL_STEP])) { + if (idxv <= stopv) { + *op = IR_LE; + return idxv + 2*stepv > stopv ? 
LOOPEV_ENTERLO : LOOPEV_ENTER; + } + *op = IR_GT; return LOOPEV_LEAVE; + } else { + if (stopv <= idxv) { + *op = IR_GE; + return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER; + } + *op = IR_LT; return LOOPEV_LEAVE; + } +} + +/* Record checks for FOR loop overflow and step direction. */ +static void rec_for_check(jit_State *J, IRType t, int dir, + TRef stop, TRef step, int init) +{ + if (!tref_isk(step)) { + /* Non-constant step: need a guard for the direction. */ + TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J); + emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero); + /* Add hoistable overflow checks for a narrowed FORL index. */ + if (init && t == IRT_INT) { + if (tref_isk(stop)) { + /* Constant stop: optimize check away or to a range check for step. */ + int32_t k = IR(tref_ref(stop))->i; + if (dir) { + if (k > 0) + emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k)); + } else { + if (k < 0) + emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k)); + } + } else { + /* Stop+step variable: need full overflow check. */ + TRef tr = emitir(IRTGI(IR_ADDOV), step, stop); + emitir(IRTI(IR_USE), tr, 0); /* ADDOV is weak. Avoid dead result. */ + } + } + } else if (init && t == IRT_INT && !tref_isk(stop)) { + /* Constant step: optimize overflow check to a range check for stop. */ + int32_t k = IR(tref_ref(step))->i; + k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k; + emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k)); + } +} + +/* Record a FORL instruction. */ +static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev, + int init) +{ + BCReg ra = bc_a(*fori); + cTValue *tv = &J->L->base[ra]; + TRef idx = J->base[ra+FORL_IDX]; + IRType t = idx ? tref_type(idx) : + (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM; + int mode = IRSLOAD_INHERIT + + ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0); + TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode); + TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode); + int tc, dir = rec_for_direction(&tv[FORL_STEP]); + lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI); + scev->t.irt = t; + scev->dir = dir; + scev->stop = tref_ref(stop); + scev->step = tref_ref(step); + rec_for_check(J, t, dir, stop, step, init); + scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT)); + tc = (LJ_DUALNUM && + !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) && + tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ? + IRSLOAD_TYPECHECK : 0; + if (tc) { + J->base[ra+FORL_STOP] = stop; + J->base[ra+FORL_STEP] = step; + } + if (!idx) + idx = fori_load(J, ra+FORL_IDX, t, + IRSLOAD_INHERIT + tc + (J->scev.start << 16)); + if (!init) + J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step); + J->base[ra+FORL_EXT] = idx; + scev->idx = tref_ref(idx); + J->maxslot = ra+FORL_EXT+1; +} + +/* Record FORL/JFORL or FORI/JFORI. */ +static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl) +{ + BCReg ra = bc_a(*fori); + TValue *tv = &J->L->base[ra]; + TRef *tr = &J->base[ra]; + IROp op; + LoopEvent ev; + TRef stop; + IRType t; + if (isforl) { /* Handle FORL/JFORL opcodes. */ + TRef idx = tr[FORL_IDX]; + if (tref_ref(idx) == J->scev.idx) { + t = J->scev.t.irt; + stop = J->scev.stop; + idx = emitir(IRT(IR_ADD, t), idx, J->scev.step); + tr[FORL_EXT] = tr[FORL_IDX] = idx; + } else { + ScEvEntry scev; + rec_for_loop(J, fori, &scev, 0); + t = scev.t.irt; + stop = scev.stop; + } + } else { /* Handle FORI/JFORI opcodes. 
*/ + BCReg i; + lj_meta_for(J->L, tv); + t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) : + IRT_NUM; + for (i = FORL_IDX; i <= FORL_STEP; i++) { + lua_assert(tref_isnumber_str(tr[i])); + if (tref_isstr(tr[i])) + tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0); + if (t == IRT_INT) { + if (!tref_isinteger(tr[i])) + tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK); + } else { + if (!tref_isnum(tr[i])) + tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT); + } + } + tr[FORL_EXT] = tr[FORL_IDX]; + stop = tr[FORL_STOP]; + rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]), + stop, tr[FORL_STEP], 1); + } + + ev = rec_for_iter(&op, tv, isforl); + if (ev == LOOPEV_LEAVE) { + J->maxslot = ra+FORL_EXT+1; + J->pc = fori+1; + } else { + J->maxslot = ra; + J->pc = fori+bc_j(*fori)+1; + } + lj_snap_add(J); + + emitir(IRTG(op, t), tr[FORL_IDX], stop); + + if (ev == LOOPEV_LEAVE) { + J->maxslot = ra; + J->pc = fori+bc_j(*fori)+1; + } else { + J->maxslot = ra+FORL_EXT+1; + J->pc = fori+1; + } + J->needsnap = 1; + return ev; +} + +/* Record ITERL/JITERL. */ +static LoopEvent rec_iterl(jit_State *J, const BCIns iterins) +{ + BCReg ra = bc_a(iterins); + lua_assert(J->base[ra] != 0); + if (!tref_isnil(J->base[ra])) { /* Looping back? */ + J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */ + J->maxslot = ra-1+bc_b(J->pc[-1]); + J->pc += bc_j(iterins)+1; + return LOOPEV_ENTER; + } else { + J->maxslot = ra-3; + J->pc++; + return LOOPEV_LEAVE; + } +} + +/* Record LOOP/JLOOP. Now, that was easy. */ +static LoopEvent rec_loop(jit_State *J, BCReg ra) +{ + if (ra < J->maxslot) J->maxslot = ra; + J->pc++; + return LOOPEV_ENTER; +} + +/* Check if a loop repeatedly failed to trace because it didn't loop back. */ +static int innerloopleft(jit_State *J, const BCIns *pc) +{ + ptrdiff_t i; + for (i = 0; i < PENALTY_SLOTS; i++) + if (mref(J->penalty[i].pc, const BCIns) == pc) { + if ((J->penalty[i].reason == LJ_TRERR_LLEAVE || + J->penalty[i].reason == LJ_TRERR_LINNER) && + J->penalty[i].val >= 2*PENALTY_MIN) + return 1; + break; + } + return 0; +} + +/* Handle the case when an interpreted loop op is hit. */ +static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev) +{ + if (J->parent == 0) { + if (pc == J->startpc && J->framedepth + J->retdepth == 0) { + /* Same loop? */ + if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */ + lj_trace_err(J, LJ_TRERR_LLEAVE); + rec_stop(J, J->cur.traceno); /* Root trace forms a loop. */ + } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */ + /* It's usually better to abort here and wait until the inner loop + ** is traced. But if the inner loop repeatedly didn't loop back, + ** this indicates a low trip count. In this case try unrolling + ** an inner loop even in a root trace. But it's better to be a bit + ** more conservative here and only do it for very short loops. + */ + if (!innerloopleft(J, pc)) + lj_trace_err(J, LJ_TRERR_LINNER); /* Root trace hit an inner loop. */ + if ((ev != LOOPEV_ENTERLO && + J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0) + lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */ + J->loopref = J->cur.nins; + } + } else if (ev != LOOPEV_LEAVE) { /* Side trace enters an inner loop. */ + J->loopref = J->cur.nins; + if (--J->loopunroll < 0) + lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */ + } /* Side trace continues across a loop that's left or not entered. 
*/ +} + +/* Handle the case when an already compiled loop op is hit. */ +static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev) +{ + if (J->parent == 0) { /* Root trace hit an inner loop. */ + /* Better let the inner loop spawn a side trace back here. */ + lj_trace_err(J, LJ_TRERR_LINNER); + } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */ + J->instunroll = 0; /* Cannot continue across a compiled loop op. */ + if (J->pc == J->startpc && J->framedepth + J->retdepth == 0) + lnk = J->cur.traceno; /* Can form an extra loop. */ + rec_stop(J, lnk); /* Link to the loop. */ + } /* Side trace continues across a loop that's left or not entered. */ +} + +/* -- Record calls and returns -------------------------------------------- */ + +/* Record call setup. */ +static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs) +{ + RecordIndex ix; + TValue *functv = &J->L->base[func]; + TRef trfunc, *fbase = &J->base[func]; + ptrdiff_t i; + for (i = 0; i <= nargs; i++) + (void)getslot(J, func+i); /* Ensure func and all args have a reference. */ + if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */ + ix.tab = fbase[0]; + copyTV(J->L, &ix.tabv, functv); + if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj)) + lj_trace_err(J, LJ_TRERR_NOMM); + for (i = ++nargs; i > 0; i--) /* Shift arguments up. */ + fbase[i] = fbase[i-1]; + fbase[0] = ix.mobj; /* Replace function. */ + functv = &ix.mobjv; + } + + /* Specialize to the runtime value of the called function. */ + trfunc = lj_ir_kfunc(J, funcV(functv)); + emitir(IRTG(IR_EQ, IRT_FUNC), fbase[0], trfunc); + fbase[0] = trfunc | TREF_FRAME; + J->maxslot = (BCReg)nargs; +} + +/* Record call. */ +void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs) +{ + rec_call_setup(J, func, nargs); + /* Bump frame. */ + J->framedepth++; + J->base += func+1; + J->baseslot += func+1; +} + +/* Record tail call. */ +void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs) +{ + rec_call_setup(J, func, nargs); + if (frame_isvarg(J->L->base - 1)) { + BCReg cbase = (BCReg)frame_delta(J->L->base - 1); + if (--J->framedepth < 0) + lj_trace_err(J, LJ_TRERR_NYIRETL); + J->baseslot -= (BCReg)cbase; + J->base -= cbase; + func += cbase; + } + /* Move func + args down. */ + memmove(&J->base[-1], &J->base[func], sizeof(TRef)*(J->maxslot+1)); + /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */ + /* Tailcalls can form a loop, so count towards the loop unroll limit. */ + if (++J->tailcalled > J->loopunroll) + lj_trace_err(J, LJ_TRERR_LUNROLL); +} + +/* Check unroll limits for down-recursion. */ +static int check_downrec_unroll(jit_State *J, GCproto *pt) +{ + IRRef ptref; + for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev) + if (ir_kgc(IR(ptref)) == obj2gco(pt)) { + int count = 0; + IRRef ref; + for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev) + if (IR(ref)->op1 == ptref) + count++; + if (count) { + if (J->pc == J->startpc) { + if (count + J->tailcalled > J->param[JIT_P_recunroll]) + return 1; + } else { + lj_trace_err(J, LJ_TRERR_DOWNREC); + } + } + } + return 0; +} + +/* Record return. */ +void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) +{ + TValue *frame = J->L->base - 1; + ptrdiff_t i; + for (i = 0; i < gotresults; i++) + (void)getslot(J, rbase+i); /* Ensure all results have a reference. */ + while (frame_ispcall(frame)) { /* Immediately resolve pcall() returns. 
*/ + BCReg cbase = (BCReg)frame_delta(frame); + if (--J->framedepth < 0) + lj_trace_err(J, LJ_TRERR_NYIRETL); + lua_assert(J->baseslot > 1); + gotresults++; + rbase += cbase; + J->baseslot -= (BCReg)cbase; + J->base -= cbase; + J->base[--rbase] = TREF_TRUE; /* Prepend true to results. */ + frame = frame_prevd(frame); + } + if (frame_isvarg(frame)) { + BCReg cbase = (BCReg)frame_delta(frame); + if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */ + lj_trace_err(J, LJ_TRERR_NYIRETL); + lua_assert(J->baseslot > 1); + rbase += cbase; + J->baseslot -= (BCReg)cbase; + J->base -= cbase; + frame = frame_prevd(frame); + } + if (frame_islua(frame)) { /* Return to Lua frame. */ + BCIns callins = *(frame_pc(frame)-1); + ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults; + BCReg cbase = bc_a(callins); + GCproto *pt = funcproto(frame_func(frame - (cbase+1))); + if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) { + if (check_downrec_unroll(J, pt)) { + J->maxslot = (BCReg)(rbase + gotresults); + lj_snap_purge(J); + rec_stop(J, J->cur.traceno); /* Down-recursion. */ + return; + } + lj_snap_add(J); + } + for (i = 0; i < nresults; i++) /* Adjust results. */ + J->base[i-1] = i < gotresults ? J->base[rbase+i] : TREF_NIL; + J->maxslot = cbase+(BCReg)nresults; + if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */ + J->framedepth--; + lua_assert(J->baseslot > cbase+1); + J->baseslot -= cbase+1; + J->base -= cbase+1; + } else if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) { + /* Return to lower frame would leave the loop in a root trace. */ + lj_trace_err(J, LJ_TRERR_LLEAVE); + } else { /* Return to lower frame. Guard for the target we return to. */ + TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); + TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame)); + emitir(IRTG(IR_RETF, IRT_P32), trpt, trpc); + J->retdepth++; + J->needsnap = 1; + lua_assert(J->baseslot == 1); + /* Shift result slots up and clear the slots of the new frame below. */ + memmove(J->base + cbase, J->base-1, sizeof(TRef)*nresults); + memset(J->base-1, 0, sizeof(TRef)*(cbase+1)); + } + } else if (frame_iscont(frame)) { /* Return to continuation frame. */ + ASMFunction cont = frame_contf(frame); + BCReg cbase = (BCReg)frame_delta(frame); + if ((J->framedepth -= 2) < 0) + lj_trace_err(J, LJ_TRERR_NYIRETL); + J->baseslot -= (BCReg)cbase; + J->base -= cbase; + J->maxslot = cbase-2; + if (cont == lj_cont_ra) { + /* Copy result to destination slot. */ + BCReg dst = bc_a(*(frame_contpc(frame)-1)); + J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL; + if (dst >= J->maxslot) J->maxslot = dst+1; + } else if (cont == lj_cont_nop) { + /* Nothing to do here. */ + } else if (cont == lj_cont_cat) { + lua_assert(0); + } else { + /* Result type already specialized. */ + lua_assert(cont == lj_cont_condf || cont == lj_cont_condt); + } + } else { + lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */ + } + lua_assert(J->baseslot >= 1); +} + +/* -- Metamethod handling ------------------------------------------------- */ + +/* Prepare to record call to metamethod. 
*/ +static BCReg rec_mm_prep(jit_State *J, ASMFunction cont) +{ + BCReg s, top = curr_proto(J->L)->framesize; + TRef trcont; + setcont(&J->L->base[top], cont); +#if LJ_64 + trcont = lj_ir_kptr(J, (void *)((int64_t)cont - (int64_t)lj_vm_asm_begin)); +#else + trcont = lj_ir_kptr(J, (void *)cont); +#endif + J->base[top] = trcont | TREF_CONT; + J->framedepth++; + for (s = J->maxslot; s < top; s++) + J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */ + return top+1; +} + +/* Record metamethod lookup. */ +int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm) +{ + RecordIndex mix; + GCtab *mt; + if (tref_istab(ix->tab)) { + mt = tabref(tabV(&ix->tabv)->metatable); + mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META); + } else if (tref_isudata(ix->tab)) { + int udtype = udataV(&ix->tabv)->udtype; + mt = tabref(udataV(&ix->tabv)->metatable); + /* The metatables of special userdata objects are treated as immutable. */ + if (udtype != UDTYPE_USERDATA) { + cTValue *mo; + if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) { + /* Specialize to the C library namespace object. */ + emitir(IRTG(IR_EQ, IRT_P32), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv))); + } else { + /* Specialize to the type of userdata. */ + TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE); + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype)); + } + immutable_mt: + mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm)); + if (!mo || tvisnil(mo)) + return 0; /* No metamethod. */ + /* Treat metamethod or index table as immutable, too. */ + if (!(tvisfunc(mo) || tvistab(mo))) + lj_trace_err(J, LJ_TRERR_BADTYPE); + copyTV(J->L, &ix->mobjv, mo); + ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB); + ix->mtv = mt; + ix->mt = TREF_NIL; /* Dummy value for comparison semantics. */ + return 1; /* Got metamethod or index table. */ + } + mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META); + } else { + /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */ + mt = tabref(basemt_obj(J2G(J), &ix->tabv)); + if (mt == NULL) { + ix->mt = TREF_NIL; + return 0; /* No metamethod. */ + } + /* The cdata metatable is treated as immutable. */ + if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt; + ix->mt = mix.tab = lj_ir_ktab(J, mt); + goto nocheck; + } + ix->mt = mt ? mix.tab : TREF_NIL; + emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB)); +nocheck: + if (mt) { + GCstr *mmstr = mmname_str(J2G(J), mm); + cTValue *mo = lj_tab_getstr(mt, mmstr); + if (mo && !tvisnil(mo)) + copyTV(J->L, &ix->mobjv, mo); + ix->mtv = mt; + settabV(J->L, &mix.tabv, mt); + setstrV(J->L, &mix.keyv, mmstr); + mix.key = lj_ir_kstr(J, mmstr); + mix.val = 0; + mix.idxchain = 0; + ix->mobj = lj_record_idx(J, &mix); + return !tref_isnil(ix->mobj); /* 1 if metamethod found, 0 if not. */ + } + return 0; /* No metamethod. */ +} + +/* Record call to arithmetic metamethod. */ +static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm) +{ + /* Set up metamethod call first to save ix->tab and ix->tabv. */ + BCReg func = rec_mm_prep(J, lj_cont_ra); + TRef *base = J->base + func; + TValue *basev = J->L->base + func; + base[1] = ix->tab; base[2] = ix->key; + copyTV(J->L, basev+1, &ix->tabv); + copyTV(J->L, basev+2, &ix->keyv); + if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ + if (mm != MM_unm) { + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, &ix->keyv); + if (lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. 
*/ + goto ok; + } + lj_trace_err(J, LJ_TRERR_NOMM); + } +ok: + base[0] = ix->mobj; + copyTV(J->L, basev+0, &ix->mobjv); + lj_record_call(J, func, 2); + return 0; /* No result yet. */ +} + +/* Record call to __len metamethod. */ +static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv) +{ + RecordIndex ix; + ix.tab = tr; + copyTV(J->L, &ix.tabv, tv); + if (lj_record_mm_lookup(J, &ix, MM_len)) { + BCReg func = rec_mm_prep(J, lj_cont_ra); + TRef *base = J->base + func; + TValue *basev = J->L->base + func; + base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv); + base[1] = tr; copyTV(J->L, basev+1, tv); +#ifdef LUAJIT_ENABLE_LUA52COMPAT + base[2] = tr; copyTV(J->L, basev+2, tv); +#else + base[2] = TREF_NIL; setnilV(basev+2); +#endif + lj_record_call(J, func, 2); + } else { +#ifdef LUAJIT_ENABLE_LUA52COMPAT + if (tref_istab(tr)) + return lj_ir_call(J, IRCALL_lj_tab_len, tr); +#endif + lj_trace_err(J, LJ_TRERR_NOMM); + } + return 0; /* No result yet. */ +} + +/* Call a comparison metamethod. */ +static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op) +{ + BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt); + TRef *base = J->base + func; + TValue *tv = J->L->base + func; + base[0] = ix->mobj; base[1] = ix->val; base[2] = ix->key; + copyTV(J->L, tv+0, &ix->mobjv); + copyTV(J->L, tv+1, &ix->valv); + copyTV(J->L, tv+2, &ix->keyv); + lj_record_call(J, func, 2); +} + +/* Record call to equality comparison metamethod (for tab and udata only). */ +static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op) +{ + ix->tab = ix->val; + copyTV(J->L, &ix->tabv, &ix->valv); + if (lj_record_mm_lookup(J, ix, MM_eq)) { /* Lookup mm on 1st operand. */ + cTValue *bv; + TRef mo1 = ix->mobj; + TValue mo1v; + copyTV(J->L, &mo1v, &ix->mobjv); + /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */ + bv = &ix->keyv; + if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) { + TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META); + emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); + } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) { + TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META); + emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); + } else { /* Lookup metamethod on 2nd operand and compare both. */ + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, bv); + if (!lj_record_mm_lookup(J, ix, MM_eq) || + lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv)) + return; + } + rec_mm_callcomp(J, ix, op); + } +} + +/* Record call to ordered comparison metamethods (for arbitrary objects). */ +static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op) +{ + ix->tab = ix->val; + copyTV(J->L, &ix->tabv, &ix->valv); + while (1) { + MMS mm = (op & 2) ? MM_le : MM_lt; /* Try __le + __lt or only __lt. */ + if (lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ + cTValue *bv; + TRef mo1 = ix->mobj; + TValue mo1v; + copyTV(J->L, &mo1v, &ix->mobjv); + /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */ + bv = &ix->keyv; + if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) { + TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META); + emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); + } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) { + TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META); + emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt); + } else { /* Lookup metamethod on 2nd operand and compare both. 
*/ + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, bv); + if (!lj_record_mm_lookup(J, ix, mm) || + lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv)) + goto nomatch; + } + rec_mm_callcomp(J, ix, op); + return; + } + nomatch: + /* First lookup failed. Retry with __lt and swapped operands. */ + if (!(op & 2)) break; /* Already at __lt. Interpreter will throw. */ + ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab; + copyTV(J->L, &ix->tabv, &ix->keyv); + copyTV(J->L, &ix->keyv, &ix->valv); + copyTV(J->L, &ix->valv, &ix->tabv); + op ^= 3; + } +} + +#if LJ_HASFFI +/* Setup call to cdata comparison metamethod. */ +static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm) +{ + lj_snap_add(J); + if (tref_iscdata(ix->val)) { + ix->tab = ix->val; + copyTV(J->L, &ix->tabv, &ix->valv); + } else { + lua_assert(tref_iscdata(ix->key)); + ix->tab = ix->key; + copyTV(J->L, &ix->tabv, &ix->keyv); + } + lj_record_mm_lookup(J, ix, mm); + rec_mm_callcomp(J, ix, op); +} +#endif + +/* -- Indexed access ------------------------------------------------------ */ + +/* Record bounds-check. */ +static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize) +{ + /* Try to emit invariant bounds checks. */ + if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) == + (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) { + IRRef ref = tref_ref(ikey); + IRIns *ir = IR(ref); + int32_t ofs = 0; + IRRef ofsref = 0; + /* Handle constant offsets. */ + if (ir->o == IR_ADD && irref_isk(ir->op2)) { + ofsref = ir->op2; + ofs = IR(ofsref)->i; + ref = ir->op1; + ir = IR(ref); + } + /* Got scalar evolution analysis results for this reference? */ + if (ref == J->scev.idx) { + int32_t stop; + lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD); + stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]); + /* Runtime value for stop of loop is within bounds? */ + if ((int64_t)stop + ofs < (int64_t)asize) { + /* Emit invariant bounds check for stop. */ + emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop : + emitir(IRTI(IR_ADD), J->scev.stop, ofsref)); + /* Emit invariant bounds check for start, if not const or negative. */ + if (!(J->scev.dir && J->scev.start && + (int64_t)IR(J->scev.start)->i + ofs >= 0)) + emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey); + return; + } + } + } + emitir(IRTGI(IR_ABC), asizeref, ikey); /* Emit regular bounds check. */ +} + +/* Record indexed key lookup. */ +static TRef rec_idx_key(jit_State *J, RecordIndex *ix) +{ + TRef key; + GCtab *t = tabV(&ix->tabv); + ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */ + + /* Integer keys are looked up in the array part first. */ + key = ix->key; + if (tref_isnumber(key)) { + int32_t k = numberVint(&ix->keyv); + if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k) + k = LJ_MAX_ASIZE; + if ((MSize)k < LJ_MAX_ASIZE) { /* Potential array key? */ + TRef ikey = lj_opt_narrow_index(J, key); + TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE); + if ((MSize)k < t->asize) { /* Currently an array key? */ + TRef arrayref; + rec_idx_abc(J, asizeref, ikey, t->asize); + arrayref = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_ARRAY); + return emitir(IRT(IR_AREF, IRT_P32), arrayref, ikey); + } else { /* Currently not in array (may be an array extension)? */ + emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */ + if (k == 0 && tref_isk(key)) + key = lj_ir_knum_zero(J); /* Canonicalize 0 or +-0.0 to +0.0. */ + /* And continue with the hash lookup. 
*/ + } + } else if (!tref_isk(key)) { + /* We can rule out const numbers which failed the integerness test + ** above. But all other numbers are potential array keys. + */ + if (t->asize == 0) { /* True sparse tables have an empty array part. */ + /* Guard that the array part stays empty. */ + TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE); + emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0)); + } else { + lj_trace_err(J, LJ_TRERR_NYITMIX); + } + } + } + + /* Otherwise the key is located in the hash part. */ + if (t->hmask == 0) { /* Shortcut for empty hash part. */ + /* Guard that the hash part stays empty. */ + TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK); + emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0)); + return lj_ir_kkptr(J, niltvg(J2G(J))); + } + if (tref_isinteger(key)) /* Hash keys are based on numbers, not ints. */ + ix->key = key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT); + if (tref_isk(key)) { + /* Optimize lookup of constant hash keys. */ + MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val); + if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) && + hslot <= 65535*(MSize)sizeof(Node)) { + TRef node, kslot; + TRef hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK); + emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask)); + node = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_NODE); + kslot = lj_ir_kslot(J, key, hslot / sizeof(Node)); + return emitir(IRTG(IR_HREFK, IRT_P32), node, kslot); + } + } + /* Fall back to a regular hash lookup. */ + return emitir(IRT(IR_HREF, IRT_P32), ix->tab, key); +} + +/* Determine whether a key is NOT one of the fast metamethod names. */ +static int nommstr(jit_State *J, TRef key) +{ + if (tref_isstr(key)) { + if (tref_isk(key)) { + GCstr *str = ir_kstr(IR(tref_ref(key))); + uint32_t mm; + for (mm = 0; mm <= MM_FAST; mm++) + if (mmname_str(J2G(J), mm) == str) + return 0; /* MUST be one the fast metamethod names. */ + } else { + return 0; /* Variable string key MAY be a metamethod name. */ + } + } + return 1; /* CANNOT be a metamethod name. */ +} + +/* Record indexed load/store. */ +TRef lj_record_idx(jit_State *J, RecordIndex *ix) +{ + TRef xref; + IROp xrefop, loadop; + cTValue *oldv; + + while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */ + /* Never call raw lj_record_idx() on non-table. */ + lua_assert(ix->idxchain != 0); + if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index)) + lj_trace_err(J, LJ_TRERR_NOMM); + handlemm: + if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */ + BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra); + TRef *base = J->base + func; + TValue *tv = J->L->base + func; + base[0] = ix->mobj; base[1] = ix->tab; base[2] = ix->key; + setfuncV(J->L, tv+0, funcV(&ix->mobjv)); + copyTV(J->L, tv+1, &ix->tabv); + copyTV(J->L, tv+2, &ix->keyv); + if (ix->val) { + base[3] = ix->val; + copyTV(J->L, tv+3, &ix->valv); + lj_record_call(J, func, 3); /* mobj(tab, key, val) */ + return 0; + } else { + lj_record_call(J, func, 2); /* res = mobj(tab, key) */ + return 0; /* No result yet. */ + } + } + /* Otherwise retry lookup with metaobject. */ + ix->tab = ix->mobj; + copyTV(J->L, &ix->tabv, &ix->mobjv); + if (--ix->idxchain == 0) + lj_trace_err(J, LJ_TRERR_IDXLOOP); + } + + /* First catch nil and NaN keys for tables. */ + if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) { + if (ix->val) /* Better fail early. 
*/ + lj_trace_err(J, LJ_TRERR_STORENN); + if (tref_isk(ix->key)) { + if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index)) + goto handlemm; + return TREF_NIL; + } + } + + /* Record the key lookup. */ + xref = rec_idx_key(J, ix); + xrefop = IR(tref_ref(xref))->o; + loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD; + /* The lj_meta_tset() inconsistency is gone, but better play safe. */ + oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv; + + if (ix->val == 0) { /* Indexed load */ + IRType t = itype2irt(oldv); + TRef res; + if (oldv == niltvg(J2G(J))) { + emitir(IRTG(IR_EQ, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J)))); + res = TREF_NIL; + } else { + res = emitir(IRTG(loadop, t), xref, 0); + } + if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index)) + goto handlemm; + if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */ + return res; + } else { /* Indexed store. */ + GCtab *mt = tabref(tabV(&ix->tabv)->metatable); + int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val); + if (tvisnil(oldv)) { /* Previous value was nil? */ + /* Need to duplicate the hasmm check for the early guards. */ + int hasmm = 0; + if (ix->idxchain && mt) { + cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex)); + hasmm = mo && !tvisnil(mo); + } + if (hasmm) + emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */ + else if (xrefop == IR_HREF) + emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_P32), + xref, lj_ir_kkptr(J, niltvg(J2G(J)))); + if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) { + lua_assert(hasmm); + goto handlemm; + } + lua_assert(!hasmm); + if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */ + TRef key = ix->key; + if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */ + key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT); + xref = emitir(IRT(IR_NEWREF, IRT_P32), ix->tab, key); + keybarrier = 0; /* NEWREF already takes care of the key barrier. */ + } + } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) { + /* Cannot derive that the previous value was non-nil, must do checks. */ + if (xrefop == IR_HREF) /* Guard against store to niltv. */ + emitir(IRTG(IR_NE, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J)))); + if (ix->idxchain) { /* Metamethod lookup required? */ + /* A check for NULL metatable is cheaper (hoistable) than a load. */ + if (!mt) { + TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META); + emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB)); + } else { + IRType t = itype2irt(oldv); + emitir(IRTG(loadop, t), xref, 0); /* Guard for non-nil value. */ + } + } + } else { + keybarrier = 0; /* Previous non-nil value kept the key alive. */ + } + /* Convert int to number before storing. */ + if (!LJ_DUALNUM && tref_isinteger(ix->val)) + ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT); + emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val); + if (keybarrier || tref_isgcv(ix->val)) + emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0); + /* Invalidate neg. metamethod cache for stores with certain string keys. */ + if (!nommstr(J, ix->key)) { + TRef fref = emitir(IRT(IR_FREF, IRT_P32), ix->tab, IRFL_TAB_NOMM); + emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0)); + } + J->needsnap = 1; + return 0; + } +} + +/* -- Upvalue access ------------------------------------------------------ */ + +/* Record upvalue load/store. 
*/ +static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val) +{ + GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv; + TRef fn = getcurrf(J); + IRRef uref; + int needbarrier = 0; + /* Note: this effectively limits LJ_MAX_UPVAL to 127. */ + uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff); + if (!uvp->closed) { + /* In current stack? */ + if (uvval(uvp) >= tvref(J->L->stack) && + uvval(uvp) < tvref(J->L->maxstack)) { + int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot)); + if (slot >= 0) { /* Aliases an SSA slot? */ + slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */ + /* NYI: add IR to guard that it's still aliasing the same slot. */ + if (val == 0) { + return getslot(J, slot); + } else { + J->base[slot] = val; + if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1); + return 0; + } + } + } + uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_P32), fn, uv)); + } else { + needbarrier = 1; + uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_P32), fn, uv)); + } + if (val == 0) { /* Upvalue load */ + IRType t = itype2irt(uvval(uvp)); + TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0); + if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitive refs. */ + return res; + } else { /* Upvalue store. */ + /* Convert int to number before storing. */ + if (!LJ_DUALNUM && tref_isinteger(val)) + val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT); + emitir(IRT(IR_USTORE, tref_type(val)), uref, val); + if (needbarrier && tref_isgcv(val)) + emitir(IRT(IR_OBAR, IRT_NIL), uref, val); + J->needsnap = 1; + return 0; + } +} + +/* -- Record calls to Lua functions --------------------------------------- */ + +/* Check unroll limits for calls. */ +static void check_call_unroll(jit_State *J) +{ + IRRef fref = tref_ref(J->base[-1]); + int32_t count = 0; + BCReg s; + for (s = J->baseslot - 1; s > 0; s--) + if ((J->slot[s] & TREF_FRAME) && tref_ref(J->slot[s]) == fref) + count++; + if (J->pc == J->startpc) { + if (count + J->tailcalled > J->param[JIT_P_recunroll]) { + J->pc++; + rec_stop(J, J->cur.traceno); /* Up-recursion or tail-recursion. */ + } + } else { + if (count > J->param[JIT_P_callunroll]) + lj_trace_err(J, LJ_TRERR_CUNROLL); + } +} + +/* Record Lua function setup. */ +static void rec_func_setup(jit_State *J) +{ + GCproto *pt = J->pt; + BCReg s, numparams = pt->numparams; + if ((pt->flags & PROTO_NOJIT)) + lj_trace_err(J, LJ_TRERR_CJITOFF); + if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS) + lj_trace_err(J, LJ_TRERR_STACKOV); + /* Fill up missing parameters with nil. */ + for (s = J->maxslot; s < numparams; s++) + J->base[s] = TREF_NIL; + /* The remaining slots should never be read before they are written. */ + J->maxslot = numparams; +} + +/* Record Lua vararg function setup. */ +static void rec_func_vararg(jit_State *J) +{ + GCproto *pt = J->pt; + BCReg s, fixargs, vframe = J->maxslot+1; + lua_assert((pt->flags & PROTO_VARARG)); + if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS) + lj_trace_err(J, LJ_TRERR_STACKOV); + J->base[vframe-1] = J->base[-1]; /* Copy function up. */ + /* Copy fixarg slots up and set their original slots to nil. */ + fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot; + for (s = 0; s < fixargs; s++) { + J->base[vframe+s] = J->base[s]; + J->base[s] = TREF_NIL; + } + J->maxslot = fixargs; + J->framedepth++; + J->base += vframe; + J->baseslot += vframe; +} + +/* Record entry to a Lua function. 
*/ +static void rec_func_lua(jit_State *J) +{ + rec_func_setup(J); + check_call_unroll(J); +} + +/* Record entry to an already compiled function. */ +static void rec_func_jit(jit_State *J, TraceNo lnk) +{ + rec_func_setup(J); + J->instunroll = 0; /* Cannot continue across a compiled function. */ + if (J->pc == J->startpc && J->framedepth + J->retdepth == 0) + lnk = J->cur.traceno; /* Can form an extra tail-recursive loop. */ + rec_stop(J, lnk); /* Link to the function. */ +} + +/* -- Vararg handling ----------------------------------------------------- */ + +/* Detect y = select(x, ...) idiom. */ +static int select_detect(jit_State *J) +{ + BCIns ins = J->pc[1]; + if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) { + cTValue *func = &J->L->base[bc_a(ins)]; + if (tvisfunc(func) && funcV(func)->c.ffid == FF_select) + return 1; + } + return 0; +} + +/* Record vararg instruction. */ +static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults) +{ + int32_t numparams = J->pt->numparams; + ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1; + lua_assert(frame_isvarg(J->L->base-1)); + if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */ + ptrdiff_t i; + if (nvararg < 0) nvararg = 0; + if (nresults == -1) { + nresults = nvararg; + J->maxslot = dst + (BCReg)nvararg; + } else if (dst + nresults > J->maxslot) { + J->maxslot = dst + (BCReg)nresults; + } + for (i = 0; i < nresults; i++) { + J->base[dst+i] = i < nvararg ? J->base[i - nvararg - 1] : TREF_NIL; + lua_assert(J->base[dst+i] != 0); + } + } else { /* Unknown number of varargs passed to trace. */ + TRef fr = emitir(IRTI(IR_SLOAD), 0, IRSLOAD_READONLY|IRSLOAD_FRAME); + int32_t frofs = 8*(1+numparams)+FRAME_VARG; + if (nresults >= 0) { /* Known fixed number of results. */ + ptrdiff_t i; + if (nvararg > 0) { + ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg; + TRef vbase; + if (nvararg >= nresults) + emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults)); + else + emitir(IRTGI(IR_EQ), fr, lj_ir_kint(J, frame_ftsz(J->L->base-1))); + vbase = emitir(IRTI(IR_SUB), REF_BASE, fr); + vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8)); + for (i = 0; i < nload; i++) { + IRType t = itype2irt(&J->L->base[i-1-nvararg]); + TRef aref = emitir(IRT(IR_AREF, IRT_P32), + vbase, lj_ir_kint(J, (int32_t)i)); + TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0); + if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */ + J->base[dst+i] = tr; + } + } else { + emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs)); + nvararg = 0; + } + for (i = nvararg; i < nresults; i++) + J->base[dst+i] = TREF_NIL; + if (dst + (BCReg)nresults > J->maxslot) + J->maxslot = dst + (BCReg)nresults; + } else if (select_detect(J)) { /* y = select(x, ...) */ + TRef tridx = J->base[dst-1]; + TRef tr = TREF_NIL; + ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]); + if (idx < 0) goto nyivarg; + if (idx != 0 && !tref_isinteger(tridx)) + tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX); + if (idx != 0 && tref_isk(tridx)) { + emitir(IRTGI(idx <= nvararg ? IR_GE : IR_LT), + fr, lj_ir_kint(J, frofs+8*(int32_t)idx)); + frofs -= 8; /* Bias for 1-based index. */ + } else if (idx <= nvararg) { /* Compute size. 
*/ + TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs)); + if (numparams) + emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0)); + tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3)); + if (idx != 0) { + tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1)); + rec_idx_abc(J, tr, tridx, (uint32_t)nvararg); + } + } else { + TRef tmp = lj_ir_kint(J, frofs); + if (idx != 0) { + TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3)); + tmp = emitir(IRTI(IR_ADD), tmp2, tmp); + } else { + tr = lj_ir_kint(J, 0); + } + emitir(IRTGI(IR_LT), fr, tmp); + } + if (idx != 0 && idx <= nvararg) { + IRType t; + TRef aref, vbase = emitir(IRTI(IR_SUB), REF_BASE, fr); + vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8)); + t = itype2irt(&J->L->base[idx-2-nvararg]); + aref = emitir(IRT(IR_AREF, IRT_P32), vbase, tridx); + tr = emitir(IRTG(IR_VLOAD, t), aref, 0); + if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */ + } + J->base[dst-2] = tr; + J->maxslot = dst-1; + J->bcskip = 2; /* Skip CALLM + select. */ + } else { + nyivarg: + setintV(&J->errinfo, BC_VARG); + lj_trace_err_info(J, LJ_TRERR_NYIBC); + } + } +} + +/* -- Record allocations -------------------------------------------------- */ + +static TRef rec_tnew(jit_State *J, uint32_t ah) +{ + uint32_t asize = ah & 0x7ff; + uint32_t hbits = ah >> 11; + if (asize == 0x7ff) asize = 0x801; + return emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits); +} + +/* -- Record bytecode ops ------------------------------------------------- */ + +/* Prepare for comparison. */ +static void rec_comp_prep(jit_State *J) +{ + /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */ + if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins) + emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0); + lj_snap_add(J); +} + +/* Fixup comparison. */ +static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond) +{ + BCIns jmpins = pc[1]; + const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0); + SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; + /* Set PC to opposite target to avoid re-recording the comp. in side trace. */ + J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc); + J->needsnap = 1; + if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins); + lj_snap_shrink(J); /* Shrink last snapshot if possible. */ +} + +/* Record the next bytecode instruction (_before_ it's executed). */ +void lj_record_ins(jit_State *J) +{ + cTValue *lbase; + RecordIndex ix; + const BCIns *pc; + BCIns ins; + BCOp op; + TRef ra, rb, rc; + + /* Perform post-processing action before recording the next instruction. */ + if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) { + switch (J->postproc) { + case LJ_POST_FIXCOMP: /* Fixup comparison. */ + pc = frame_pc(&J2G(J)->tmptv); + rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1))); + /* fallthrough */ + case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */ + if (!tvistruecond(&J2G(J)->tmptv2)) + J->fold.ins.o ^= 1; /* Flip guard to opposite. */ + lj_opt_fold(J); /* Emit pending guard. */ + /* fallthrough */ + case LJ_POST_FIXBOOL: + if (!tvistruecond(&J2G(J)->tmptv2)) { + BCReg s; + for (s = 0; s < J->maxslot; s++) /* Fixup stack slot (if any). */ + if (J->base[s] == TREF_TRUE && tvisfalse(&J->L->base[s])) { + J->base[s] = TREF_FALSE; + break; + } + } + break; + case LJ_POST_FFRETRY: /* Suppress recording of retried fast function. 
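+** The retried fast function reappears as a pseudo-opcode at or above
+** BC__MAX; in that case recording simply returns instead of re-entering
+** the fast-function recorder.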
*/ + if (bc_op(*J->pc) >= BC__MAX) + return; + break; + default: lua_assert(0); break; + } + J->postproc = LJ_POST_NONE; + } + + /* Need snapshot before recording next bytecode (e.g. after a store). */ + if (J->needsnap) { + J->needsnap = 0; + lj_snap_purge(J); + lj_snap_add(J); + J->mergesnap = 1; + } + + /* Skip some bytecodes. */ + if (LJ_UNLIKELY(J->bcskip > 0)) { + J->bcskip--; + return; + } + + /* Record only closed loops for root traces. */ + pc = J->pc; + if (J->framedepth == 0 && + (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent) + lj_trace_err(J, LJ_TRERR_LLEAVE); + +#ifdef LUA_USE_ASSERT + rec_check_slots(J); + rec_check_ir(J); +#endif + + /* Keep a copy of the runtime values of var/num/str operands. */ +#define rav (&ix.valv) +#define rbv (&ix.tabv) +#define rcv (&ix.keyv) + + lbase = J->L->base; + ins = *pc; + op = bc_op(ins); + ra = bc_a(ins); + ix.val = 0; + switch (bcmode_a(op)) { + case BCMvar: + copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break; + default: break; /* Handled later. */ + } + rb = bc_b(ins); + rc = bc_c(ins); + switch (bcmode_b(op)) { + case BCMnone: rb = 0; rc = bc_d(ins); break; /* Upgrade rc to 'rd'. */ + case BCMvar: + copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break; + default: break; /* Handled later. */ + } + switch (bcmode_c(op)) { + case BCMvar: + copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break; + case BCMpri: setitype(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break; + case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc); + copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) : + lj_ir_knumint(J, numV(tv)); } break; + case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc)); + setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break; + default: break; /* Handled later. */ + } + + switch (op) { + + /* -- Comparison ops ---------------------------------------------------- */ + + case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: +#if LJ_HASFFI + if (tref_iscdata(ra) || tref_iscdata(rc)) { + rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt); + break; + } +#endif + /* Emit nothing for two numeric or string consts. */ + if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) { + IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra); + IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc); + int irop; + if (ta != tc) { + /* Widen mixed number/int comparisons to number/number comparison. */ + if (ta == IRT_INT && tc == IRT_NUM) { + ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT); + ta = IRT_NUM; + } else if (ta == IRT_NUM && tc == IRT_INT) { + rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); + } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) && + (tc == IRT_FALSE || tc == IRT_TRUE))) { + break; /* Interpreter will throw for two different types. */ + } + } + rec_comp_prep(J); + irop = (int)op - (int)BC_ISLT + (int)IR_LT; + if (ta == IRT_NUM) { + if ((irop & 1)) irop ^= 4; /* ISGE/ISGT are unordered. 
*/ + if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop)) + irop ^= 5; + } else if (ta == IRT_INT) { + if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop)) + irop ^= 1; + } else if (ta == IRT_STR) { + if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1; + ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc); + rc = lj_ir_kint(J, 0); + ta = IRT_INT; + } else { + rec_mm_comp(J, &ix, (int)op); + break; + } + emitir(IRTG(irop, ta), ra, rc); + rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1); + } + break; + + case BC_ISEQV: case BC_ISNEV: + case BC_ISEQS: case BC_ISNES: + case BC_ISEQN: case BC_ISNEN: + case BC_ISEQP: case BC_ISNEP: +#if LJ_HASFFI + if (tref_iscdata(ra) || tref_iscdata(rc)) { + rec_mm_comp_cdata(J, &ix, op, MM_eq); + break; + } +#endif + /* Emit nothing for two non-table, non-udata consts. */ + if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) { + int diff; + rec_comp_prep(J); + diff = lj_record_objcmp(J, ra, rc, rav, rcv); + if (diff == 1 && (tref_istab(ra) || tref_isudata(ra))) { + /* Only check __eq if different, but the same type (table or udata). */ + rec_mm_equal(J, &ix, (int)op); + break; + } + rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff); + } + break; + + /* -- Unary test and copy ops ------------------------------------------- */ + + case BC_ISTC: case BC_ISFC: + if ((op & 1) == tref_istruecond(rc)) + rc = 0; /* Don't store if condition is not true. */ + /* fallthrough */ + case BC_IST: case BC_ISF: /* Type specialization suffices. */ + if (bc_a(pc[1]) < J->maxslot) + J->maxslot = bc_a(pc[1]); /* Shrink used slots. */ + break; + + /* -- Unary ops --------------------------------------------------------- */ + + case BC_NOT: + /* Type specialization already forces const result. */ + rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE; + break; + + case BC_LEN: + if (tref_isstr(rc)) + rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN); +#ifndef LUAJIT_ENABLE_LUA52COMPAT + else if (tref_istab(rc)) + rc = lj_ir_call(J, IRCALL_lj_tab_len, rc); +#endif + else + rc = rec_mm_len(J, rc, rcv); + break; + + /* -- Arithmetic ops ---------------------------------------------------- */ + + case BC_UNM: + if (tref_isnumber_str(rc)) { + rc = lj_opt_narrow_unm(J, rc, rcv); + } else { + ix.tab = rc; + copyTV(J->L, &ix.tabv, rcv); + rc = rec_mm_arith(J, &ix, MM_unm); + } + break; + + case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV: + /* Swap rb/rc and rbv/rcv. rav is temp. */ + ix.tab = rc; ix.key = rc = rb; rb = ix.tab; + copyTV(J->L, rav, rbv); + copyTV(J->L, rbv, rcv); + copyTV(J->L, rcv, rav); + if (op == BC_MODNV) + goto recmod; + /* fallthrough */ + case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN: + case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: { + MMS mm = bcmode_mm(op); + if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) + rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv, + (int)mm - (int)MM_add + (int)IR_ADD); + else + rc = rec_mm_arith(J, &ix, mm); + break; + } + + case BC_MODVN: case BC_MODVV: + recmod: + if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) + rc = lj_opt_narrow_mod(J, rb, rc, rcv); + else + rc = rec_mm_arith(J, &ix, MM_mod); + break; + + case BC_POW: + if (tref_isnumber_str(rb) && tref_isnumber_str(rc)) + rc = lj_opt_narrow_pow(J, lj_ir_tonum(J, rb), rc, rcv); + else + rc = rec_mm_arith(J, &ix, MM_pow); + break; + + /* -- Constant and move ops --------------------------------------------- */ + + case BC_MOV: + /* Clear gap of method call to avoid resurrecting previous refs. 
*/ + if (ra > J->maxslot) J->base[ra-1] = 0; + break; + case BC_KSTR: case BC_KNUM: case BC_KPRI: + break; + case BC_KSHORT: + rc = lj_ir_kint(J, (int32_t)(int16_t)rc); + break; + case BC_KNIL: + while (ra <= rc) + J->base[ra++] = TREF_NIL; + if (rc >= J->maxslot) J->maxslot = rc+1; + break; +#if LJ_HASFFI + case BC_KCDATA: + rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA); + break; +#endif + + /* -- Upvalue and function ops ------------------------------------------ */ + + case BC_UGET: + rc = rec_upvalue(J, rc, 0); + break; + case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP: + rec_upvalue(J, ra, rc); + break; + + /* -- Table ops --------------------------------------------------------- */ + + case BC_GGET: case BC_GSET: + settabV(J->L, &ix.tabv, tabref(J->fn->l.env)); + ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV); + ix.idxchain = LJ_MAX_IDXCHAIN; + rc = lj_record_idx(J, &ix); + break; + + case BC_TGETB: case BC_TSETB: + setintV(&ix.keyv, (int32_t)rc); + ix.key = lj_ir_kint(J, (int32_t)rc); + /* fallthrough */ + case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS: + ix.idxchain = LJ_MAX_IDXCHAIN; + rc = lj_record_idx(J, &ix); + break; + + case BC_TNEW: + rc = rec_tnew(J, rc); + break; + case BC_TDUP: + rc = emitir(IRTG(IR_TDUP, IRT_TAB), + lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0); + break; + + /* -- Calls and vararg handling ----------------------------------------- */ + + case BC_ITERC: + J->base[ra] = getslot(J, ra-3); + J->base[ra+1] = getslot(J, ra-2); + J->base[ra+2] = getslot(J, ra-1); + { /* Do the actual copy now because lj_record_call needs the values. */ + TValue *b = &J->L->base[ra]; + copyTV(J->L, b, b-3); + copyTV(J->L, b+1, b-2); + copyTV(J->L, b+2, b-1); + } + lj_record_call(J, ra, (ptrdiff_t)rc-1); + break; + + /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */ + case BC_CALLM: + rc = (BCReg)(J->L->top - J->L->base) - ra; + /* fallthrough */ + case BC_CALL: + lj_record_call(J, ra, (ptrdiff_t)rc-1); + break; + + case BC_CALLMT: + rc = (BCReg)(J->L->top - J->L->base) - ra; + /* fallthrough */ + case BC_CALLT: + lj_record_tailcall(J, ra, (ptrdiff_t)rc-1); + break; + + case BC_VARG: + rec_varg(J, ra, (ptrdiff_t)rb-1); + break; + + /* -- Returns ----------------------------------------------------------- */ + + case BC_RETM: + /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */ + rc = (BCReg)(J->L->top - J->L->base) - ra + 1; + /* fallthrough */ + case BC_RET: case BC_RET0: case BC_RET1: + lj_record_ret(J, ra, (ptrdiff_t)rc-1); + break; + + /* -- Loops and branches ------------------------------------------------ */ + + case BC_FORI: + if (rec_for(J, pc, 0) != LOOPEV_LEAVE) + J->loopref = J->cur.nins; + break; + case BC_JFORI: + lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL); + if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */ + rec_stop(J, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J])); + /* Continue tracing if the loop is not entered. 
*/ + break; + + case BC_FORL: + rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1)); + break; + case BC_ITERL: + rec_loop_interp(J, pc, rec_iterl(J, *pc)); + break; + case BC_LOOP: + rec_loop_interp(J, pc, rec_loop(J, ra)); + break; + + case BC_JFORL: + rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1)); + break; + case BC_JITERL: + rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins)); + break; + case BC_JLOOP: + rec_loop_jit(J, rc, rec_loop(J, ra)); + break; + + case BC_IFORL: + case BC_IITERL: + case BC_ILOOP: + case BC_IFUNCF: + case BC_IFUNCV: + lj_trace_err(J, LJ_TRERR_BLACKL); + break; + + case BC_JMP: + if (ra < J->maxslot) + J->maxslot = ra; /* Shrink used slots. */ + break; + + /* -- Function headers -------------------------------------------------- */ + + case BC_FUNCF: + rec_func_lua(J); + break; + case BC_JFUNCF: + rec_func_jit(J, rc); + break; + + case BC_FUNCV: + rec_func_vararg(J); + rec_func_lua(J); + break; + case BC_JFUNCV: + lua_assert(0); /* Cannot happen. No hotcall counting for varag funcs. */ + break; + + case BC_FUNCC: + case BC_FUNCCW: + lj_ffrecord_func(J); + break; + + default: + if (op >= BC__MAX) { + lj_ffrecord_func(J); + break; + } + /* fallthrough */ + case BC_ITERN: + case BC_ISNEXT: + case BC_CAT: + case BC_UCLO: + case BC_FNEW: + case BC_TSETM: + setintV(&J->errinfo, (int32_t)op); + lj_trace_err_info(J, LJ_TRERR_NYIBC); + break; + } + + /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */ + if (bcmode_a(op) == BCMdst && rc) { + J->base[ra] = rc; + if (ra >= J->maxslot) J->maxslot = ra+1; + } + +#undef rav +#undef rbv +#undef rcv + + /* Limit the number of recorded IR instructions. */ + if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord]) + lj_trace_err(J, LJ_TRERR_TRACEOV); +} + +/* -- Recording setup ----------------------------------------------------- */ + +/* Setup recording for a root trace started by a hot loop. */ +static const BCIns *rec_setup_root(jit_State *J) +{ + /* Determine the next PC and the bytecode range for the loop. */ + const BCIns *pcj, *pc = J->pc; + BCIns ins = *pc; + BCReg ra = bc_a(ins); + switch (bc_op(ins)) { + case BC_FORL: + J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); + pc += 1+bc_j(ins); + J->bc_min = pc; + break; + case BC_ITERL: + lua_assert(bc_op(pc[-1]) == BC_ITERC); + J->maxslot = ra + bc_b(pc[-1]) - 1; + J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); + pc += 1+bc_j(ins); + lua_assert(bc_op(pc[-1]) == BC_JMP); + J->bc_min = pc; + break; + case BC_LOOP: + /* Only check BC range for real loops, but not for "repeat until true". */ + pcj = pc + bc_j(ins); + ins = *pcj; + if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) { + J->bc_min = pcj+1 + bc_j(ins); + J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); + } + J->maxslot = ra; + pc++; + break; + case BC_RET: + case BC_RET0: + case BC_RET1: + /* No bytecode range check for down-recursive root traces. */ + J->maxslot = ra + bc_d(ins); + break; + case BC_FUNCF: + /* No bytecode range check for root traces started by a hot call. */ + J->maxslot = J->pt->numparams; + pc++; + break; + default: + lua_assert(0); + break; + } + return pc; +} + +/* Setup recording for a side trace. */ +static void rec_setup_side(jit_State *J, GCtrace *T) +{ + SnapShot *snap = &T->snap[J->exitno]; + SnapEntry *map = &T->snapmap[snap->mapofs]; + MSize n, nent = snap->nent; + BloomFilter seen = 0; + /* Emit IR for slots inherited from parent snapshot. 
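+** Each parent snapshot entry becomes either a re-emitted constant or an
+** SLOAD tagged IRSLOAD_INHERIT|IRSLOAD_PARENT, so the side trace starts out
+** with the parent's already-typed values and needs no extra type guards for
+** them.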
*/ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRRef ref = snap_ref(sn); + BCReg s = snap_slot(sn); + IRIns *ir = &T->ir[ref]; + IRType t = irt_type(ir->t); + TRef tr; + /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */ + if (bloomtest(seen, ref)) { + MSize j; + for (j = 0; j < n; j++) + if (snap_ref(map[j]) == ref) { + tr = J->slot[snap_slot(map[j])]; + goto setslot; + } + } + bloomset(seen, ref); + switch ((IROp)ir->o) { + /* Only have to deal with constants that can occur in stack slots. */ + case IR_KPRI: tr = TREF_PRI(t); break; + case IR_KINT: tr = lj_ir_kint(J, ir->i); break; + case IR_KGC: tr = lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t)); break; + case IR_KNUM: tr = lj_ir_k64(J, IR_KNUM, ir_knum(ir)); break; + case IR_KINT64: tr = lj_ir_k64(J, IR_KINT64, ir_kint64(ir)); break; + case IR_KPTR: tr = lj_ir_kptr(J, ir_kptr(ir)); break; /* Continuation. */ + /* Inherited SLOADs don't need a guard or type check. */ + case IR_SLOAD: + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM; + tr = emitir_raw(IRT(IR_SLOAD, t), s, + (ir->op2&IRSLOAD_READONLY) | IRSLOAD_INHERIT|IRSLOAD_PARENT); + break; + /* Parent refs are already typed and don't need a guard. */ + default: + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM; + tr = emitir_raw(IRT(IR_SLOAD, t), s, IRSLOAD_INHERIT|IRSLOAD_PARENT); + break; + } + setslot: + J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME)); /* Same as TREF_* flags. */ + if ((sn & SNAP_FRAME)) + J->baseslot = s+1; + } + J->base = J->slot + J->baseslot; + J->maxslot = snap->nslots - J->baseslot; + J->framedepth = snap->depth; + lj_snap_add(J); +} + +/* Setup for recording a new trace. */ +void lj_record_setup(jit_State *J) +{ + uint32_t i; + + /* Initialize state related to current trace. */ + memset(J->slot, 0, sizeof(J->slot)); + memset(J->chain, 0, sizeof(J->chain)); + memset(J->bpropcache, 0, sizeof(J->bpropcache)); + J->scev.idx = REF_NIL; + + J->baseslot = 1; /* Invoking function is at base[-1]. */ + J->base = J->slot + J->baseslot; + J->maxslot = 0; + J->framedepth = 0; + J->retdepth = 0; + + J->instunroll = J->param[JIT_P_instunroll]; + J->loopunroll = J->param[JIT_P_loopunroll]; + J->tailcalled = 0; + J->loopref = 0; + + J->bc_min = NULL; /* Means no limit. */ + J->bc_extent = ~(MSize)0; + + /* Emit instructions for fixed references. Also triggers initial IR alloc. */ + emitir_raw(IRT(IR_BASE, IRT_P32), J->parent, J->exitno); + for (i = 0; i <= 2; i++) { + IRIns *ir = IR(REF_NIL-i); + ir->i = 0; + ir->t.irt = (uint8_t)(IRT_NIL+i); + ir->o = IR_KPRI; + ir->prev = 0; + } + J->cur.nk = REF_TRUE; + + J->startpc = J->pc; + setmref(J->cur.startpc, J->pc); + if (J->parent) { /* Side trace. */ + GCtrace *T = traceref(J, J->parent); + TraceNo root = T->root ? T->root : J->parent; + J->cur.root = (uint16_t)root; + J->cur.startins = BCINS_AD(BC_JMP, 0, 0); + /* Check whether we could at least potentially form an extra loop. */ + if (J->exitno == 0 && T->snap[0].nent == 0) { + /* We can narrow a FORL for some side traces, too. */ + if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI && + bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) { + lj_snap_add(J); + rec_for_loop(J, J->pc-1, &J->scev, 1); + goto sidecheck; + } + } else { + J->startpc = NULL; /* Prevent forming an extra loop. */ + } + rec_setup_side(J, T); + sidecheck: + if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] || + T->snap[J->exitno].count >= J->param[JIT_P_hotexit] + + J->param[JIT_P_tryside]) + rec_stop(J, TRACE_INTERP); + } else { /* Root trace. 
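+** A root trace has no parent: the hot instruction that started recording
+** (FORL, ITERL, LOOP, RET* or FUNCF) determines the start PC and bytecode
+** range via rec_setup_root() above.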
*/ + J->cur.root = 0; + J->cur.startins = *J->pc; + J->pc = rec_setup_root(J); + /* Note: the loop instruction itself is recorded at the end and not + ** at the start! So snapshot #0 needs to point to the *next* instruction. + */ + lj_snap_add(J); + if (bc_op(J->cur.startins) == BC_FORL) + rec_for_loop(J, J->pc-1, &J->scev, 1); + if (1 + J->pt->framesize >= LJ_MAX_JSLOTS) + lj_trace_err(J, LJ_TRERR_STACKOV); + } +#ifdef LUAJIT_ENABLE_CHECKHOOK + /* Regularly check for instruction/line hooks from compiled code and + ** exit to the interpreter if the hooks are set. + ** + ** This is a compile-time option and disabled by default, since the + ** hook checks may be quite expensive in tight loops. + ** + ** Note this is only useful if hooks are *not* set most of the time. + ** Use this only if you want to *asynchronously* interrupt the execution. + ** + ** You can set the instruction hook via lua_sethook() with a count of 1 + ** from a signal handler or another native thread. Please have a look + ** at the first few functions in luajit.c for an example (Ctrl-C handler). + */ + { + TRef tr = emitir(IRT(IR_XLOAD, IRT_U8), + lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE); + tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT))); + emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0)); + } +#endif +} + +#undef IR +#undef emitir_raw +#undef emitir + +#endif diff --git a/src/luajit2/src/lj_record.h b/src/luajit2/src/lj_record.h new file mode 100644 index 0000000000..186a25f73f --- /dev/null +++ b/src/luajit2/src/lj_record.h @@ -0,0 +1,43 @@ +/* +** Trace recorder (bytecode -> SSA IR). +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_RECORD_H +#define _LJ_RECORD_H + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT +/* Context for recording an indexed load/store. */ +typedef struct RecordIndex { + TValue tabv; /* Runtime value of table (or indexed object). */ + TValue keyv; /* Runtime value of key. */ + TValue valv; /* Runtime value of stored value. */ + TValue mobjv; /* Runtime value of metamethod object. */ + GCtab *mtv; /* Runtime value of metatable object. */ + cTValue *oldv; /* Runtime value of previously stored value. */ + TRef tab; /* Table (or indexed object) reference. */ + TRef key; /* Key reference. */ + TRef val; /* Value reference for a store or 0 for a load. */ + TRef mt; /* Metatable reference. */ + TRef mobj; /* Metamethod object reference. */ + int idxchain; /* Index indirections left or 0 for raw lookup. */ +} RecordIndex; + +LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b, + cTValue *av, cTValue *bv); + +LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs); +LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs); +LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults); + +LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm); +LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix); + +LJ_FUNC void lj_record_ins(jit_State *J); +LJ_FUNC void lj_record_setup(jit_State *J); +#endif + +#endif diff --git a/src/luajit2/src/lj_snap.c b/src/luajit2/src/lj_snap.c new file mode 100644 index 0000000000..9124b7896b --- /dev/null +++ b/src/luajit2/src/lj_snap.c @@ -0,0 +1,457 @@ +/* +** Snapshot handling. +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_snap_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_state.h" +#include "lj_frame.h" +#include "lj_bc.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_trace.h" +#include "lj_snap.h" +#include "lj_target.h" + +/* Some local macros to save typing. Undef'd at the end. */ +#define IR(ref) (&J->cur.ir[(ref)]) + +/* -- Snapshot buffer allocation ------------------------------------------ */ + +/* Grow snapshot buffer. */ +void lj_snap_grow_buf_(jit_State *J, MSize need) +{ + MSize maxsnap = (MSize)J->param[JIT_P_maxsnap]; + if (need > maxsnap) + lj_trace_err(J, LJ_TRERR_SNAPOV); + lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot); + J->cur.snap = J->snapbuf; +} + +/* Grow snapshot map buffer. */ +void lj_snap_grow_map_(jit_State *J, MSize need) +{ + if (need < 2*J->sizesnapmap) + need = 2*J->sizesnapmap; + else if (need < 64) + need = 64; + J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf, + J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry)); + J->cur.snapmap = J->snapmapbuf; + J->sizesnapmap = need; +} + +/* -- Snapshot generation ------------------------------------------------- */ + +/* Add all modified slots to the snapshot. */ +static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots) +{ + IRRef retf = J->chain[IR_RETF]; /* Limits SLOAD restore elimination. */ + BCReg s; + MSize n = 0; + for (s = 0; s < nslots; s++) { + TRef tr = J->slot[s]; + IRRef ref = tref_ref(tr); + if (ref) { + SnapEntry sn = SNAP_TR(s, tr); + IRIns *ir = IR(ref); + if (ir->o == IR_SLOAD && ir->op1 == s && ref > retf) { + /* No need to snapshot unmodified non-inherited slots. */ + if (!(ir->op2 & IRSLOAD_INHERIT)) + continue; + /* No need to restore readonly slots and unmodified non-parent slots. */ + if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) && + (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT) + sn |= SNAP_NORESTORE; + } + if (LJ_SOFTFP && irt_isnum(ir->t)) + sn |= SNAP_SOFTFPNUM; + map[n++] = sn; + } + } + return n; +} + +/* Add frame links at the end of the snapshot. */ +static void snapshot_framelinks(jit_State *J, SnapEntry *map) +{ + cTValue *frame = J->L->base - 1; + cTValue *lim = J->L->base - J->baseslot; + MSize f = 0; + map[f++] = SNAP_MKPC(J->pc); /* The current PC is always the first entry. */ + while (frame > lim) { /* Backwards traversal of all frames above base. */ + if (frame_islua(frame)) { + map[f++] = SNAP_MKPC(frame_pc(frame)); + frame = frame_prevl(frame); + } else if (frame_iscont(frame)) { + map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); + map[f++] = SNAP_MKPC(frame_contpc(frame)); + frame = frame_prevd(frame); + } else { + lua_assert(!frame_isc(frame)); + map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); + frame = frame_prevd(frame); + } + } + lua_assert(f == (MSize)(1 + J->framedepth)); +} + +/* Take a snapshot of the current stack. */ +static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap) +{ + BCReg nslots = J->baseslot + J->maxslot; + MSize nent; + SnapEntry *p; + /* Conservative estimate. 
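+** At most one map entry per slot, plus the PC entry and one frame link per
+** unit of framedepth (cf. the assertion in snapshot_framelinks above); the
+** exact counts are only known after snapshot_slots() has run.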
*/ + lj_snap_grow_map(J, nsnapmap + nslots + (MSize)J->framedepth+1); + p = &J->cur.snapmap[nsnapmap]; + nent = snapshot_slots(J, p, nslots); + snapshot_framelinks(J, p + nent); + snap->mapofs = (uint16_t)nsnapmap; + snap->ref = (IRRef1)J->cur.nins; + snap->nent = (uint8_t)nent; + snap->depth = (uint8_t)J->framedepth; + snap->nslots = (uint8_t)nslots; + snap->count = 0; + J->cur.nsnapmap = (uint16_t)(nsnapmap + nent + 1 + J->framedepth); +} + +/* Add or merge a snapshot. */ +void lj_snap_add(jit_State *J) +{ + MSize nsnap = J->cur.nsnap; + MSize nsnapmap = J->cur.nsnapmap; + /* Merge if no ins. inbetween or if requested and no guard inbetween. */ + if (J->mergesnap ? !irt_isguard(J->guardemit) : + (nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins)) { + nsnapmap = J->cur.snap[--nsnap].mapofs; + } else { + lj_snap_grow_buf(J, nsnap+1); + J->cur.nsnap = (uint16_t)(nsnap+1); + } + J->mergesnap = 0; + J->guardemit.irt = 0; + snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap); +} + +/* -- Snapshot modification ----------------------------------------------- */ + +#define SNAP_USEDEF_SLOTS (LJ_MAX_JSLOTS+LJ_STACK_EXTRA) + +/* Find unused slots with reaching-definitions bytecode data-flow analysis. */ +static BCReg snap_usedef(jit_State *J, uint8_t *udf, + const BCIns *pc, BCReg maxslot) +{ + BCReg s; + GCobj *o; + + if (maxslot == 0) return 0; +#ifdef LUAJIT_USE_VALGRIND + /* Avoid errors for harmless reads beyond maxslot. */ + memset(udf, 1, SNAP_USEDEF_SLOTS); +#else + memset(udf, 1, maxslot); +#endif + + /* Treat open upvalues as used. */ + o = gcref(J->L->openupval); + while (o) { + if (uvval(gco2uv(o)) < J->L->base) break; + udf[uvval(gco2uv(o)) - J->L->base] = 0; + o = gcref(o->gch.nextgc); + } + +#define USE_SLOT(s) udf[(s)] &= ~1 +#define DEF_SLOT(s) udf[(s)] *= 3 + + /* Scan through following bytecode and check for uses/defs. */ + lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); + for (;;) { + BCIns ins = *pc++; + BCOp op = bc_op(ins); + switch (bcmode_b(op)) { + case BCMvar: USE_SLOT(bc_b(ins)); break; + default: break; + } + switch (bcmode_c(op)) { + case BCMvar: USE_SLOT(bc_c(ins)); break; + case BCMrbase: + lua_assert(op == BC_CAT); + for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s); + for (; s < maxslot; s++) DEF_SLOT(s); + break; + case BCMjump: + handle_jump: { + BCReg minslot = bc_a(ins); + if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT; + else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-1])-1; + else if (op == BC_UCLO) { pc += bc_j(ins); break; } + for (s = minslot; s < maxslot; s++) DEF_SLOT(s); + return minslot < maxslot ? minslot : maxslot; + } + case BCMlit: + if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) { + goto handle_jump; + } else if (bc_isret(op)) { + BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1); + for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s); + for (; s < top; s++) USE_SLOT(s); + for (; s < maxslot; s++) DEF_SLOT(s); + return 0; + } + break; + case BCMfunc: return maxslot; /* NYI: will abort, anyway. */ + default: break; + } + switch (bcmode_a(op)) { + case BCMvar: USE_SLOT(bc_a(ins)); break; + case BCMdst: + if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins)); + break; + case BCMbase: + if (op >= BC_CALLM && op <= BC_VARG) { + BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ? + maxslot : (bc_a(ins) + bc_c(ins)); + s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 
3 : 0); + for (; s < top; s++) USE_SLOT(s); + for (; s < maxslot; s++) DEF_SLOT(s); + if (op == BC_CALLT || op == BC_CALLMT) { + for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s); + return 0; + } + } else if (op == BC_KNIL) { + for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s); + } else if (op == BC_TSETM) { + for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s); + } + break; + default: break; + } + lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); + } + +#undef USE_SLOT +#undef DEF_SLOT + + return 0; /* unreachable */ +} + +/* Purge dead slots before the next snapshot. */ +void lj_snap_purge(jit_State *J) +{ + uint8_t udf[SNAP_USEDEF_SLOTS]; + BCReg maxslot = J->maxslot; + BCReg s = snap_usedef(J, udf, J->pc, maxslot); + for (; s < maxslot; s++) + if (udf[s] != 0) + J->base[s] = 0; /* Purge dead slots. */ +} + +/* Shrink last snapshot. */ +void lj_snap_shrink(jit_State *J) +{ + SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; + SnapEntry *map = &J->cur.snapmap[snap->mapofs]; + MSize n, m, nlim, nent = snap->nent; + uint8_t udf[SNAP_USEDEF_SLOTS]; + BCReg maxslot = J->maxslot; + BCReg minslot = snap_usedef(J, udf, snap_pc(map[nent]), maxslot); + BCReg baseslot = J->baseslot; + maxslot += baseslot; + minslot += baseslot; + snap->nslots = (uint8_t)maxslot; + for (n = m = 0; n < nent; n++) { /* Remove unused slots from snapshot. */ + BCReg s = snap_slot(map[n]); + if (s < minslot || (s < maxslot && udf[s-baseslot] == 0)) + map[m++] = map[n]; /* Only copy used slots. */ + } + snap->nent = (uint8_t)m; + nlim = nent + snap->depth; + while (n <= nlim) map[m++] = map[n++]; /* Move PC + frame links down. */ + J->cur.nsnapmap = (uint16_t)(snap->mapofs + m); /* Free up space in map. */ +} + +/* -- Snapshot access ----------------------------------------------------- */ + +/* Initialize a Bloom Filter with all renamed refs. +** There are very few renames (often none), so the filter has +** very few bits set. This makes it suitable for negative filtering. +*/ +static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim) +{ + BloomFilter rfilt = 0; + IRIns *ir; + for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--) + if (ir->op2 <= lim) + bloomset(rfilt, ir->op1); + return rfilt; +} + +/* Process matching renames to find the original RegSP. */ +static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs) +{ + IRIns *ir; + for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--) + if (ir->op1 == ref && ir->op2 <= lim) + rs = ir->prev; + return rs; +} + +/* Convert a snapshot into a linear slot -> RegSP map. +** Note: unused slots are not initialized! +*/ +void lj_snap_regspmap(uint16_t *rsmap, GCtrace *T, SnapNo snapno, int hi) +{ + SnapShot *snap = &T->snap[snapno]; + MSize n, nent = snap->nent; + SnapEntry *map = &T->snapmap[snap->mapofs]; + BloomFilter rfilt = snap_renamefilter(T, snapno); + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRRef ref = snap_ref(sn); + if (!irref_isk(ref) && + ((LJ_SOFTFP && hi) ? (ref++, (sn & SNAP_SOFTFPNUM)) : 1)) { + IRIns *ir = &T->ir[ref]; + uint32_t rs = ir->prev; + if (bloomtest(rfilt, ref)) + rs = snap_renameref(T, snapno, ref, rs); + rsmap[snap_slot(sn)] = (uint16_t)rs; + } + } +} + +/* Restore interpreter state from exit state with the help of a snapshot. */ +const BCIns *lj_snap_restore(jit_State *J, void *exptr) +{ + ExitState *ex = (ExitState *)exptr; + SnapNo snapno = J->exitno; /* For now, snapno == exitno. 
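+** Exit stubs are currently allocated per snapshot, so the exit number taken
+** in machine code directly indexes the snapshot that describes how to
+** reconstruct the Lua stack.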
*/ + GCtrace *T = traceref(J, J->parent); + SnapShot *snap = &T->snap[snapno]; + MSize n, nent = snap->nent; + SnapEntry *map = &T->snapmap[snap->mapofs]; + SnapEntry *flinks = map + nent + snap->depth; + int32_t ftsz0; + BCReg nslots = snap->nslots; + TValue *frame; + BloomFilter rfilt = snap_renamefilter(T, snapno); + const BCIns *pc = snap_pc(map[nent]); + lua_State *L = J->L; + + /* Set interpreter PC to the next PC to get correct error messages. */ + setcframe_pc(cframe_raw(L->cframe), pc+1); + + /* Make sure the stack is big enough for the slots from the snapshot. */ + if (LJ_UNLIKELY(L->base + nslots > tvref(L->maxstack))) { + L->top = curr_topL(L); + lj_state_growstack(L, nslots - curr_proto(L)->framesize); + } + + /* Fill stack slots with data from the registers and spill slots. */ + frame = L->base-1; + ftsz0 = frame_ftsz(frame); /* Preserve link to previous frame in slot #0. */ + for (n = 0; n < nent; n++) { + SnapEntry sn = map[n]; + IRRef ref = snap_ref(sn); + BCReg s = snap_slot(sn); + TValue *o = &frame[s]; /* Stack slots are relative to start frame. */ + IRIns *ir = &T->ir[ref]; + if (irref_isk(ref)) { /* Restore constant slot. */ + lj_ir_kvalue(L, o, ir); + if ((sn & (SNAP_CONT|SNAP_FRAME))) { + /* Overwrite tag with frame link. */ + o->fr.tp.ftsz = s != 0 ? (int32_t)*flinks-- : ftsz0; + if ((sn & SNAP_FRAME)) { + GCfunc *fn = ir_kfunc(ir); + if (isluafunc(fn)) { + MSize framesize = funcproto(fn)->framesize; + L->base = ++o; + if (LJ_UNLIKELY(o + framesize > tvref(L->maxstack))) { + ptrdiff_t fsave = savestack(L, frame); + L->top = o; + lj_state_growstack(L, framesize); /* Grow again. */ + frame = restorestack(L, fsave); + } + } + } + } + } else if (!(sn & SNAP_NORESTORE)) { + IRType1 t = ir->t; + RegSP rs = ir->prev; + lua_assert(!(sn & (SNAP_CONT|SNAP_FRAME))); + if (LJ_UNLIKELY(bloomtest(rfilt, ref))) + rs = snap_renameref(T, snapno, ref, rs); + if (ra_hasspill(regsp_spill(rs))) { /* Restore from spill slot. */ + int32_t *sps = &ex->spill[regsp_spill(rs)]; + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) { + o->u32.lo = (uint32_t)*sps; + } else if (irt_isinteger(t)) { + setintV(o, *sps); +#if !LJ_SOFTFP + } else if (irt_isnum(t)) { + o->u64 = *(uint64_t *)sps; +#endif +#if LJ_64 + } else if (irt_islightud(t)) { + /* 64 bit lightuserdata which may escape already has the tag bits. */ + o->u64 = *(uint64_t *)sps; +#endif + } else { + lua_assert(!irt_ispri(t)); /* PRI refs never have a spill slot. */ + setgcrefi(o->gcr, *sps); + setitype(o, irt_toitype(t)); + } + } else { /* Restore from register. */ + Reg r = regsp_reg(rs); + lua_assert(ra_hasreg(r)); + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) { + o->u32.lo = (uint32_t)ex->gpr[r-RID_MIN_GPR]; + } else if (irt_isinteger(t)) { + setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]); +#if !LJ_SOFTFP + } else if (irt_isnum(t)) { + setnumV(o, ex->fpr[r-RID_MIN_FPR]); +#endif +#if LJ_64 + } else if (irt_islightud(t)) { + /* 64 bit lightuserdata which may escape already has the tag bits. */ + o->u64 = ex->gpr[r-RID_MIN_GPR]; +#endif + } else { + if (!irt_ispri(t)) + setgcrefi(o->gcr, ex->gpr[r-RID_MIN_GPR]); + setitype(o, irt_toitype(t)); + } + } + if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) { + rs = (ir+1)->prev; + if (LJ_UNLIKELY(bloomtest(rfilt, ref+1))) + rs = snap_renameref(T, snapno, ref+1, rs); + o->u32.hi = (ra_hasspill(regsp_spill(rs))) ? 
+ (uint32_t)*&ex->spill[regsp_spill(rs)] : + (uint32_t)ex->gpr[regsp_reg(rs)-RID_MIN_GPR]; + } + } + } + switch (bc_op(*pc)) { + case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM: + L->top = frame + nslots; + break; + default: + L->top = curr_topL(L); + break; + } + lua_assert(map + nent == flinks); + return pc; +} + +#undef IR + +#endif diff --git a/src/luajit2/src/lj_snap.h b/src/luajit2/src/lj_snap.h new file mode 100644 index 0000000000..da9813b92b --- /dev/null +++ b/src/luajit2/src/lj_snap.h @@ -0,0 +1,34 @@ +/* +** Snapshot handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_SNAP_H +#define _LJ_SNAP_H + +#include "lj_obj.h" +#include "lj_jit.h" + +#if LJ_HASJIT +LJ_FUNC void lj_snap_add(jit_State *J); +LJ_FUNC void lj_snap_purge(jit_State *J); +LJ_FUNC void lj_snap_shrink(jit_State *J); +LJ_FUNC void lj_snap_regspmap(uint16_t *rsmap, GCtrace *T, SnapNo snapno, + int hi); +LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr); +LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need); +LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need); + +static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need) +{ + if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need); +} + +static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need) +{ + if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need); +} + +#endif + +#endif diff --git a/src/luajit2/src/lj_state.c b/src/luajit2/src/lj_state.c new file mode 100644 index 0000000000..49f2d6bd02 --- /dev/null +++ b/src/luajit2/src/lj_state.c @@ -0,0 +1,280 @@ +/* +** State and stack handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_state_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_func.h" +#include "lj_meta.h" +#include "lj_state.h" +#include "lj_frame.h" +#if LJ_HASFFI +#include "lj_ctype.h" +#endif +#include "lj_trace.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_lex.h" +#include "lj_alloc.h" + +/* -- Stack handling ------------------------------------------------------ */ + +/* Stack sizes. */ +#define LJ_STACK_MIN LUA_MINSTACK /* Min. stack size. */ +#define LJ_STACK_MAX LUAI_MAXSTACK /* Max. stack size. */ +#define LJ_STACK_START (2*LJ_STACK_MIN) /* Starting stack size. */ +#define LJ_STACK_MAXEX (LJ_STACK_MAX + 1 + LJ_STACK_EXTRA) + +/* Explanation of LJ_STACK_EXTRA: +** +** Calls to metamethods store their arguments beyond the current top +** without checking for the stack limit. This avoids stack resizes which +** would invalidate passed TValue pointers. The stack check is performed +** later by the function header. This can safely resize the stack or raise +** an error. Thus we need some extra slots beyond the current stack limit. +** +** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus +** one extra slot if mobj is not a function. Only lj_meta_tset needs 5 +** slots above top, but then mobj is always a function. So we can get by +** with 5 extra slots. +*/ + +/* Resize stack slots and adjust pointers in state. 
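+** Reallocation may move the stack, so every TValue pointer into it must be
+** rebased by the same byte offset, roughly:
+**   delta   = (char *)newstack - (char *)oldstack
+**   L->base = (TValue *)((char *)L->base + delta)
+** and likewise L->top, the open upvalue values and the JIT base pointer.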
*/ +static void resizestack(lua_State *L, MSize n) +{ + TValue *st, *oldst = tvref(L->stack); + ptrdiff_t delta; + MSize oldsize = L->stacksize; + MSize realsize = n + 1 + LJ_STACK_EXTRA; + GCobj *up; + lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1); + st = (TValue *)lj_mem_realloc(L, tvref(L->stack), + (MSize)(L->stacksize*sizeof(TValue)), + (MSize)(realsize*sizeof(TValue))); + setmref(L->stack, st); + delta = (char *)st - (char *)oldst; + setmref(L->maxstack, st + n); + while (oldsize < realsize) /* Clear new slots. */ + setnilV(st + oldsize++); + L->stacksize = realsize; + L->base = (TValue *)((char *)L->base + delta); + L->top = (TValue *)((char *)L->top + delta); + for (up = gcref(L->openupval); up != NULL; up = gcnext(up)) + setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta)); + if (obj2gco(L) == gcref(G(L)->jit_L)) + setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta); +} + +/* Relimit stack after error, in case the limit was overdrawn. */ +void lj_state_relimitstack(lua_State *L) +{ + if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1) + resizestack(L, LJ_STACK_MAX); +} + +/* Try to shrink the stack (called from GC). */ +void lj_state_shrinkstack(lua_State *L, MSize used) +{ + if (L->stacksize > LJ_STACK_MAXEX) + return; /* Avoid stack shrinking while handling stack overflow. */ + if (4*used < L->stacksize && + 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize && + obj2gco(L) != gcref(G(L)->jit_L)) /* Don't shrink stack of live trace. */ + resizestack(L, L->stacksize >> 1); +} + +/* Try to grow stack. */ +void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need) +{ + MSize n; + if (L->stacksize > LJ_STACK_MAXEX) /* Overflow while handling overflow? */ + lj_err_throw(L, LUA_ERRERR); + n = L->stacksize + need; + if (n > LJ_STACK_MAX) { + n += 2*LUA_MINSTACK; + } else if (n < 2*L->stacksize) { + n = 2*L->stacksize; + if (n >= LJ_STACK_MAX) + n = LJ_STACK_MAX; + } + resizestack(L, n); + if (L->stacksize > LJ_STACK_MAXEX) + lj_err_msg(L, LJ_ERR_STKOV); +} + +void LJ_FASTCALL lj_state_growstack1(lua_State *L) +{ + lj_state_growstack(L, 1); +} + +/* Allocate basic stack for new state. */ +static void stack_init(lua_State *L1, lua_State *L) +{ + TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue); + setmref(L1->stack, st); + L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA; + stend = st + L1->stacksize; + setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1); + L1->base = L1->top = st+1; + setthreadV(L1, st, L1); /* Needed for curr_funcisL() on empty stack. */ + while (st < stend) /* Clear new slots. */ + setnilV(st++); +} + +/* -- State handling ------------------------------------------------------ */ + +/* Open parts that may cause memory-allocation errors. */ +static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud) +{ + global_State *g = G(L); + UNUSED(dummy); + UNUSED(ud); + stack_init(L, L); + /* NOBARRIER: State initialization, all objects are white. */ + setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL))); + settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY)); + lj_str_resize(L, LJ_MIN_STRTAB-1); + lj_meta_init(L); + lj_lex_init(L); + fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. 
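+** The message is interned and GC-fixed up front so that an out-of-memory
+** error can still be raised when no further allocation is possible.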
*/ + g->gc.threshold = 4*g->gc.total; + lj_trace_initstate(g); + return NULL; +} + +static void close_state(lua_State *L) +{ + global_State *g = G(L); + lj_func_closeuv(L, tvref(L->stack)); + lj_gc_freeall(g); + lua_assert(gcref(g->gc.root) == obj2gco(L)); + lua_assert(g->strnum == 0); + lj_trace_freestate(g); +#if LJ_HASFFI + lj_ctype_freestate(g); +#endif + lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); + lj_str_freebuf(g, &g->tmpbuf); + lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); + lua_assert(g->gc.total == sizeof(GG_State)); +#ifndef LUAJIT_USE_SYSMALLOC + if (g->allocf == lj_alloc_f) + lj_alloc_destroy(g->allocd); + else +#endif + g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0); +} + +#if LJ_64 +lua_State *lj_state_newstate(lua_Alloc f, void *ud) +#else +LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud) +#endif +{ + GG_State *GG = (GG_State *)f(ud, NULL, 0, sizeof(GG_State)); + lua_State *L = &GG->L; + global_State *g = &GG->g; + if (GG == NULL || !checkptr32(GG)) return NULL; + memset(GG, 0, sizeof(GG_State)); + L->gct = ~LJ_TTHREAD; + L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */ + L->dummy_ffid = FF_C; + setmref(L->glref, g); + g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED; + g->strempty.marked = LJ_GC_WHITE0; + g->strempty.gct = ~LJ_TSTR; + g->allocf = f; + g->allocd = ud; + setgcref(g->mainthref, obj2gco(L)); + setgcref(g->uvhead.prev, obj2gco(&g->uvhead)); + setgcref(g->uvhead.next, obj2gco(&g->uvhead)); + g->strmask = ~(MSize)0; + setnilV(registry(L)); + setnilV(&g->nilnode.val); + setnilV(&g->nilnode.key); + setmref(g->nilnode.freetop, &g->nilnode); + lj_str_initbuf(&g->tmpbuf); + g->gc.state = GCSpause; + setgcref(g->gc.root, obj2gco(L)); + setmref(g->gc.sweep, &g->gc.root); + g->gc.total = sizeof(GG_State); + g->gc.pause = LUAI_GCPAUSE; + g->gc.stepmul = LUAI_GCMUL; + lj_dispatch_init((GG_State *)L); + L->status = LUA_ERRERR+1; /* Avoid touching the stack upon memory error. */ + if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) { + /* Memory allocation error: free partial state. */ + close_state(L); + return NULL; + } + L->status = 0; + return L; +} + +static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud) +{ + UNUSED(dummy); + UNUSED(ud); + lj_gc_finalize_udata(L); + lj_gc_finalize_cdata(L); + /* Frame pop omitted. */ + return NULL; +} + +LUA_API void lua_close(lua_State *L) +{ + global_State *g = G(L); + L = mainthread(g); /* Only the main thread can be closed. */ + lj_func_closeuv(L, tvref(L->stack)); + lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */ +#if LJ_HASJIT + G2J(g)->flags &= ~JIT_F_ON; + G2J(g)->state = LJ_TRACE_IDLE; + lj_dispatch_update(g); +#endif + do { + hook_enter(g); + L->status = 0; + L->cframe = NULL; + L->base = L->top = tvref(L->stack) + 1; + } while (lj_vm_cpcall(L, NULL, NULL, cpfinalize) != 0); + close_state(L); +} + +lua_State *lj_state_new(lua_State *L) +{ + lua_State *L1 = lj_mem_newobj(L, lua_State); + L1->gct = ~LJ_TTHREAD; + L1->dummy_ffid = FF_C; + L1->status = 0; + L1->stacksize = 0; + setmref(L1->stack, NULL); + L1->cframe = NULL; + /* NOBARRIER: The lua_State is new (marked white). 
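+** A freshly allocated white object cannot be black, so storing references
+** into it cannot break the incremental GC invariant and no write barrier
+** is required.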
*/ + setgcrefnull(L1->openupval); + setmrefr(L1->glref, L->glref); + setgcrefr(L1->env, L->env); + stack_init(L1, L); /* init stack */ + lua_assert(iswhite(obj2gco(L1))); + return L1; +} + +void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L) +{ + lua_assert(L != mainthread(g)); + lj_func_closeuv(L, tvref(L->stack)); + lua_assert(gcref(L->openupval) == NULL); + lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); + lj_mem_freet(g, L); +} + diff --git a/src/luajit2/src/lj_state.h b/src/luajit2/src/lj_state.h new file mode 100644 index 0000000000..d8d6104e42 --- /dev/null +++ b/src/luajit2/src/lj_state.h @@ -0,0 +1,35 @@ +/* +** State and stack handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_STATE_H +#define _LJ_STATE_H + +#include "lj_obj.h" + +#define incr_top(L) \ + (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0)) + +#define savestack(L, p) ((char *)(p) - mref(L->stack, char)) +#define restorestack(L, n) ((TValue *)(mref(L->stack, char) + (n))) + +LJ_FUNC void lj_state_relimitstack(lua_State *L); +LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used); +LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need); +LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L); + +static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need) +{ + if ((MSize)(mref(L->maxstack, char) - (char *)L->top) <= + need*(MSize)sizeof(TValue)) + lj_state_growstack(L, need); +} + +LJ_FUNC lua_State *lj_state_new(lua_State *L); +LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L); +#if LJ_64 +LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud); +#endif + +#endif diff --git a/src/luajit2/src/lj_str.c b/src/luajit2/src/lj_str.c new file mode 100644 index 0000000000..cd3a8b69e1 --- /dev/null +++ b/src/luajit2/src/lj_str.c @@ -0,0 +1,415 @@ +/* +** String handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#include <stdio.h> + +#define lj_str_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_str.h" +#include "lj_state.h" +#include "lj_char.h" + +/* -- String interning ---------------------------------------------------- */ + +/* Ordered compare of strings. Assumes string data is 4-byte aligned. */ +int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b) +{ + MSize i, n = a->len > b->len ? b->len : a->len; + for (i = 0; i < n; i += 4) { + /* Note: innocuous access up to end of string + 3. */ + uint32_t va = *(const uint32_t *)(strdata(a)+i); + uint32_t vb = *(const uint32_t *)(strdata(b)+i); + if (va != vb) { +#if LJ_LE + va = lj_bswap(va); vb = lj_bswap(vb); +#endif + i -= n; + if ((int32_t)i >= -3) { + va >>= 32+(i<<3); vb >>= 32+(i<<3); + if (va == vb) break; + } + return va < vb ? -1 : 1; + } + } + return (int32_t)(a->len - b->len); +} + +/* Fast string data comparison. Caveat: unaligned access to 1st string! */ +static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len) +{ + MSize i = 0; + lua_assert(len > 0); + lua_assert((((uintptr_t)a + len) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4); + do { /* Note: innocuous access up to end of string + 3. */ + uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i); + if (v) { + i -= len; +#if LJ_LE + return (int32_t)i >= -3 ? (v << (32+(i<<3))) : 1; +#else + return (int32_t)i >= -3 ? 
(v >> (32+(i<<3))) : 1; +#endif + } + i += 4; + } while (i < len); + return 0; +} + +/* Resize the string hash table (grow and shrink). */ +void lj_str_resize(lua_State *L, MSize newmask) +{ + global_State *g = G(L); + GCRef *newhash; + MSize i; + if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1) + return; /* No resizing during GC traversal or if already too big. */ + newhash = lj_mem_newvec(L, newmask+1, GCRef); + memset(newhash, 0, (newmask+1)*sizeof(GCRef)); + for (i = g->strmask; i != ~(MSize)0; i--) { /* Rehash old table. */ + GCobj *p = gcref(g->strhash[i]); + while (p) { /* Follow each hash chain and reinsert all strings. */ + MSize h = gco2str(p)->hash & newmask; + GCobj *next = gcnext(p); + /* NOBARRIER: The string table is a GC root. */ + setgcrefr(p->gch.nextgc, newhash[h]); + setgcref(newhash[h], p); + p = next; + } + } + lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); + g->strmask = newmask; + g->strhash = newhash; +} + +/* Intern a string and return string object. */ +GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx) +{ + global_State *g; + GCstr *s; + GCobj *o; + MSize len = (MSize)lenx; + MSize a, b, h = len; + if (lenx >= LJ_MAX_STR) + lj_err_msg(L, LJ_ERR_STROV); + g = G(L); + /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */ + if (len >= 4) { /* Caveat: unaligned access! */ + a = lj_getu32(str); + h ^= lj_getu32(str+len-4); + b = lj_getu32(str+(len>>1)-2); + h ^= b; h -= lj_rol(b, 14); + b += lj_getu32(str+(len>>2)-1); + } else if (len > 0) { + a = *(const uint8_t *)str; + h ^= *(const uint8_t *)(str+len-1); + b = *(const uint8_t *)(str+(len>>1)); + h ^= b; h -= lj_rol(b, 14); + } else { + return &g->strempty; + } + a ^= h; a -= lj_rol(h, 11); + b ^= a; b -= lj_rol(a, 25); + h ^= b; h -= lj_rol(b, 16); + /* Check if the string has already been interned. */ + o = gcref(g->strhash[h & g->strmask]); + if (LJ_LIKELY((((uintptr_t)str + len) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) { + while (o != NULL) { + GCstr *sx = gco2str(o); + if (sx->len == len && str_fastcmp(str, strdata(sx), len) == 0) { + /* Resurrect if dead. Can only happen with fixstring() (keywords). */ + if (isdead(g, o)) flipwhite(o); + return sx; /* Return existing string. */ + } + o = gcnext(o); + } + } else { /* Slow path: end of string is too close to a page boundary. */ + while (o != NULL) { + GCstr *sx = gco2str(o); + if (sx->len == len && memcmp(str, strdata(sx), len) == 0) { + /* Resurrect if dead. Can only happen with fixstring() (keywords). */ + if (isdead(g, o)) flipwhite(o); + return sx; /* Return existing string. */ + } + o = gcnext(o); + } + } + /* Nope, create a new string. */ + s = lj_mem_newt(L, sizeof(GCstr)+len+1, GCstr); + newwhite(g, s); + s->gct = ~LJ_TSTR; + s->len = len; + s->hash = h; + s->reserved = 0; + memcpy(strdatawr(s), str, len); + strdatawr(s)[len] = '\0'; /* Zero-terminate string. */ + /* Add it to string hash table. */ + h &= g->strmask; + s->nextgc = g->strhash[h]; + /* NOBARRIER: The string table is a GC root. */ + setgcref(g->strhash[h], obj2gco(s)); + if (g->strnum++ > g->strmask) /* Allow a 100% load factor. */ + lj_str_resize(L, (g->strmask<<1)+1); /* Grow string table. */ + return s; /* Return newly interned string. */ +} + +void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s) +{ + g->strnum--; + lj_mem_free(g, s, sizestring(s)); +} + +/* -- Type conversions ---------------------------------------------------- */ + +/* Convert string object to number. 
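+** A successful integer parse is widened to a plain lua_Number here, whereas
+** lj_str_tonumber() below keeps the (possibly dual-number) result as-is.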
*/ +int LJ_FASTCALL lj_str_tonum(GCstr *str, TValue *n) +{ + int ok = lj_str_numconv(strdata(str), n); + if (ok && tvisint(n)) + setnumV(n, (lua_Number)intV(n)); + return ok; +} + +int LJ_FASTCALL lj_str_tonumber(GCstr *str, TValue *n) +{ + return lj_str_numconv(strdata(str), n); +} + +/* Convert string to number. */ +int LJ_FASTCALL lj_str_numconv(const char *s, TValue *n) +{ +#if LJ_DUALNUM + int sign = 1; +#else + lua_Number sign = 1; +#endif + const uint8_t *p = (const uint8_t *)s; + while (lj_char_isspace(*p)) p++; + if (*p == '-') { p++; sign = -1; } else if (*p == '+') { p++; } + if ((uint32_t)(*p - '0') < 10) { + uint32_t k = (uint32_t)(*p++ - '0'); + if (k == 0 && ((*p & ~0x20) == 'X')) { + p++; + if (!lj_char_isxdigit(*p)) + return 0; /* Don't accept '0x' without hex digits. */ + do { + if (k >= 0x10000000u) goto parsedbl; + k = (k << 4) + (*p & 15u); + if (!lj_char_isdigit(*p)) k += 9; + p++; + } while (lj_char_isxdigit(*p)); + } else { + while ((uint32_t)(*p - '0') < 10) { + if (LJ_UNLIKELY(k >= 429496729) && (k != 429496729 || *p > '5')) + goto parsedbl; + k = k * 10u + (uint32_t)(*p++ - '0'); + } + } + while (LJ_UNLIKELY(lj_char_isspace(*p))) p++; + if (LJ_LIKELY(*p == '\0')) { +#if LJ_DUALNUM + if (sign == 1) { + if (k < 0x80000000u) { + setintV(n, (int32_t)k); + return 1; + } + } else if (k <= 0x80000000u) { + setintV(n, -(int32_t)k); + return 1; + } +#endif + setnumV(n, sign * (lua_Number)k); + return 1; + } + } +parsedbl: + { + TValue tv; + char *endptr; + setnumV(&tv, lua_str2number(s, &endptr)); + if (endptr == s) return 0; /* Conversion failed. */ + if (LJ_UNLIKELY(*endptr != '\0')) { + while (lj_char_isspace((uint8_t)*endptr)) endptr++; + if (*endptr != '\0') return 0; /* Invalid trailing characters? */ + } + if (LJ_LIKELY(!tvisnan(&tv))) + setnumV(n, numV(&tv)); + else + setnanV(n); /* Canonicalize injected NaNs. */ + return 1; + } +} + +/* Print number to buffer. Canonicalizes non-finite values. */ +size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o) +{ + if (LJ_LIKELY((o->u32.hi << 1) < 0xffe00000)) { /* Finite? */ + lua_Number n = o->n; + return (size_t)lua_number2str(s, n); + } else if (((o->u32.hi & 0x000fffff) | o->u32.lo) != 0) { + s[0] = 'n'; s[1] = 'a'; s[2] = 'n'; return 3; + } else if ((o->u32.hi & 0x80000000) == 0) { + s[0] = 'i'; s[1] = 'n'; s[2] = 'f'; return 3; + } else { + s[0] = '-'; s[1] = 'i'; s[2] = 'n'; s[3] = 'f'; return 4; + } +} + +/* Print integer to buffer. Returns pointer to start. */ +char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k) +{ + uint32_t u = (uint32_t)(k < 0 ? -k : k); + p += 1+10; + do { *--p = (char)('0' + u % 10); } while (u /= 10); + if (k < 0) *--p = '-'; + return p; +} + +/* Convert number to string. */ +GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np) +{ + char buf[LJ_STR_NUMBUF]; + size_t len = lj_str_bufnum(buf, (TValue *)np); + return lj_str_new(L, buf, len); +} + +/* Convert integer to string. */ +GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k) +{ + char s[1+10]; + char *p = lj_str_bufint(s, k); + return lj_str_new(L, p, (size_t)(s+sizeof(s)-p)); +} + +GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o) +{ + return tvisint(o) ? 
lj_str_fromint(L, intV(o)) : lj_str_fromnum(L, &o->n); +} + +/* -- String formatting --------------------------------------------------- */ + +static void addstr(lua_State *L, SBuf *sb, const char *str, MSize len) +{ + char *p; + MSize i; + if (sb->n + len > sb->sz) { + MSize sz = sb->sz * 2; + while (sb->n + len > sz) sz = sz * 2; + lj_str_resizebuf(L, sb, sz); + } + p = sb->buf + sb->n; + sb->n += len; + for (i = 0; i < len; i++) p[i] = str[i]; +} + +static void addchar(lua_State *L, SBuf *sb, int c) +{ + if (sb->n + 1 > sb->sz) { + MSize sz = sb->sz * 2; + lj_str_resizebuf(L, sb, sz); + } + sb->buf[sb->n++] = (char)c; +} + +/* Push formatted message as a string object to Lua stack. va_list variant. */ +const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp) +{ + SBuf *sb = &G(L)->tmpbuf; + lj_str_needbuf(L, sb, (MSize)strlen(fmt)); + lj_str_resetbuf(sb); + for (;;) { + const char *e = strchr(fmt, '%'); + if (e == NULL) break; + addstr(L, sb, fmt, (MSize)(e-fmt)); + /* This function only handles %s, %c, %d, %f and %p formats. */ + switch (e[1]) { + case 's': { + const char *s = va_arg(argp, char *); + if (s == NULL) s = "(null)"; + addstr(L, sb, s, (MSize)strlen(s)); + break; + } + case 'c': + addchar(L, sb, va_arg(argp, int)); + break; + case 'd': { + char buf[LJ_STR_INTBUF]; + char *p = lj_str_bufint(buf, va_arg(argp, int32_t)); + addstr(L, sb, p, (MSize)(buf+LJ_STR_INTBUF-p)); + break; + } + case 'f': { + char buf[LJ_STR_NUMBUF]; + TValue tv; + MSize len; + tv.n = (lua_Number)(va_arg(argp, LUAI_UACNUMBER)); + len = (MSize)lj_str_bufnum(buf, &tv); + addstr(L, sb, buf, len); + break; + } + case 'p': { +#define FMTP_CHARS (2*sizeof(ptrdiff_t)) + char buf[2+FMTP_CHARS]; + ptrdiff_t p = (ptrdiff_t)(va_arg(argp, void *)); + ptrdiff_t i, lasti = 2+FMTP_CHARS; + if (p == 0) { + addstr(L, sb, "NULL", 4); + break; + } +#if LJ_64 + /* Shorten output for 64 bit pointers. */ + lasti = 2+2*4+((p >> 32) ? 2+2*(lj_fls((uint32_t)(p >> 32))>>3) : 0); +#endif + buf[0] = '0'; + buf[1] = 'x'; + for (i = lasti-1; i >= 2; i--, p >>= 4) + buf[i] = "0123456789abcdef"[(p & 15)]; + addstr(L, sb, buf, (MSize)lasti); + break; + } + case '%': + addchar(L, sb, '%'); + break; + default: + addchar(L, sb, '%'); + addchar(L, sb, e[1]); + break; + } + fmt = e+2; + } + addstr(L, sb, fmt, (MSize)strlen(fmt)); + setstrV(L, L->top, lj_str_new(L, sb->buf, sb->n)); + incr_top(L); + return strVdata(L->top - 1); +} + +/* Push formatted message as a string object to Lua stack. Vararg variant. */ +const char *lj_str_pushf(lua_State *L, const char *fmt, ...) +{ + const char *msg; + va_list argp; + va_start(argp, fmt); + msg = lj_str_pushvf(L, fmt, argp); + va_end(argp); + return msg; +} + +/* -- Buffer handling ----------------------------------------------------- */ + +char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz) +{ + if (sz > sb->sz) { + if (sz < LJ_MIN_SBUF) sz = LJ_MIN_SBUF; + lj_str_resizebuf(L, sb, sz); + } + return sb->buf; +} + diff --git a/src/luajit2/src/lj_str.h b/src/luajit2/src/lj_str.h new file mode 100644 index 0000000000..fbd927c027 --- /dev/null +++ b/src/luajit2/src/lj_str.h @@ -0,0 +1,53 @@ +/* +** String handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_STR_H +#define _LJ_STR_H + +#include <stdarg.h> + +#include "lj_obj.h" + +/* String interning. 
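+** All strings are interned in the global string hash (see lj_str_new in
+** lj_str.c), so two GCstr pointers are equal exactly when their contents
+** are equal.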
*/ +LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b); +LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask); +LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len); +LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s); + +#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s))) +#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1)) + +/* Type conversions. */ +LJ_FUNC int LJ_FASTCALL lj_str_numconv(const char *s, TValue *n); +LJ_FUNC int LJ_FASTCALL lj_str_tonum(GCstr *str, TValue *n); +LJ_FUNC int LJ_FASTCALL lj_str_tonumber(GCstr *str, TValue *n); +LJ_FUNC size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o); +LJ_FUNC char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k); +LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np); +LJ_FUNC GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k); +LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o); + +#define LJ_STR_INTBUF (1+10) +#define LJ_STR_NUMBUF LUAI_MAXNUMBER2STR + +/* String formatting. */ +LJ_FUNC const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp); +LJ_FUNC const char *lj_str_pushf(lua_State *L, const char *fmt, ...) +#if defined(__GNUC__) + __attribute__ ((format (printf, 2, 3))) +#endif + ; + +/* Resizable string buffers. Struct definition in lj_obj.h. */ +LJ_FUNC char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz); + +#define lj_str_initbuf(sb) ((sb)->buf = NULL, (sb)->sz = 0) +#define lj_str_resetbuf(sb) ((sb)->n = 0) +#define lj_str_resizebuf(L, sb, size) \ + ((sb)->buf = (char *)lj_mem_realloc(L, (sb)->buf, (sb)->sz, (size)), \ + (sb)->sz = (size)) +#define lj_str_freebuf(g, sb) lj_mem_free(g, (void *)(sb)->buf, (sb)->sz) + +#endif diff --git a/src/luajit2/src/lj_tab.c b/src/luajit2/src/lj_tab.c new file mode 100644 index 0000000000..cd200f021d --- /dev/null +++ b/src/luajit2/src/lj_tab.c @@ -0,0 +1,622 @@ +/* +** Table handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h +*/ + +#define lj_tab_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_tab.h" + +/* -- Object hashing ------------------------------------------------------ */ + +/* Hash values are masked with the table hash mask and used as an index. */ +static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash) +{ + Node *n = noderef(t->node); + return &n[hash & t->hmask]; +} + +/* String hashes are precomputed when they are interned. */ +#define hashstr(t, s) hashmask(t, (s)->hash) + +#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi))) +#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1)) +#define hashptr(t, p) hashlohi((t), u32ptr(p), u32ptr(p) + HASH_BIAS) +#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS) + +/* Hash an arbitrary key and return its anchor position in the hash table. */ +static Node *hashkey(const GCtab *t, cTValue *key) +{ + lua_assert(!tvisint(key)); + if (tvisstr(key)) + return hashstr(t, strV(key)); + else if (tvisnum(key)) + return hashnum(t, key); + else if (tvisbool(key)) + return hashmask(t, boolV(key)); + else + return hashgcref(t, key->gcr); + /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */ +} + +/* -- Table creation and destruction -------------------------------------- */ + +/* Create new hash part for table. 
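+** Illustrative sizing note: hbits is the log2 of the hash size, so e.g.
+** hbits=5 allocates 1u<<5 = 32 Node slots, sets t->hmask to 31 and points
+** freetop one node past the end of the vector.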
*/ +static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits) +{ + uint32_t hsize; + Node *node; + lua_assert(hbits != 0); + if (hbits > LJ_MAX_HBITS) + lj_err_msg(L, LJ_ERR_TABOV); + hsize = 1u << hbits; + node = lj_mem_newvec(L, hsize, Node); + setmref(node->freetop, &node[hsize]); + setmref(t->node, node); + t->hmask = hsize-1; +} + +/* +** Q: Why all of these copies of t->hmask, t->node etc. to local variables? +** A: Because alias analysis for C is _really_ tough. +** Even state-of-the-art C compilers won't produce good code without this. +*/ + +/* Clear hash part of table. */ +static LJ_AINLINE void clearhpart(GCtab *t) +{ + uint32_t i, hmask = t->hmask; + Node *node = noderef(t->node); + lua_assert(t->hmask != 0); + for (i = 0; i <= hmask; i++) { + Node *n = &node[i]; + setmref(n->next, NULL); + setnilV(&n->key); + setnilV(&n->val); + } +} + +/* Clear array part of table. */ +static LJ_AINLINE void clearapart(GCtab *t) +{ + uint32_t i, asize = t->asize; + TValue *array = tvref(t->array); + for (i = 0; i < asize; i++) + setnilV(&array[i]); +} + +/* Create a new table. Note: the slots are not initialized (yet). */ +static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits) +{ + GCtab *t; + /* First try to colocate the array part. */ + if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) { + lua_assert((sizeof(GCtab) & 7) == 0); + t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize)); + t->gct = ~LJ_TTAB; + t->nomm = (uint8_t)~0; + t->colo = (int8_t)asize; + setmref(t->array, (TValue *)((char *)t + sizeof(GCtab))); + setgcrefnull(t->metatable); + t->asize = asize; + t->hmask = 0; + setmref(t->node, &G(L)->nilnode); + } else { /* Otherwise separately allocate the array part. */ + t = lj_mem_newobj(L, GCtab); + t->gct = ~LJ_TTAB; + t->nomm = (uint8_t)~0; + t->colo = 0; + setmref(t->array, NULL); + setgcrefnull(t->metatable); + t->asize = 0; /* In case the array allocation fails. */ + t->hmask = 0; + setmref(t->node, &G(L)->nilnode); + if (asize > 0) { + if (asize > LJ_MAX_ASIZE) + lj_err_msg(L, LJ_ERR_TABOV); + setmref(t->array, lj_mem_newvec(L, asize, TValue)); + t->asize = asize; + } + } + if (hbits) + newhpart(L, t, hbits); + return t; +} + +/* Create a new table. +** +** IMPORTANT NOTE: The API differs from lua_createtable()! +** +** The array size is non-inclusive. E.g. asize=128 creates array slots +** for 0..127, but not for 128. If you need slots 1..128, pass asize=129 +** (slot 0 is wasted in this case). +** +** The hash size is given in hash bits. hbits=0 means no hash part. +** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on. +*/ +GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits) +{ + GCtab *t = newtab(L, asize, hbits); + clearapart(t); + if (t->hmask > 0) clearhpart(t); + return t; +} + +#if LJ_HASJIT +GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize) +{ + GCtab *t = newtab(L, ahsize & 0xffffff, ahsize >> 24); + clearapart(t); + if (t->hmask > 0) clearhpart(t); + return t; +} +#endif + +/* Duplicate a table. */ +GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt) +{ + GCtab *t; + uint32_t asize, hmask; + t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0); + lua_assert(kt->asize == t->asize && kt->hmask == t->hmask); + t->nomm = 0; /* Keys with metamethod names may be present. */ + asize = kt->asize; + if (asize > 0) { + TValue *array = tvref(t->array); + TValue *karray = tvref(kt->array); + if (asize < 64) { /* An inlined loop beats memcpy for < 512 bytes. 
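+** (Each TValue is 8 bytes, so the 64-slot threshold corresponds to
+** 64*8 = 512 bytes of array data.)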
*/ + uint32_t i; + for (i = 0; i < asize; i++) + copyTV(L, &array[i], &karray[i]); + } else { + memcpy(array, karray, asize*sizeof(TValue)); + } + } + hmask = kt->hmask; + if (hmask > 0) { + uint32_t i; + Node *node = noderef(t->node); + Node *knode = noderef(kt->node); + ptrdiff_t d = (char *)node - (char *)knode; + setmref(node->freetop, (Node *)((char *)noderef(knode->freetop) + d)); + for (i = 0; i <= hmask; i++) { + Node *kn = &knode[i]; + Node *n = &node[i]; + Node *next = nextnode(kn); + /* Don't use copyTV here, since it asserts on a copy of a dead key. */ + n->val = kn->val; n->key = kn->key; + setmref(n->next, next == NULL? next : (Node *)((char *)next + d)); + } + } + return t; +} + +/* Free a table. */ +void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t) +{ + if (t->hmask > 0) + lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node); + if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0) + lj_mem_freevec(g, tvref(t->array), t->asize, TValue); + if (LJ_MAX_COLOSIZE != 0 && t->colo) + lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f)); + else + lj_mem_freet(g, t); +} + +/* -- Table resizing ------------------------------------------------------ */ + +/* Resize a table to fit the new array/hash part sizes. */ +static void resizetab(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits) +{ + Node *oldnode = noderef(t->node); + uint32_t oldasize = t->asize; + uint32_t oldhmask = t->hmask; + if (asize > oldasize) { /* Array part grows? */ + TValue *array; + uint32_t i; + if (asize > LJ_MAX_ASIZE) + lj_err_msg(L, LJ_ERR_TABOV); + if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) { + /* A colocated array must be separated and copied. */ + TValue *oarray = tvref(t->array); + array = lj_mem_newvec(L, asize, TValue); + t->colo = (int8_t)(t->colo | 0x80); /* Mark as separated (colo < 0). */ + for (i = 0; i < oldasize; i++) + copyTV(L, &array[i], &oarray[i]); + } else { + array = (TValue *)lj_mem_realloc(L, tvref(t->array), + oldasize*sizeof(TValue), asize*sizeof(TValue)); + } + setmref(t->array, array); + t->asize = asize; + for (i = oldasize; i < asize; i++) /* Clear newly allocated slots. */ + setnilV(&array[i]); + } + /* Create new (empty) hash part. */ + if (hbits) { + newhpart(L, t, hbits); + clearhpart(t); + } else { + global_State *g = G(L); + setmref(t->node, &g->nilnode); + t->hmask = 0; + } + if (asize < oldasize) { /* Array part shrinks? */ + TValue *array = tvref(t->array); + uint32_t i; + t->asize = asize; /* Note: This 'shrinks' even colocated arrays. */ + for (i = asize; i < oldasize; i++) /* Reinsert old array values. */ + if (!tvisnil(&array[i])) + copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]); + /* Physically shrink only separated arrays. */ + if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0) + setmref(t->array, lj_mem_realloc(L, array, + oldasize*sizeof(TValue), asize*sizeof(TValue))); + } + if (oldhmask > 0) { /* Reinsert pairs from old hash part. */ + global_State *g; + uint32_t i; + for (i = 0; i <= oldhmask; i++) { + Node *n = &oldnode[i]; + if (!tvisnil(&n->val)) + copyTV(L, lj_tab_set(L, t, &n->key), &n->val); + } + g = G(L); + lj_mem_freevec(g, oldnode, oldhmask+1, Node); + } +} + +static uint32_t countint(cTValue *key, uint32_t *bins) +{ + lua_assert(!tvisint(key)); + if (tvisnum(key)) { + lua_Number nk = numV(key); + int32_t k = lj_num2int(nk); + if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) { + bins[(k > 2 ? 
lj_fls((uint32_t)(k-1)) : 0)]++; + return 1; + } + } + return 0; +} + +static uint32_t countarray(const GCtab *t, uint32_t *bins) +{ + uint32_t na, b, i; + if (t->asize == 0) return 0; + for (na = i = b = 0; b < LJ_MAX_ABITS; b++) { + uint32_t n, top = 2u << b; + TValue *array; + if (top >= t->asize) { + top = t->asize-1; + if (i > top) + break; + } + array = tvref(t->array); + for (n = 0; i <= top; i++) + if (!tvisnil(&array[i])) + n++; + bins[b] += n; + na += n; + } + return na; +} + +static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray) +{ + uint32_t total, na, i, hmask = t->hmask; + Node *node = noderef(t->node); + for (total = na = 0, i = 0; i <= hmask; i++) { + Node *n = &node[i]; + if (!tvisnil(&n->val)) { + na += countint(&n->key, bins); + total++; + } + } + *narray += na; + return total; +} + +static uint32_t bestasize(uint32_t bins[], uint32_t *narray) +{ + uint32_t b, sum, na = 0, sz = 0, nn = *narray; + for (b = 0, sum = 0; 2*nn > (1u<<b) && sum != nn; b++) + if (bins[b] > 0 && 2*(sum += bins[b]) > (1u<<b)) { + sz = (2u<<b)+1; + na = sum; + } + *narray = sz; + return na; +} + +static void rehashtab(lua_State *L, GCtab *t, cTValue *ek) +{ + uint32_t bins[LJ_MAX_ABITS]; + uint32_t total, asize, na, i; + for (i = 0; i < LJ_MAX_ABITS; i++) bins[i] = 0; + asize = countarray(t, bins); + total = 1 + asize; + total += counthash(t, bins, &asize); + asize += countint(ek, bins); + na = bestasize(bins, &asize); + total -= na; + resizetab(L, t, asize, hsize2hbits(total)); +} + +void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize) +{ + resizetab(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0); +} + +/* -- Table getters ------------------------------------------------------- */ + +cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key) +{ + TValue k; + Node *n; + k.n = (lua_Number)key; + n = hashnum(t, &k); + do { + if (tvisnum(&n->key) && n->key.n == k.n) + return &n->val; + } while ((n = nextnode(n))); + return NULL; +} + +cTValue *lj_tab_getstr(GCtab *t, GCstr *key) +{ + Node *n = hashstr(t, key); + do { + if (tvisstr(&n->key) && strV(&n->key) == key) + return &n->val; + } while ((n = nextnode(n))); + return NULL; +} + +cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key) +{ + if (tvisstr(key)) { + cTValue *tv = lj_tab_getstr(t, strV(key)); + if (tv) + return tv; + } else if (tvisint(key)) { + cTValue *tv = lj_tab_getint(t, intV(key)); + if (tv) + return tv; + } else if (tvisnum(key)) { + lua_Number nk = numV(key); + int32_t k = lj_num2int(nk); + if (nk == (lua_Number)k) { + cTValue *tv = lj_tab_getint(t, k); + if (tv) + return tv; + } else { + goto genlookup; /* Else use the generic lookup. */ + } + } else if (!tvisnil(key)) { + Node *n; + genlookup: + n = hashkey(t, key); + do { + if (lj_obj_equal(&n->key, key)) + return &n->val; + } while ((n = nextnode(n))); + } + return niltv(L); +} + +/* -- Table setters ------------------------------------------------------- */ + +/* Insert new key. Use Brent's variation to optimize the chain length. */ +TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key) +{ + Node *n = hashkey(t, key); + if (!tvisnil(&n->val) || t->hmask == 0) { + Node *nodebase = noderef(t->node); + Node *collide, *freenode = noderef(nodebase->freetop); + lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1); + do { + if (freenode == nodebase) { /* No free node found? */ + rehashtab(L, t, key); /* Rehash table. */ + return lj_tab_set(L, t, key); /* Retry key insertion. 
*/ + } + } while (!tvisnil(&(--freenode)->key)); + setmref(nodebase->freetop, freenode); + lua_assert(freenode != &G(L)->nilnode); + collide = hashkey(t, &n->key); + if (collide != n) { /* Colliding node not the main node? */ + while (noderef(collide->next) != n) /* Find predecessor. */ + collide = nextnode(collide); + setmref(collide->next, freenode); /* Relink chain. */ + /* Copy colliding node into free node and free main node. */ + freenode->val = n->val; + freenode->key = n->key; + freenode->next = n->next; + setmref(n->next, NULL); + setnilV(&n->val); + /* Rechain pseudo-resurrected string keys with colliding hashes. */ + while (nextnode(freenode)) { + Node *nn = nextnode(freenode); + if (tvisstr(&nn->key) && !tvisnil(&nn->val) && + hashstr(t, strV(&nn->key)) == n) { + freenode->next = nn->next; + nn->next = n->next; + setmref(n->next, nn); + } else { + freenode = nn; + } + } + } else { /* Otherwise use free node. */ + setmrefr(freenode->next, n->next); /* Insert into chain. */ + setmref(n->next, freenode); + n = freenode; + } + } + n->key.u64 = key->u64; + if (LJ_UNLIKELY(tvismzero(&n->key))) + n->key.u64 = 0; + lj_gc_anybarriert(L, t); + lua_assert(tvisnil(&n->val)); + return &n->val; +} + +TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key) +{ + TValue k; + Node *n; + k.n = (lua_Number)key; + n = hashnum(t, &k); + do { + if (tvisnum(&n->key) && n->key.n == k.n) + return &n->val; + } while ((n = nextnode(n))); + return lj_tab_newkey(L, t, &k); +} + +TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key) +{ + TValue k; + Node *n = hashstr(t, key); + do { + if (tvisstr(&n->key) && strV(&n->key) == key) + return &n->val; + } while ((n = nextnode(n))); + setstrV(L, &k, key); + return lj_tab_newkey(L, t, &k); +} + +TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key) +{ + Node *n; + t->nomm = 0; /* Invalidate negative metamethod cache. */ + if (tvisstr(key)) { + return lj_tab_setstr(L, t, strV(key)); + } else if (tvisint(key)) { + return lj_tab_setint(L, t, intV(key)); + } else if (tvisnum(key)) { + lua_Number nk = numV(key); + int32_t k = lj_num2int(nk); + if (nk == (lua_Number)k) + return lj_tab_setint(L, t, k); + if (tvisnan(key)) + lj_err_msg(L, LJ_ERR_NANIDX); + /* Else use the generic lookup. */ + } else if (tvisnil(key)) { + lj_err_msg(L, LJ_ERR_NILIDX); + } + n = hashkey(t, key); + do { + if (lj_obj_equal(&n->key, key)) + return &n->val; + } while ((n = nextnode(n))); + return lj_tab_newkey(L, t, key); +} + +/* -- Table traversal ----------------------------------------------------- */ + +/* Get the traversal index of a key. */ +static uint32_t keyindex(lua_State *L, GCtab *t, cTValue *key) +{ + TValue tmp; + if (tvisint(key)) { + int32_t k = intV(key); + if ((uint32_t)k < t->asize) + return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */ + setnumV(&tmp, (lua_Number)k); + key = &tmp; + } else if (tvisnum(key)) { + lua_Number nk = numV(key); + int32_t k = lj_num2int(nk); + if ((uint32_t)k < t->asize && nk == (lua_Number)k) + return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */ + } + if (!tvisnil(key)) { + Node *n = hashkey(t, key); + do { + if (lj_obj_equal(&n->key, key)) + return t->asize + (uint32_t)(n - noderef(t->node)); + /* Hash key indexes: [t->asize..t->asize+t->nmask] */ + } while ((n = nextnode(n))); + lj_err_msg(L, LJ_ERR_NEXTIDX); + return 0; /* unreachable */ + } + return ~0u; /* A nil key starts the traversal. */ +} + +/* Advance to the next step in a table traversal. 
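+** Minimal usage sketch (hypothetical caller; real callers keep the key in
+** two adjacent Lua stack slots):
+**   TValue k[2];
+**   setnilV(&k[0]);
+**   while (lj_tab_next(L, t, k)) { ...k[0] is the key, k[1] the value... }
+** A nil key starts the traversal; 0 is returned once all array and hash
+** slots have been visited.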
*/ +int lj_tab_next(lua_State *L, GCtab *t, TValue *key) +{ + uint32_t i = keyindex(L, t, key); /* Find predecessor key index. */ + for (i++; i < t->asize; i++) /* First traverse the array keys. */ + if (!tvisnil(arrayslot(t, i))) { + setintV(key, i); + copyTV(L, key+1, arrayslot(t, i)); + return 1; + } + for (i -= t->asize; i <= t->hmask; i++) { /* Then traverse the hash keys. */ + Node *n = &noderef(t->node)[i]; + if (!tvisnil(&n->val)) { + copyTV(L, key, &n->key); + copyTV(L, key+1, &n->val); + return 1; + } + } + return 0; /* End of traversal. */ +} + +/* -- Table length calculation -------------------------------------------- */ + +static MSize unbound_search(GCtab *t, MSize j) +{ + cTValue *tv; + MSize i = j; /* i is zero or a present index */ + j++; + /* find `i' and `j' such that i is present and j is not */ + while ((tv = lj_tab_getint(t, (int32_t)j)) && !tvisnil(tv)) { + i = j; + j *= 2; + if (j > (MSize)(INT_MAX-2)) { /* overflow? */ + /* table was built with bad purposes: resort to linear search */ + i = 1; + while ((tv = lj_tab_getint(t, (int32_t)i)) && !tvisnil(tv)) i++; + return i - 1; + } + } + /* now do a binary search between them */ + while (j - i > 1) { + MSize m = (i+j)/2; + cTValue *tvb = lj_tab_getint(t, (int32_t)m); + if (tvb && !tvisnil(tvb)) i = m; else j = m; + } + return i; +} + +/* +** Try to find a boundary in table `t'. A `boundary' is an integer index +** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil). +*/ +MSize LJ_FASTCALL lj_tab_len(GCtab *t) +{ + MSize j = (MSize)t->asize; + if (j > 1 && tvisnil(arrayslot(t, j-1))) { + MSize i = 1; + while (j - i > 1) { + MSize m = (i+j)/2; + if (tvisnil(arrayslot(t, m-1))) j = m; else i = m; + } + return i-1; + } + if (j) j--; + if (t->hmask <= 0) + return j; + return unbound_search(t, j); +} + diff --git a/src/luajit2/src/lj_tab.h b/src/luajit2/src/lj_tab.h new file mode 100644 index 0000000000..76e96f130d --- /dev/null +++ b/src/luajit2/src/lj_tab.h @@ -0,0 +1,67 @@ +/* +** Table handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TAB_H +#define _LJ_TAB_H + +#include "lj_obj.h" + +/* Hash constants. Tuned using a brute force search. */ +#define HASH_BIAS (-0x04c11db7) +#define HASH_ROT1 14 +#define HASH_ROT2 5 +#define HASH_ROT3 13 + +/* Scramble the bits of numbers and pointers. */ +static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi) +{ +#if LJ_TARGET_X86ORX64 + /* Prefer variant that compiles well for a 2-operand CPU. */ + lo ^= hi; hi = lj_rol(hi, HASH_ROT1); + lo -= hi; hi = lj_rol(hi, HASH_ROT2); + hi ^= lo; hi -= lj_rol(lo, HASH_ROT3); +#else + lo ^= hi; + lo = lo - lj_rol(hi, HASH_ROT1); + hi = lo ^ lj_rol(hi, HASH_ROT1 + HASH_ROT2); + hi = hi - lj_rol(lo, HASH_ROT3); +#endif + return hi; +} + +#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0) + +LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits); +#if LJ_HASJIT +LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize); +#endif +LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt); +LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t); +LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize); + +/* Caveat: all getters except lj_tab_get() can return NULL! 
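+** Hedged usage sketch of that convention (caller code, not part of the API):
+**   cTValue *tv = lj_tab_getstr(t, s);
+**   if (tv != NULL && !tvisnil(tv)) { ...the key is present with a value... }
+** lj_tab_get() instead returns niltv(L) for a missing key, so its result can
+** always be dereferenced.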
*/ + +LJ_FUNCA cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key); +LJ_FUNC cTValue *lj_tab_getstr(GCtab *t, GCstr *key); +LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key); + +/* Caveat: all setters require a write barrier for the stored value. */ + +LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key); +LJ_FUNC TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key); +LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key); +LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key); + +#define inarray(t, key) ((MSize)(key) < (MSize)(t)->asize) +#define arrayslot(t, i) (&tvref((t)->array)[(i)]) +#define lj_tab_getint(t, key) \ + (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_getinth((t), (key))) +#define lj_tab_setint(L, t, key) \ + (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_setinth(L, (t), (key))) + +LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key); +LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t); + +#endif diff --git a/src/luajit2/src/lj_target.h b/src/luajit2/src/lj_target.h new file mode 100644 index 0000000000..410ad0a0fc --- /dev/null +++ b/src/luajit2/src/lj_target.h @@ -0,0 +1,142 @@ +/* +** Definitions for target CPU. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_H +#define _LJ_TARGET_H + +#include "lj_def.h" +#include "lj_arch.h" + +/* -- Registers and spill slots ------------------------------------------- */ + +/* Register type (uint8_t in ir->r). */ +typedef uint32_t Reg; + +/* The hi-bit is NOT set for an allocated register. This means the value +** can be directly used without masking. The hi-bit is set for a register +** allocation hint or for RID_INIT. +*/ +#define RID_NONE 0x80 +#define RID_MASK 0x7f +#define RID_INIT (RID_NONE|RID_MASK) + +#define ra_noreg(r) ((r) & RID_NONE) +#define ra_hasreg(r) (!((r) & RID_NONE)) + +/* The ra_hashint() macro assumes a previous test for ra_noreg(). */ +#define ra_hashint(r) ((r) != RID_INIT) +#define ra_gethint(r) ((Reg)((r) & RID_MASK)) +#define ra_sethint(rr, r) rr = (uint8_t)((r)|RID_NONE) +#define ra_samehint(r1, r2) (ra_gethint((r1)^(r2)) == 0) + +/* Spill slot 0 means no spill slot has been allocated. */ +#define SPS_NONE 0 + +#define ra_hasspill(s) ((s) != SPS_NONE) + +/* Combined register and spill slot (uint16_t in ir->prev). */ +typedef uint32_t RegSP; + +#define REGSP(r, s) ((r) + ((s) << 8)) +#define REGSP_HINT(r) ((r)|RID_NONE) +#define REGSP_INIT REGSP(RID_INIT, 0) + +#define regsp_reg(rs) ((rs) & 255) +#define regsp_spill(rs) ((rs) >> 8) +#define regsp_used(rs) \ + (((rs) & ~REGSP(RID_MASK, 0)) != REGSP(RID_NONE, 0)) + +/* -- Register sets ------------------------------------------------------- */ + +/* Bitset for registers. 32 registers suffice right now. +** Note that one set holds bits for both GPRs and FPRs. 
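+** Illustrative use of the macros defined below (the RID_* names come from
+** the target headers): RID2RSET(r) is the single-bit set for register r,
+** rset_set()/rset_clear()/rset_test() update and query membership, and
+** RSET_RANGE(lo, hi) builds the contiguous set [lo, hi).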
+*/ +typedef uint32_t RegSet; + +#define RID2RSET(r) (((RegSet)1) << (r)) +#define RSET_EMPTY 0 +#define RSET_RANGE(lo, hi) ((RID2RSET((hi)-(lo))-1) << (lo)) + +#define rset_test(rs, r) (((rs) >> (r)) & 1) +#define rset_set(rs, r) (rs |= RID2RSET(r)) +#define rset_clear(rs, r) (rs &= ~RID2RSET(r)) +#define rset_exclude(rs, r) (rs & ~RID2RSET(r)) +#define rset_picktop(rs) ((Reg)lj_fls(rs)) +#define rset_pickbot(rs) ((Reg)lj_ffs(rs)) + +/* -- Register allocation cost -------------------------------------------- */ + +/* The register allocation heuristic keeps track of the cost for allocating +** a specific register: +** +** A free register (obviously) has a cost of 0 and a 1-bit in the free mask. +** +** An already allocated register has the (non-zero) IR reference in the lowest +** bits and the result of a blended cost-model in the higher bits. +** +** The allocator first checks the free mask for a hit. Otherwise an (unrolled) +** linear search for the minimum cost is used. The search doesn't need to +** keep track of the position of the minimum, which makes it very fast. +** The lowest bits of the minimum cost show the desired IR reference whose +** register is the one to evict. +** +** Without the cost-model this degenerates to the standard heuristics for +** (reverse) linear-scan register allocation. Since code generation is done +** in reverse, a live interval extends from the last use to the first def. +** For an SSA IR the IR reference is the first (and only) def and thus +** trivially marks the end of the interval. The LSRA heuristics says to pick +** the register whose live interval has the furthest extent, i.e. the lowest +** IR reference in our case. +** +** A cost-model should take into account other factors, like spill-cost and +** restore- or rematerialization-cost, which depend on the kind of instruction. +** E.g. constants have zero spill costs, variant instructions have higher +** costs than invariants and PHIs should preferably never be spilled. +** +** Here's a first cut at simple, but effective blended cost-model for R-LSRA: +** - Due to careful design of the IR, constants already have lower IR +** references than invariants and invariants have lower IR references +** than variants. +** - The cost in the upper 16 bits is the sum of the IR reference and a +** weighted score. The score currently only takes into account whether +** the IRT_ISPHI bit is set in the instruction type. +** - The PHI weight is the minimum distance (in IR instructions) a PHI +** reference has to be further apart from a non-PHI reference to be spilled. +** - It should be a power of two (for speed) and must be between 2 and 32768. +** Good values for the PHI weight seem to be between 40 and 150. +** - Further study is required. +*/ +#define REGCOST_PHI_WEIGHT 64 + +/* Cost for allocating a specific register. */ +typedef uint32_t RegCost; + +/* Note: assumes 16 bit IRRef1. */ +#define REGCOST(cost, ref) ((RegCost)(ref) + ((RegCost)(cost) << 16)) +#define regcost_ref(rc) ((IRRef1)(rc)) + +#define REGCOST_T(t) \ + ((RegCost)((t)&IRT_ISPHI) * (((RegCost)(REGCOST_PHI_WEIGHT)<<16)/IRT_ISPHI)) +#define REGCOST_REF_T(ref, t) (REGCOST((ref), (ref)) + REGCOST_T((t))) + +/* -- Target-specific definitions ----------------------------------------- */ + +#if LJ_TARGET_X86ORX64 +#include "lj_target_x86.h" +#elif LJ_TARGET_ARM +#include "lj_target_arm.h" +#else +#error "Missing include for target CPU" +#endif + +/* Return the address of an exit stub. 
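+** Worked example (purely illustrative, using EXITSTUBS_PER_GROUP=32 and
+** EXITSTUB_SPACING=4 from the target headers): exit number 37 resolves to
+** group 37/32 = 1 at byte offset 4*(37%32) = 20 from that group's base.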
*/ +static LJ_AINLINE MCode *exitstub_addr(jit_State *J, ExitNo exitno) +{ + lua_assert(J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] != NULL); + return (MCode *)((char *)J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] + + EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP)); +} + +#endif diff --git a/src/luajit2/src/lj_target_arm.h b/src/luajit2/src/lj_target_arm.h new file mode 100644 index 0000000000..78a5679d61 --- /dev/null +++ b/src/luajit2/src/lj_target_arm.h @@ -0,0 +1,205 @@ +/* +** Definitions for ARM CPUs. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_ARM_H +#define _LJ_TARGET_ARM_H + +/* -- Registers IDs ------------------------------------------------------- */ + +#define GPRDEF(_) \ + _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \ + _(R8) _(R9) _(R10) _(R11) _(R12) _(SP) _(LR) _(PC) +#if LJ_SOFTFP +#define FPRDEF(_) +#else +#error "NYI: hard-float support for ARM" +#endif +#define VRIDDEF(_) + +#define RIDENUM(name) RID_##name, + +enum { + GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ + FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ + RID_MAX, + RID_TMP = RID_LR, + + /* Calling conventions. */ + RID_RET = RID_R0, + RID_RETHI = RID_R1, + RID_FPRET = RID_R0, + + /* These definitions must match with the *.dasc file(s): */ + RID_BASE = RID_R9, /* Interpreter BASE. */ + RID_LPC = RID_R6, /* Interpreter PC. */ + RID_DISPATCH = RID_R7, /* Interpreter DISPATCH table. */ + RID_LREG = RID_R8, /* Interpreter L. */ + + /* Register ranges [min, max) and number of registers. */ + RID_MIN_GPR = RID_R0, + RID_MAX_GPR = RID_PC+1, + RID_MIN_FPR = RID_MAX_GPR, +#if LJ_SOFTFP + RID_MAX_FPR = RID_MIN_FPR, +#else +#error "NYI: VFP support for ARM" +#endif + RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, + RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR +}; + +#define RID_NUM_KREF RID_NUM_GPR +#define RID_MIN_KREF RID_R0 + +/* -- Register sets ------------------------------------------------------- */ + +/* Make use of all registers, except sp, lr and pc. */ +#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_R12+1)) +#define RSET_GPREVEN \ + (RID2RSET(RID_R0)|RID2RSET(RID_R2)|RID2RSET(RID_R4)|RID2RSET(RID_R6)| \ + RID2RSET(RID_R8)|RID2RSET(RID_R10)) +#define RSET_GPRODD \ + (RID2RSET(RID_R1)|RID2RSET(RID_R3)|RID2RSET(RID_R5)|RID2RSET(RID_R7)| \ + RID2RSET(RID_R9)|RID2RSET(RID_R11)) +#if LJ_SOFTFP +#define RSET_FPR 0 +#define RSET_ALL RSET_GPR +#else +#error "NYI: VFP support for ARM" +#endif +#define RSET_INIT RSET_ALL + +/* ABI-specific register sets. lr is an implicit scratch register. */ +#define RSET_SCRATCH_GPR_ (RSET_RANGE(RID_R0, RID_R3+1)|RID2RSET(RID_R12)) +#ifdef __APPLE__ +#define RSET_SCRATCH_GPR (RSET_SCRATCH_GPR_|RID2RSET(RID_R9)) +#else +#define RSET_SCRATCH_GPR RSET_SCRATCH_GPR_ +#endif +#if LJ_SOFTFP +#define RSET_SCRATCH_FPR 0 +#else +#error "NYI: VFP support for ARM" +#endif +#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) +#define REGARG_FIRSTGPR RID_R0 +#define REGARG_LASTGPR RID_R3 +#define REGARG_NUMGPR 4 + +/* -- Spill slots --------------------------------------------------------- */ + +/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. +** +** SPS_FIXED: Available fixed spill slots in interpreter frame. +** This definition must match with the *.dasc file(s). +** +** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots. 
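+**
+** Illustrative arithmetic with the ARM values below (SPS_FIXED=2):
+** sps_scale(slot) is the byte offset 4*slot, and sps_align() rounds the
+** spill area to an even number of slots, e.g. sps_align(5) = (5-2+1)&~1 = 4.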
+*/ +#define SPS_FIXED 2 +#define SPS_FIRST 2 + +#define sps_scale(slot) (4 * (int32_t)(slot)) +#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1) + +/* -- Exit state ---------------------------------------------------------- */ + +/* This definition must match with the *.dasc file(s). */ +typedef struct { +#if !LJ_SOFTFP + lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ +#endif + int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ + int32_t spill[256]; /* Spill slots. */ +} ExitState; + +/* PC after instruction that caused an exit. Used to find the trace number. */ +#define EXITSTATE_PCREG RID_PC + +#define EXITSTUB_SPACING 4 +#define EXITSTUBS_PER_GROUP 32 + +/* -- Instructions -------------------------------------------------------- */ + +/* Instruction fields. */ +#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((cc) << 28)) +#define ARMF_N(r) ((r) << 16) +#define ARMF_D(r) ((r) << 12) +#define ARMF_S(r) ((r) << 8) +#define ARMF_M(r) (r) +#define ARMF_SH(sh, n) (((sh) << 5) | ((n) << 7)) +#define ARMF_RSH(sh, r) (0x10 | ((sh) << 5) | ARMF_S(r)) + +typedef enum ARMIns { + ARMI_CCAL = 0xe0000000, + ARMI_S = 0x000100000, + ARMI_K12 = 0x02000000, + ARMI_KNEG = 0x00200000, + ARMI_LS_U = 0x00800000, + ARMI_LS_P = 0x01000000, + ARMI_LS_R = 0x02000000, + ARMI_LSX_I = 0x00400000, + + ARMI_AND = 0xe0000000, + ARMI_EOR = 0xe0200000, + ARMI_SUB = 0xe0400000, + ARMI_RSB = 0xe0600000, + ARMI_ADD = 0xe0800000, + ARMI_ADC = 0xe0a00000, + ARMI_SBC = 0xe0c00000, + ARMI_RSC = 0xe0e00000, + ARMI_TST = 0xe1100000, + ARMI_TEQ = 0xe1300000, + ARMI_CMP = 0xe1500000, + ARMI_CMN = 0xe1700000, + ARMI_ORR = 0xe1800000, + ARMI_MOV = 0xe1a00000, + ARMI_BIC = 0xe1c00000, + ARMI_MVN = 0xe1e00000, + + ARMI_NOP = 0xe1a00000, + + ARMI_MUL = 0xe0000090, + ARMI_SMULL = 0xe0c00090, + + ARMI_LDR = 0xe4100000, + ARMI_LDRB = 0xe4500000, + ARMI_LDRH = 0xe01000b0, + ARMI_LDRSB = 0xe01000d0, + ARMI_LDRSH = 0xe01000f0, + ARMI_LDRD = 0xe00000d0, + ARMI_STR = 0xe4000000, + ARMI_STRB = 0xe4400000, + ARMI_STRH = 0xe00000b0, + ARMI_STRD = 0xe00000f0, + + ARMI_B = 0xea000000, + ARMI_BL = 0xeb000000, + ARMI_BLX = 0xfa000000, + ARMI_BLXr = 0xe12fff30, + + /* ARMv6 */ + ARMI_REV = 0xe6bf0f30, + ARMI_SXTB = 0xe6af0070, + ARMI_SXTH = 0xe6bf0070, + ARMI_UXTB = 0xe6ef0070, + ARMI_UXTH = 0xe6ff0070, + + /* ARMv6T2 */ + ARMI_MOVW = 0xe3000000, + ARMI_MOVT = 0xe3400000, +} ARMIns; + +typedef enum ARMShift { + ARMSH_LSL, ARMSH_LSR, ARMSH_ASR, ARMSH_ROR +} ARMShift; + +/* ARM condition codes. */ +typedef enum ARMCC { + CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC, + CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL, + CC_HS = CC_CS, CC_LO = CC_CC +} ARMCC; + +#endif diff --git a/src/luajit2/src/lj_target_x86.h b/src/luajit2/src/lj_target_x86.h new file mode 100644 index 0000000000..34b247d4c9 --- /dev/null +++ b/src/luajit2/src/lj_target_x86.h @@ -0,0 +1,330 @@ +/* +** Definitions for x86 and x64 CPUs. +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TARGET_X86_H +#define _LJ_TARGET_X86_H + +/* -- Registers IDs ------------------------------------------------------- */ + +#if LJ_64 +#define GPRDEF(_) \ + _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) \ + _(R8D) _(R9D) _(R10D) _(R11D) _(R12D) _(R13D) _(R14D) _(R15D) +#define FPRDEF(_) \ + _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) \ + _(XMM8) _(XMM9) _(XMM10) _(XMM11) _(XMM12) _(XMM13) _(XMM14) _(XMM15) +#else +#define GPRDEF(_) \ + _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) +#define FPRDEF(_) \ + _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) +#endif +#define VRIDDEF(_) \ + _(MRM) + +#define RIDENUM(name) RID_##name, + +enum { + GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */ + FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ + RID_MAX, + RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */ + + /* Calling conventions. */ + RID_RET = RID_EAX, +#if LJ_64 + RID_FPRET = RID_XMM0, +#else + RID_RETHI = RID_EDX, +#endif + + /* These definitions must match with the *.dasc file(s): */ + RID_BASE = RID_EDX, /* Interpreter BASE. */ +#if LJ_64 && !LJ_ABI_WIN + RID_LPC = RID_EBX, /* Interpreter PC. */ + RID_DISPATCH = RID_R14D, /* Interpreter DISPATCH table. */ +#else + RID_LPC = RID_ESI, /* Interpreter PC. */ + RID_DISPATCH = RID_EBX, /* Interpreter DISPATCH table. */ +#endif + + /* Register ranges [min, max) and number of registers. */ + RID_MIN_GPR = RID_EAX, + RID_MIN_FPR = RID_XMM0, + RID_MAX_GPR = RID_MIN_FPR, + RID_MAX_FPR = RID_MAX, + RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, + RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR, +}; + +/* -- Register sets ------------------------------------------------------- */ + +/* Make use of all registers, except the stack pointer. */ +#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR)-RID2RSET(RID_ESP)) +#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)) +#define RSET_ALL (RSET_GPR|RSET_FPR) +#define RSET_INIT RSET_ALL + +#if LJ_64 +/* Note: this requires the use of FORCE_REX! */ +#define RSET_GPR8 RSET_GPR +#else +#define RSET_GPR8 (RSET_RANGE(RID_EAX, RID_EBX+1)) +#endif + +/* ABI-specific register sets. */ +#define RSET_ACD (RID2RSET(RID_EAX)|RID2RSET(RID_ECX)|RID2RSET(RID_EDX)) +#if LJ_64 +#if LJ_ABI_WIN +/* Windows x64 ABI. */ +#define RSET_SCRATCH \ + (RSET_ACD|RSET_RANGE(RID_R8D, RID_R11D+1)|RSET_RANGE(RID_XMM0, RID_XMM5+1)) +#define REGARG_GPRS \ + (RID_ECX|((RID_EDX|((RID_R8D|(RID_R9D<<5))<<5))<<5)) +#define REGARG_NUMGPR 4 +#define REGARG_FIRSTFPR RID_XMM0 +#define REGARG_LASTFPR RID_XMM3 +#define STACKARG_OFS (4*8) +#else +/* The rest of the civilized x64 world has a common ABI. */ +#define RSET_SCRATCH \ + (RSET_ACD|RSET_RANGE(RID_ESI, RID_R11D+1)|RSET_FPR) +#define REGARG_GPRS \ + (RID_EDI|((RID_ESI|((RID_EDX|((RID_ECX|((RID_R8D|(RID_R9D \ + <<5))<<5))<<5))<<5))<<5)) +#define REGARG_NUMGPR 6 +#define REGARG_FIRSTFPR RID_XMM0 +#define REGARG_LASTFPR RID_XMM7 +#define STACKARG_OFS 0 +#endif +#else +/* Common x86 ABI. */ +#define RSET_SCRATCH (RSET_ACD|RSET_FPR) +#define REGARG_GPRS (RID_ECX|(RID_EDX<<5)) /* Fastcall only. */ +#define REGARG_NUMGPR 2 /* Fastcall only. */ +#define STACKARG_OFS 0 +#endif + +#if LJ_64 +/* Prefer the low 8 regs of each type to reduce REX prefixes. */ +#undef rset_picktop +#define rset_picktop(rs) (lj_fls(lj_bswap(rs)) ^ 0x18) +#endif + +/* -- Spill slots --------------------------------------------------------- */ + +/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs. 
+** +** SPS_FIXED: Available fixed spill slots in interpreter frame. +** This definition must match with the *.dasc file(s). +** +** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots. +*/ +#if LJ_64 +#if LJ_ABI_WIN +#define SPS_FIXED (4*2) +#define SPS_FIRST (4*2) /* Don't use callee register save area. */ +#else +#define SPS_FIXED 4 +#define SPS_FIRST 2 +#endif +#else +#define SPS_FIXED 6 +#define SPS_FIRST 2 +#endif + +#define sps_scale(slot) (4 * (int32_t)(slot)) +#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3) + +/* -- Exit state ---------------------------------------------------------- */ + +/* This definition must match with the *.dasc file(s). */ +typedef struct { + lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ + intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ + int32_t spill[256]; /* Spill slots. */ +} ExitState; + +/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */ +#define EXITSTUB_SPACING (2+2) +#define EXITSTUBS_PER_GROUP 32 + +/* -- x86 ModRM operand encoding ------------------------------------------ */ + +typedef enum { + XM_OFS0 = 0x00, XM_OFS8 = 0x40, XM_OFS32 = 0x80, XM_REG = 0xc0, + XM_SCALE1 = 0x00, XM_SCALE2 = 0x40, XM_SCALE4 = 0x80, XM_SCALE8 = 0xc0, + XM_MASK = 0xc0 +} x86Mode; + +/* Structure to hold variable ModRM operand. */ +typedef struct { + int32_t ofs; /* Offset. */ + uint8_t base; /* Base register or RID_NONE. */ + uint8_t idx; /* Index register or RID_NONE. */ + uint8_t scale; /* Index scale (XM_SCALE1 .. XM_SCALE8). */ +} x86ModRM; + +/* -- Opcodes ------------------------------------------------------------- */ + +/* Macros to construct variable-length x86 opcodes. -(len+1) is in LSB. */ +#define XO_(o) ((uint32_t)(0x0000fe + (0x##o<<24))) +#define XO_FPU(a,b) ((uint32_t)(0x00fd + (0x##a<<16)+(0x##b<<24))) +#define XO_0f(o) ((uint32_t)(0x0f00fd + (0x##o<<24))) +#define XO_66(o) ((uint32_t)(0x6600fd + (0x##o<<24))) +#define XO_660f(o) ((uint32_t)(0x0f66fc + (0x##o<<24))) +#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24))) +#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24))) + +/* This list of x86 opcodes is not intended to be complete. Opcodes are only +** included when needed. Take a look at DynASM or jit.dis_x86 to see the +** whole mess. +*/ +typedef enum { + /* Fixed length opcodes. XI_* prefix. */ + XI_NOP = 0x90, + XI_CALL = 0xe8, + XI_JMP = 0xe9, + XI_JMPs = 0xeb, + XI_JCCs = 0x70, /* Really 7x. */ + XI_JCCn = 0x80, /* Really 0f8x. */ + XI_LEA = 0x8d, + XI_MOVri = 0xb8, /* Really b8+r. */ + XI_ARITHib = 0x80, + XI_ARITHi = 0x81, + XI_ARITHi8 = 0x83, + XI_PUSHi8 = 0x6a, + XI_TEST = 0x85, + XI_MOVmi = 0xc7, + + /* Note: little-endian byte-order! */ + XI_FLDZ = 0xeed9, + XI_FLD1 = 0xe8d9, + XI_FLDLG2 = 0xecd9, + XI_FLDLN2 = 0xedd9, + XI_FDUP = 0xc0d9, /* Really fld st0. */ + XI_FPOP = 0xd8dd, /* Really fstp st0. */ + XI_FPOP1 = 0xd9dd, /* Really fstp st1. */ + XI_FRNDINT = 0xfcd9, + XI_FSIN = 0xfed9, + XI_FCOS = 0xffd9, + XI_FPTAN = 0xf2d9, + XI_FPATAN = 0xf3d9, + XI_FSCALE = 0xfdd9, + XI_FYL2X = 0xf1d9, + + /* Variable-length opcodes. XO_* prefix. 
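+** Worked example of the -(len+1) length byte described above: XO_(8b)
+** expands to 0x8b0000fe, i.e. opcode byte 0x8b with length byte
+** 0xfe = -(1+1) for a one-byte opcode, while XO_660f(2e) expands to
+** 0x2e0f66fc with 0xfc = -(3+1) for the three bytes 66 0f 2e (UCOMISD).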
*/ + XO_MOV = XO_(8b), + XO_MOVto = XO_(89), + XO_MOVtow = XO_66(89), + XO_MOVtob = XO_(88), + XO_MOVmi = XO_(c7), + XO_MOVmib = XO_(c6), + XO_LEA = XO_(8d), + XO_ARITHib = XO_(80), + XO_ARITHi = XO_(81), + XO_ARITHi8 = XO_(83), + XO_ARITHiw8 = XO_66(83), + XO_SHIFTi = XO_(c1), + XO_SHIFT1 = XO_(d1), + XO_SHIFTcl = XO_(d3), + XO_IMUL = XO_0f(af), + XO_IMULi = XO_(69), + XO_IMULi8 = XO_(6b), + XO_CMP = XO_(3b), + XO_TEST = XO_(85), + XO_GROUP3b = XO_(f6), + XO_GROUP3 = XO_(f7), + XO_GROUP5b = XO_(fe), + XO_GROUP5 = XO_(ff), + XO_MOVZXb = XO_0f(b6), + XO_MOVZXw = XO_0f(b7), + XO_MOVSXb = XO_0f(be), + XO_MOVSXw = XO_0f(bf), + XO_MOVSXd = XO_(63), + XO_BSWAP = XO_0f(c8), + XO_CMOV = XO_0f(40), + + XO_MOVSD = XO_f20f(10), + XO_MOVSDto = XO_f20f(11), + XO_MOVSS = XO_f30f(10), + XO_MOVSSto = XO_f30f(11), + XO_MOVLPD = XO_660f(12), + XO_MOVAPS = XO_0f(28), + XO_XORPS = XO_0f(57), + XO_ANDPS = XO_0f(54), + XO_ADDSD = XO_f20f(58), + XO_SUBSD = XO_f20f(5c), + XO_MULSD = XO_f20f(59), + XO_DIVSD = XO_f20f(5e), + XO_SQRTSD = XO_f20f(51), + XO_MINSD = XO_f20f(5d), + XO_MAXSD = XO_f20f(5f), + XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */ + XO_UCOMISD = XO_660f(2e), + XO_CVTSI2SD = XO_f20f(2a), + XO_CVTSD2SI = XO_f20f(2d), + XO_CVTTSD2SI= XO_f20f(2c), + XO_CVTSI2SS = XO_f30f(2a), + XO_CVTSS2SI = XO_f30f(2d), + XO_CVTTSS2SI= XO_f30f(2c), + XO_CVTSS2SD = XO_f30f(5a), + XO_CVTSD2SS = XO_f20f(5a), + XO_ADDSS = XO_f30f(58), + XO_MOVD = XO_660f(6e), + XO_MOVDto = XO_660f(7e), + + XO_FLDd = XO_(d9), XOg_FLDd = 0, + XO_FLDq = XO_(dd), XOg_FLDq = 0, + XO_FILDd = XO_(db), XOg_FILDd = 0, + XO_FILDq = XO_(df), XOg_FILDq = 5, + XO_FSTPd = XO_(d9), XOg_FSTPd = 3, + XO_FSTPq = XO_(dd), XOg_FSTPq = 3, + XO_FISTPq = XO_(df), XOg_FISTPq = 7, + XO_FISTTPq = XO_(dd), XOg_FISTTPq = 1, + XO_FADDq = XO_(dc), XOg_FADDq = 0, + XO_FLDCW = XO_(d9), XOg_FLDCW = 5, + XO_FNSTCW = XO_(d9), XOg_FNSTCW = 7 +} x86Op; + +/* x86 opcode groups. */ +typedef uint32_t x86Group; + +#define XG_(i8, i, g) ((x86Group)(((i8) << 16) + ((i) << 8) + (g))) +#define XG_ARITHi(g) XG_(XI_ARITHi8, XI_ARITHi, g) +#define XG_TOXOi(xg) ((x86Op)(0x000000fe + (((xg)<<16) & 0xff000000))) +#define XG_TOXOi8(xg) ((x86Op)(0x000000fe + (((xg)<<8) & 0xff000000))) + +#define XO_ARITH(a) ((x86Op)(0x030000fe + ((a)<<27))) +#define XO_ARITHw(a) ((x86Op)(0x036600fd + ((a)<<27))) + +typedef enum { + XOg_ADD, XOg_OR, XOg_ADC, XOg_SBB, XOg_AND, XOg_SUB, XOg_XOR, XOg_CMP, + XOg_X_IMUL +} x86Arith; + +typedef enum { + XOg_ROL, XOg_ROR, XOg_RCL, XOg_RCR, XOg_SHL, XOg_SHR, XOg_SAL, XOg_SAR +} x86Shift; + +typedef enum { + XOg_TEST, XOg_TEST_, XOg_NOT, XOg_NEG, XOg_MUL, XOg_IMUL, XOg_DIV, XOg_IDIV +} x86Group3; + +typedef enum { + XOg_INC, XOg_DEC, XOg_CALL, XOg_CALLfar, XOg_JMP, XOg_JMPfar, XOg_PUSH +} x86Group5; + +/* x86 condition codes. */ +typedef enum { + CC_O, CC_NO, CC_B, CC_NB, CC_E, CC_NE, CC_BE, CC_NBE, + CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_NL, CC_LE, CC_NLE, + CC_C = CC_B, CC_NAE = CC_C, CC_NC = CC_NB, CC_AE = CC_NB, + CC_Z = CC_E, CC_NZ = CC_NE, CC_NA = CC_BE, CC_A = CC_NBE, + CC_PE = CC_P, CC_PO = CC_NP, CC_NGE = CC_L, CC_GE = CC_NL, + CC_NG = CC_LE, CC_G = CC_NLE +} x86CC; + +#endif diff --git a/src/luajit2/src/lj_trace.c b/src/luajit2/src/lj_trace.c new file mode 100644 index 0000000000..2bb2075d88 --- /dev/null +++ b/src/luajit2/src/lj_trace.c @@ -0,0 +1,794 @@ +/* +** Trace management. +** Copyright (C) 2005-2011 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#define lj_trace_c +#define LUA_CORE + +#include "lj_obj.h" + +#if LJ_HASJIT + +#include "lj_gc.h" +#include "lj_err.h" +#include "lj_debug.h" +#include "lj_str.h" +#include "lj_frame.h" +#include "lj_state.h" +#include "lj_bc.h" +#include "lj_ir.h" +#include "lj_jit.h" +#include "lj_iropt.h" +#include "lj_mcode.h" +#include "lj_trace.h" +#include "lj_snap.h" +#include "lj_gdbjit.h" +#include "lj_record.h" +#include "lj_asm.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_vmevent.h" +#include "lj_target.h" + +/* -- Error handling ------------------------------------------------------ */ + +/* Synchronous abort with error message. */ +void lj_trace_err(jit_State *J, TraceError e) +{ + setnilV(&J->errinfo); /* No error info. */ + setintV(J->L->top++, (int32_t)e); + lj_err_throw(J->L, LUA_ERRRUN); +} + +/* Synchronous abort with error message and error info. */ +void lj_trace_err_info(jit_State *J, TraceError e) +{ + setintV(J->L->top++, (int32_t)e); + lj_err_throw(J->L, LUA_ERRRUN); +} + +/* -- Trace management ---------------------------------------------------- */ + +/* The current trace is first assembled in J->cur. The variable length +** arrays point to shared, growable buffers (J->irbuf etc.). When trace +** recording ends successfully, the current trace and its data structures +** are copied to a new (compact) GCtrace object. +*/ + +/* Find a free trace number. */ +static TraceNo trace_findfree(jit_State *J) +{ + MSize osz, lim; + if (J->freetrace == 0) + J->freetrace = 1; + for (; J->freetrace < J->sizetrace; J->freetrace++) + if (traceref(J, J->freetrace) == NULL) + return J->freetrace++; + /* Need to grow trace array. */ + lim = (MSize)J->param[JIT_P_maxtrace] + 1; + if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535; + osz = J->sizetrace; + if (osz >= lim) + return 0; /* Too many traces. */ + lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef); + for (; osz < J->sizetrace; osz++) + setgcrefnull(J->trace[osz]); + return J->freetrace; +} + +#define TRACE_APPENDVEC(field, szfield, tp) \ + T->field = (tp *)p; \ + memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \ + p += J->cur.szfield*sizeof(tp); + +#ifdef LUAJIT_USE_PERFTOOLS +/* +** Create symbol table of JIT-compiled code. For use with Linux perf tools. +** Example usage: +** perf record -f -e cycles luajit test.lua +** perf report -s symbol +** rm perf.data /tmp/perf-*.map +*/ +#include <stdio.h> +#include <unistd.h> + +static void perftools_addtrace(GCtrace *T) +{ + static FILE *fp; + GCproto *pt = &gcref(T->startpt)->pt; + const BCIns *startpc = mref(T->startpc, const BCIns); + const char *name = proto_chunknamestr(pt); + BCLine lineno; + if (name[0] == '@' || name[0] == '=') + name++; + else + name = "(string)"; + lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); + lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); + if (!fp) { + char fname[40]; + sprintf(fname, "/tmp/perf-%d.map", getpid()); + if (!(fp = fopen(fname, "w"))) return; + setlinebuf(fp); + } + fprintf(fp, "%lx %x TRACE_%d::%s:%u\n", + (long)T->mcode, T->szmcode, T->traceno, name, lineno); +} +#endif + +/* Save current trace by copying and compacting it. 
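+** The compacted trace is a single allocation laid out roughly as
+** [GCtrace header | IR instructions | snapshots | snapshot map],
+** which is what the sztr/szins size sums below compute.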
*/ +static void trace_save(jit_State *J) +{ + size_t sztr = ((sizeof(GCtrace)+7)&~7); + size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns); + size_t sz = sztr + szins + + J->cur.nsnap*sizeof(SnapShot) + + J->cur.nsnapmap*sizeof(SnapEntry); + GCtrace *T = lj_mem_newt(J->L, (MSize)sz, GCtrace); + char *p = (char *)T + sztr; + memcpy(T, &J->cur, sizeof(GCtrace)); + setgcrefr(T->nextgc, J2G(J)->gc.root); + setgcrefp(J2G(J)->gc.root, T); + newwhite(J2G(J), T); + T->gct = ~LJ_TTRACE; + T->ir = (IRIns *)p - J->cur.nk; + memcpy(p, J->cur.ir+J->cur.nk, szins); + p += szins; + TRACE_APPENDVEC(snap, nsnap, SnapShot) + TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry) + J->cur.traceno = 0; + setgcrefp(J->trace[T->traceno], T); + lj_gc_barriertrace(J2G(J), T->traceno); + lj_gdbjit_addtrace(J, T); +#ifdef LUAJIT_USE_PERFTOOLS + perftools_addtrace(T); +#endif +} + +void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T) +{ + jit_State *J = G2J(g); + if (T->traceno) { + lj_gdbjit_deltrace(J, T); + if (T->traceno < J->freetrace) + J->freetrace = T->traceno; + setgcrefnull(J->trace[T->traceno]); + } + lj_mem_free(g, T, + ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + + T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry)); +} + +/* Re-enable compiling a prototype by unpatching any modified bytecode. */ +void lj_trace_reenableproto(GCproto *pt) +{ + if ((pt->flags & PROTO_ILOOP)) { + BCIns *bc = proto_bc(pt); + BCPos i, sizebc = pt->sizebc;; + pt->flags &= ~PROTO_ILOOP; + if (bc_op(bc[0]) == BC_IFUNCF) + setbc_op(&bc[0], BC_FUNCF); + for (i = 1; i < sizebc; i++) { + BCOp op = bc_op(bc[i]); + if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP) + setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP); + } + } +} + +/* Unpatch the bytecode modified by a root trace. */ +static void trace_unpatch(jit_State *J, GCtrace *T) +{ + BCOp op = bc_op(T->startins); + BCIns *pc = mref(T->startpc, BCIns); + UNUSED(J); + if (op == BC_JMP) + return; /* No need to unpatch branches in parent traces (yet). */ + switch (bc_op(*pc)) { + case BC_JFORL: + lua_assert(traceref(J, bc_d(*pc)) == T); + *pc = T->startins; + pc += bc_j(T->startins); + lua_assert(bc_op(*pc) == BC_JFORI); + setbc_op(pc, BC_FORI); + break; + case BC_JITERL: + case BC_JLOOP: + lua_assert(op == BC_ITERL || op == BC_LOOP || bc_isret(op)); + *pc = T->startins; + break; + case BC_JMP: + lua_assert(op == BC_ITERL); + pc += bc_j(*pc)+2; + if (bc_op(*pc) == BC_JITERL) { + lua_assert(traceref(J, bc_d(*pc)) == T); + *pc = T->startins; + } + break; + case BC_JFUNCF: + lua_assert(op == BC_FUNCF); + *pc = T->startins; + break; + default: /* Already unpatched. */ + break; + } +} + +/* Flush a root trace. */ +static void trace_flushroot(jit_State *J, GCtrace *T) +{ + GCproto *pt = &gcref(T->startpt)->pt; + lua_assert(T->root == 0 && pt != NULL); + /* First unpatch any modified bytecode. */ + trace_unpatch(J, T); + /* Unlink root trace from chain anchored in prototype. */ + if (pt->trace == T->traceno) { /* Trace is first in chain. Easy. */ + pt->trace = T->nextroot; + } else if (pt->trace) { /* Otherwise search in chain of root traces. */ + GCtrace *T2 = traceref(J, pt->trace); + if (T2) { + for (; T2->nextroot; T2 = traceref(J, T2->nextroot)) + if (T2->nextroot == T->traceno) { + T2->nextroot = T->nextroot; /* Unlink from chain. */ + break; + } + } + } +} + +/* Flush a trace. Only root traces are considered. 
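+** (Passing a side trace number is a no-op here; the T->root == 0 check
+** below only lets root traces through.)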
*/ +void lj_trace_flush(jit_State *J, TraceNo traceno) +{ + if (traceno > 0 && traceno < J->sizetrace) { + GCtrace *T = traceref(J, traceno); + if (T && T->root == 0) + trace_flushroot(J, T); + } +} + +/* Flush all traces associated with a prototype. */ +void lj_trace_flushproto(global_State *g, GCproto *pt) +{ + while (pt->trace != 0) + trace_flushroot(G2J(g), traceref(G2J(g), pt->trace)); +} + +/* Flush all traces. */ +int lj_trace_flushall(lua_State *L) +{ + jit_State *J = L2J(L); + ptrdiff_t i; + if ((J2G(J)->hookmask & HOOK_GC)) + return 1; + for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) { + GCtrace *T = traceref(J, i); + if (T) { + if (T->root == 0) + trace_flushroot(J, T); + lj_gdbjit_deltrace(J, T); + T->traceno = 0; + setgcrefnull(J->trace[i]); + } + } + J->cur.traceno = 0; + J->freetrace = 0; + /* Clear penalty cache. */ + memset(J->penalty, 0, sizeof(J->penalty)); + /* Free the whole machine code and invalidate all exit stub groups. */ + lj_mcode_free(J); + memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup)); + lj_vmevent_send(L, TRACE, + setstrV(L, L->top++, lj_str_newlit(L, "flush")); + ); + return 0; +} + +/* Initialize JIT compiler state. */ +void lj_trace_initstate(global_State *g) +{ + jit_State *J = G2J(g); + TValue *tv; + /* Initialize SIMD constants. */ + tv = LJ_KSIMD(J, LJ_KSIMD_ABS); + tv[0].u64 = U64x(7fffffff,ffffffff); + tv[1].u64 = U64x(7fffffff,ffffffff); + tv = LJ_KSIMD(J, LJ_KSIMD_NEG); + tv[0].u64 = U64x(80000000,00000000); + tv[1].u64 = U64x(80000000,00000000); +} + +/* Free everything associated with the JIT compiler state. */ +void lj_trace_freestate(global_State *g) +{ + jit_State *J = G2J(g); +#ifdef LUA_USE_ASSERT + { /* This assumes all traces have already been freed. */ + ptrdiff_t i; + for (i = 1; i < (ptrdiff_t)J->sizetrace; i++) + lua_assert(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL); + } +#endif + lj_mcode_free(J); + lj_ir_k64_freeall(J); + lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry); + lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot); + lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns); + lj_mem_freevec(g, J->trace, J->sizetrace, GCRef); +} + +/* -- Penalties and blacklisting ------------------------------------------ */ + +/* Blacklist a bytecode instruction. */ +static void blacklist_pc(GCproto *pt, BCIns *pc) +{ + setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP); + pt->flags |= PROTO_ILOOP; +} + +/* Penalize a bytecode instruction. */ +static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e) +{ + uint32_t i, val = PENALTY_MIN; + for (i = 0; i < PENALTY_SLOTS; i++) + if (mref(J->penalty[i].pc, const BCIns) == pc) { /* Cache slot found? */ + /* First try to bump its hotcount several times. */ + val = ((uint32_t)J->penalty[i].val << 1) + + LJ_PRNG_BITS(J, PENALTY_RNDBITS); + if (val > PENALTY_MAX) { + blacklist_pc(pt, pc); /* Blacklist it, if that didn't help. */ + return; + } + goto setpenalty; + } + /* Assign a new penalty cache slot. */ + i = J->penaltyslot; + J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1); + setmref(J->penalty[i].pc, pc); +setpenalty: + J->penalty[i].val = (uint16_t)val; + J->penalty[i].reason = e; + hotcount_set(J2GG(J), pc+1, val); +} + +/* -- Trace compiler state machine ---------------------------------------- */ + +/* Start tracing. */ +static void trace_start(jit_State *J) +{ + lua_State *L; + TraceNo traceno; + + if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? 
*/ + if (J->parent == 0) { + /* Lazy bytecode patching to disable hotcount events. */ + lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL || + bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF); + setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP); + J->pt->flags |= PROTO_ILOOP; + } + J->state = LJ_TRACE_IDLE; /* Silently ignored. */ + return; + } + + /* Get a new trace number. */ + traceno = trace_findfree(J); + if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */ + lua_assert((J2G(J)->hookmask & HOOK_GC) == 0); + lj_trace_flushall(J->L); + J->state = LJ_TRACE_IDLE; /* Silently ignored. */ + return; + } + setgcrefp(J->trace[traceno], &J->cur); + + /* Setup enough of the current trace to be able to send the vmevent. */ + memset(&J->cur, 0, sizeof(GCtrace)); + J->cur.traceno = traceno; + J->cur.nins = J->cur.nk = REF_BASE; + J->cur.ir = J->irbuf; + J->cur.snap = J->snapbuf; + J->cur.snapmap = J->snapmapbuf; + J->mergesnap = 0; + J->needsnap = 0; + J->bcskip = 0; + J->guardemit.irt = 0; + J->postproc = LJ_POST_NONE; + lj_resetsplit(J); + setgcref(J->cur.startpt, obj2gco(J->pt)); + + L = J->L; + lj_vmevent_send(L, TRACE, + setstrV(L, L->top++, lj_str_newlit(L, "start")); + setintV(L->top++, traceno); + setfuncV(L, L->top++, J->fn); + setintV(L->top++, proto_bcpos(J->pt, J->pc)); + if (J->parent) { + setintV(L->top++, J->parent); + setintV(L->top++, J->exitno); + } + ); + lj_record_setup(J); +} + +/* Stop tracing. */ +static void trace_stop(jit_State *J) +{ + BCIns *pc = mref(J->cur.startpc, BCIns); + BCOp op = bc_op(J->cur.startins); + GCproto *pt = &gcref(J->cur.startpt)->pt; + TraceNo traceno = J->cur.traceno; + lua_State *L; + + switch (op) { + case BC_FORL: + setbc_op(pc+bc_j(J->cur.startins), BC_JFORI); /* Patch FORI, too. */ + /* fallthrough */ + case BC_LOOP: + case BC_ITERL: + case BC_FUNCF: + /* Patch bytecode of starting instruction in root trace. */ + setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP); + setbc_d(pc, traceno); + addroot: + /* Add to root trace chain in prototype. */ + J->cur.nextroot = pt->trace; + pt->trace = (TraceNo1)traceno; + break; + case BC_RET: + case BC_RET0: + case BC_RET1: + *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno); + goto addroot; + case BC_JMP: + /* Patch exit branch in parent to side trace entry. */ + lua_assert(J->parent != 0 && J->cur.root != 0); + lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode); + /* Avoid compiling a side trace twice (stack resizing uses parent exit). */ + traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE; + /* Add to side trace chain in root trace. */ + { + GCtrace *root = traceref(J, J->cur.root); + root->nchild++; + J->cur.nextside = root->nextside; + root->nextside = (TraceNo1)traceno; + } + break; + default: + lua_assert(0); + break; + } + + /* Commit new mcode only after all patching is done. */ + lj_mcode_commit(J, J->cur.mcode); + J->postproc = LJ_POST_NONE; + trace_save(J); + + L = J->L; + lj_vmevent_send(L, TRACE, + setstrV(L, L->top++, lj_str_newlit(L, "stop")); + setintV(L->top++, traceno); + ); +} + +/* Start a new root trace for down-recursion. */ +static int trace_downrec(jit_State *J) +{ + /* Restart recording at the return instruction. */ + lua_assert(J->pt != NULL); + lua_assert(bc_isret(bc_op(*J->pc))); + if (bc_op(*J->pc) == BC_RETM) + return 0; /* NYI: down-recursion with RETM. */ + J->parent = 0; + J->exitno = 0; + J->state = LJ_TRACE_RECORD; + trace_start(J); + return 1; +} + +/* Abort tracing. 
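+** On abort the starting bytecode is penalized via penalty_pc() above: the
+** cached penalty value is roughly doubled (plus a few random bits) each
+** time and, once it exceeds PENALTY_MAX, blacklist_pc() patches the
+** instruction to its ILOOP/IFUNCF form so it stops triggering hotcount
+** recording (lj_trace_reenableproto() above can undo that patching).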
*/ +static int trace_abort(jit_State *J) +{ + lua_State *L = J->L; + TraceError e = LJ_TRERR_RECERR; + TraceNo traceno; + + J->postproc = LJ_POST_NONE; + lj_mcode_abort(J); + if (tvisnumber(L->top-1)) + e = (TraceError)numberVint(L->top-1); + if (e == LJ_TRERR_MCODELM) { + J->state = LJ_TRACE_ASM; + return 1; /* Retry ASM with new MCode area. */ + } + /* Penalize or blacklist starting bytecode instruction. */ + if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) + penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e); + + /* Is there anything to abort? */ + traceno = J->cur.traceno; + if (traceno) { + ptrdiff_t errobj = savestack(L, L->top-1); /* Stack may be resized. */ + J->cur.link = 0; + lj_vmevent_send(L, TRACE, + TValue *frame; + const BCIns *pc; + GCfunc *fn; + setstrV(L, L->top++, lj_str_newlit(L, "abort")); + setintV(L->top++, traceno); + /* Find original Lua function call to generate a better error message. */ + frame = J->L->base-1; + pc = J->pc; + while (!isluafunc(frame_func(frame))) { + pc = (frame_iscont(frame) ? frame_contpc(frame) : frame_pc(frame)) - 1; + frame = frame_prev(frame); + } + fn = frame_func(frame); + setfuncV(L, L->top++, fn); + setintV(L->top++, proto_bcpos(funcproto(fn), pc)); + copyTV(L, L->top++, restorestack(L, errobj)); + copyTV(L, L->top++, &J->errinfo); + ); + /* Drop aborted trace after the vmevent (which may still access it). */ + setgcrefnull(J->trace[traceno]); + if (traceno < J->freetrace) + J->freetrace = traceno; + J->cur.traceno = 0; + } + L->top--; /* Remove error object */ + if (e == LJ_TRERR_DOWNREC) + return trace_downrec(J); + else if (e == LJ_TRERR_MCODEAL) + lj_trace_flushall(L); + return 0; +} + +/* Perform pending re-patch of a bytecode instruction. */ +static LJ_AINLINE void trace_pendpatch(jit_State *J, int force) +{ + if (LJ_UNLIKELY(J->patchpc) && (force || J->chain[IR_RETF])) { + *J->patchpc = J->patchins; + J->patchpc = NULL; + } +} + +/* State machine for the trace compiler. Protected callback. */ +static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud) +{ + jit_State *J = (jit_State *)ud; + UNUSED(dummy); + do { + retry: + switch (J->state) { + case LJ_TRACE_START: + J->state = LJ_TRACE_RECORD; /* trace_start() may change state. */ + trace_start(J); + lj_dispatch_update(J2G(J)); + break; + + case LJ_TRACE_RECORD: + trace_pendpatch(J, 0); + setvmstate(J2G(J), RECORD); + lj_vmevent_send(L, RECORD, + setintV(L->top++, J->cur.traceno); + setfuncV(L, L->top++, J->fn); + setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1); + setintV(L->top++, J->framedepth); + ); + lj_record_ins(J); + break; + + case LJ_TRACE_END: + trace_pendpatch(J, 1); + J->loopref = 0; + if ((J->flags & JIT_F_OPT_LOOP) && + J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) { + setvmstate(J2G(J), OPT); + lj_opt_dce(J); + if (lj_opt_loop(J)) { /* Loop optimization failed? */ + J->cur.link = 0; + J->loopref = J->cur.nins; + J->state = LJ_TRACE_RECORD; /* Try to continue recording. */ + break; + } + J->loopref = J->chain[IR_LOOP]; /* Needed by assembler. */ + } + lj_opt_split(J); + J->state = LJ_TRACE_ASM; + break; + + case LJ_TRACE_ASM: + setvmstate(J2G(J), ASM); + lj_asm_trace(J, &J->cur); + trace_stop(J); + setvmstate(J2G(J), INTERP); + J->state = LJ_TRACE_IDLE; + lj_dispatch_update(J2G(J)); + return NULL; + + default: /* Trace aborted asynchronously. 
*/ + setintV(L->top++, (int32_t)LJ_TRERR_RECERR); + /* fallthrough */ + case LJ_TRACE_ERR: + trace_pendpatch(J, 1); + if (trace_abort(J)) + goto retry; + setvmstate(J2G(J), INTERP); + J->state = LJ_TRACE_IDLE; + lj_dispatch_update(J2G(J)); + return NULL; + } + } while (J->state > LJ_TRACE_RECORD); + return NULL; +} + +/* -- Event handling ------------------------------------------------------ */ + +/* A bytecode instruction is about to be executed. Record it. */ +void lj_trace_ins(jit_State *J, const BCIns *pc) +{ + /* Note: J->L must already be set. pc is the true bytecode PC here. */ + J->pc = pc; + J->fn = curr_func(J->L); + J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL; + while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0) + J->state = LJ_TRACE_ERR; +} + +/* A hotcount triggered. Start recording a root trace. */ +void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc) +{ + ERRNO_SAVE + /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */ + hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]+1); /* Reset hotcount. */ + /* Only start a new trace if not recording or inside __gc call or vmevent. */ + if (J->state == LJ_TRACE_IDLE && + !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) { + J->parent = 0; /* Root trace. */ + J->exitno = 0; + J->state = LJ_TRACE_START; + lj_trace_ins(J, pc-1); + } + ERRNO_RESTORE +} + +/* Check for a hot side exit. If yes, start recording a side trace. */ +static void trace_hotside(jit_State *J, const BCIns *pc) +{ + SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno]; + if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) && + snap->count != SNAPCOUNT_DONE && + ++snap->count >= J->param[JIT_P_hotexit]) { + lua_assert(J->state == LJ_TRACE_IDLE); + /* J->parent is non-zero for a side trace. */ + J->state = LJ_TRACE_START; + lj_trace_ins(J, pc); + } +} + +/* Tiny struct to pass data to protected call. */ +typedef struct ExitDataCP { + jit_State *J; + void *exptr; /* Pointer to exit state. */ + const BCIns *pc; /* Restart interpreter at this PC. */ +} ExitDataCP; + +/* Need to protect lj_snap_restore because it may throw. */ +static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud) +{ + ExitDataCP *exd = (ExitDataCP *)ud; + cframe_errfunc(L->cframe) = -1; /* Inherit error function. */ + exd->pc = lj_snap_restore(exd->J, exd->exptr); + UNUSED(dummy); + return NULL; +} + +#ifndef LUAJIT_DISABLE_VMEVENT +/* Push all registers from exit state. */ +static void trace_exit_regs(lua_State *L, ExitState *ex) +{ + int32_t i; + setintV(L->top++, RID_NUM_GPR); + setintV(L->top++, RID_NUM_FPR); + for (i = 0; i < RID_NUM_GPR; i++) { + if (sizeof(ex->gpr[i]) == sizeof(int32_t)) + setintV(L->top++, (int32_t)ex->gpr[i]); + else + setnumV(L->top++, (lua_Number)ex->gpr[i]); + } +#if !LJ_SOFTFP + for (i = 0; i < RID_NUM_FPR; i++) { + setnumV(L->top, ex->fpr[i]); + if (LJ_UNLIKELY(tvisnan(L->top))) + setnanV(L->top); + L->top++; + } +#endif +} +#endif + +#ifdef EXITSTATE_PCREG +/* Determine trace number from pc of exit instruction. */ +static TraceNo trace_exit_find(jit_State *J, MCode *pc) +{ + TraceNo traceno; + for (traceno = 1; traceno < J->sizetrace; traceno++) { + GCtrace *T = traceref(J, traceno); + if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode)) { + if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. 
*/ + lua_assert(T->root != 0); + traceno = T->ir[REF_BASE].op1; + J->exitno = T->ir[REF_BASE].op2; + } + return traceno; + } + } + lua_assert(0); + return 0; +} +#endif + +/* A trace exited. Restore interpreter state. */ +int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr) +{ + ERRNO_SAVE + lua_State *L = J->L; + ExitState *ex = (ExitState *)exptr; + ExitDataCP exd; + int errcode; + const BCIns *pc; + void *cf; +#ifdef EXITSTATE_PCREG + J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]); +#endif + lua_assert(traceref(J, J->parent) != NULL && + J->exitno < traceref(J, J->parent)->nsnap); + exd.J = J; + exd.exptr = exptr; + errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp); + if (errcode) + return -errcode; /* Return negated error code. */ + + lj_vmevent_send(L, TEXIT, + lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK); + setintV(L->top++, J->parent); + setintV(L->top++, J->exitno); + trace_exit_regs(L, ex); + ); + + pc = exd.pc; + cf = cframe_raw(L->cframe); + setcframe_pc(cf, pc); + if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) + lj_gc_step(L); /* Exited because of GC: drive GC forward. */ + else + trace_hotside(J, pc); + if (bc_op(*pc) == BC_JLOOP) { + BCIns *retpc = &traceref(J, bc_d(*pc))->startins; + if (bc_isret(bc_op(*retpc))) { + if (J->state == LJ_TRACE_RECORD) { + J->patchins = *pc; + J->patchpc = (BCIns *)pc; + *J->patchpc = *retpc; + } else { + pc = retpc; + setcframe_pc(cf, pc); + } + } + } + /* Return MULTRES or 0. */ + ERRNO_RESTORE + switch (bc_op(*pc)) { + case BC_CALLM: case BC_CALLMT: + return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc)); + case BC_RETM: + return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc)); + case BC_TSETM: + return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc)); + default: + if (bc_op(*pc) >= BC_FUNCF) + return (int)((BCReg)(L->top - L->base) + 1); + return 0; + } +} + +#endif diff --git a/src/luajit2/src/lj_trace.h b/src/luajit2/src/lj_trace.h new file mode 100644 index 0000000000..25a456d936 --- /dev/null +++ b/src/luajit2/src/lj_trace.h @@ -0,0 +1,53 @@ +/* +** Trace management. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_TRACE_H +#define _LJ_TRACE_H + +#include "lj_obj.h" + +#if LJ_HASJIT +#include "lj_jit.h" +#include "lj_dispatch.h" + +/* Trace errors. */ +typedef enum { +#define TREDEF(name, msg) LJ_TRERR_##name, +#include "lj_traceerr.h" + LJ_TRERR__MAX +} TraceError; + +LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e); +LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e); + +/* Trace management. */ +LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T); +LJ_FUNC void lj_trace_reenableproto(GCproto *pt); +LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt); +LJ_FUNC void lj_trace_flush(jit_State *J, TraceNo traceno); +LJ_FUNC int lj_trace_flushall(lua_State *L); +LJ_FUNC void lj_trace_initstate(global_State *g); +LJ_FUNC void lj_trace_freestate(global_State *g); + +/* Event handling. */ +LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc); +LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc); +LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr); + +/* Signal asynchronous abort of trace or end of trace. 
*/ +#define lj_trace_abort(g) (G2J(g)->state &= ~LJ_TRACE_ACTIVE) +#define lj_trace_end(J) (J->state = LJ_TRACE_END) + +#else + +#define lj_trace_flushall(L) (UNUSED(L), 0) +#define lj_trace_initstate(g) UNUSED(g) +#define lj_trace_freestate(g) UNUSED(g) +#define lj_trace_abort(g) UNUSED(g) +#define lj_trace_end(J) UNUSED(J) + +#endif + +#endif diff --git a/src/luajit2/src/lj_traceerr.h b/src/luajit2/src/lj_traceerr.h new file mode 100644 index 0000000000..756330ec44 --- /dev/null +++ b/src/luajit2/src/lj_traceerr.h @@ -0,0 +1,61 @@ +/* +** Trace compiler error messages. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +/* This file may be included multiple times with different TREDEF macros. */ + +/* Recording. */ +TREDEF(RECERR, "error thrown or hook called during recording") +TREDEF(TRACEOV, "trace too long") +TREDEF(STACKOV, "trace too deep") +TREDEF(SNAPOV, "too many snapshots") +TREDEF(BLACKL, "blacklisted") +TREDEF(NYIBC, "NYI: bytecode %d") + +/* Recording loop ops. */ +TREDEF(LLEAVE, "leaving loop in root trace") +TREDEF(LINNER, "inner loop in root trace") +TREDEF(LUNROLL, "loop unroll limit reached") + +/* Recording calls/returns. */ +TREDEF(BADTYPE, "bad argument type") +TREDEF(CJITOFF, "call to JIT-disabled function") +TREDEF(CUNROLL, "call unroll limit reached") +TREDEF(DOWNREC, "down-recursion, restarting") +TREDEF(NYICF, "NYI: C function %p") +TREDEF(NYIFF, "NYI: FastFunc %s") +TREDEF(NYIFFU, "NYI: unsupported variant of FastFunc %s") +TREDEF(NYIRETL, "NYI: return to lower frame") + +/* Recording indexed load/store. */ +TREDEF(STORENN, "store with nil or NaN key") +TREDEF(NOMM, "missing metamethod") +TREDEF(IDXLOOP, "looping index lookup") +TREDEF(NYITMIX, "NYI: mixed sparse/dense table") + +/* Recording C data operations. */ +TREDEF(NOCACHE, "symbol not in cache") +TREDEF(NYICONV, "NYI: unsupported C type conversion") +TREDEF(NYICALL, "NYI: unsupported C function type") + +/* Optimizations. */ +TREDEF(GFAIL, "guard would always fail") +TREDEF(PHIOV, "too many PHIs") +TREDEF(TYPEINS, "persistent type instability") + +/* Assembler. */ +TREDEF(MCODEAL, "failed to allocate mcode memory") +TREDEF(MCODEOV, "machine code too long") +TREDEF(MCODELM, "hit mcode limit (retrying)") +TREDEF(SPILLOV, "too many spill slots") +TREDEF(BADRA, "inconsistent register allocation") +TREDEF(NYIIR, "NYI: cannot assemble IR instruction %d") +TREDEF(NYIPHI, "NYI: PHI shuffling too complex") +TREDEF(NYICOAL, "NYI: register coalescing too complex") + +#undef TREDEF + +/* Detecting unused error messages: + awk -F, '/^TREDEF/ { gsub(/TREDEF./, ""); printf "grep -q LJ_TRERR_%s *.[ch] || echo %s\n", $1, $1}' lj_traceerr.h | sh +*/ diff --git a/src/luajit2/src/lj_udata.c b/src/luajit2/src/lj_udata.c new file mode 100644 index 0000000000..869f31f8af --- /dev/null +++ b/src/luajit2/src/lj_udata.c @@ -0,0 +1,34 @@ +/* +** Userdata handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_udata_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_gc.h" +#include "lj_udata.h" + +GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env) +{ + GCudata *ud = lj_mem_newt(L, sizeof(GCudata) + sz, GCudata); + global_State *g = G(L); + newwhite(g, ud); /* Not finalized. */ + ud->gct = ~LJ_TUDATA; + ud->udtype = UDTYPE_USERDATA; + ud->len = sz; + /* NOBARRIER: The GCudata is new (marked white). */ + setgcrefnull(ud->metatable); + setgcref(ud->env, obj2gco(env)); + /* Chain to userdata list (after main thread). 
*/ + setgcrefr(ud->nextgc, mainthread(g)->nextgc); + setgcref(mainthread(g)->nextgc, obj2gco(ud)); + return ud; +} + +void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud) +{ + lj_mem_free(g, ud, sizeudata(ud)); +} + diff --git a/src/luajit2/src/lj_udata.h b/src/luajit2/src/lj_udata.h new file mode 100644 index 0000000000..952c74827c --- /dev/null +++ b/src/luajit2/src/lj_udata.h @@ -0,0 +1,14 @@ +/* +** Userdata handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_UDATA_H +#define _LJ_UDATA_H + +#include "lj_obj.h" + +LJ_FUNC GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env); +LJ_FUNC void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud); + +#endif diff --git a/src/luajit2/src/lj_vm.h b/src/luajit2/src/lj_vm.h new file mode 100644 index 0000000000..3f6777ad11 --- /dev/null +++ b/src/luajit2/src/lj_vm.h @@ -0,0 +1,97 @@ +/* +** Assembler VM interface definitions. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_VM_H +#define _LJ_VM_H + +#include "lj_obj.h" + +/* Entry points for ASM parts of VM. */ +LJ_ASMF void lj_vm_call(lua_State *L, TValue *base, int nres1); +LJ_ASMF int lj_vm_pcall(lua_State *L, TValue *base, int nres1, ptrdiff_t ef); +typedef TValue *(*lua_CPFunction)(lua_State *L, lua_CFunction func, void *ud); +LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud, + lua_CPFunction cp); +LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef); +LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode); +LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe); +LJ_ASMF void lj_vm_unwind_c_eh(void); +LJ_ASMF void lj_vm_unwind_ff_eh(void); +#if LJ_TARGET_X86ORX64 +LJ_ASMF void lj_vm_unwind_rethrow(void); +#endif + +/* Miscellaneous functions. */ +#if LJ_TARGET_X86ORX64 +LJ_ASMF int lj_vm_cpuid(uint32_t f, uint32_t res[4]); +#endif +LJ_ASMF double lj_vm_foldarith(double x, double y, int op); +#if LJ_HASJIT +LJ_ASMF double lj_vm_foldfpm(double x, int op); +#endif +#if !LJ_ARCH_HASFPU +/* Declared in lj_obj.h: LJ_ASMF int32_t lj_vm_tobit(double x); */ +#endif + +/* Dispatch targets for recording and hooks. */ +LJ_ASMF void lj_vm_record(void); +LJ_ASMF void lj_vm_inshook(void); +LJ_ASMF void lj_vm_rethook(void); +LJ_ASMF void lj_vm_callhook(void); + +/* Trace exit handling. */ +LJ_ASMF void lj_vm_exit_handler(void); +LJ_ASMF void lj_vm_exit_interp(void); + +/* Internal math helper functions. */ +#if LJ_TARGET_X86ORX64 +#define lj_vm_floor(x) floor(x) +#define lj_vm_ceil(x) ceil(x) +#else +LJ_ASMF double lj_vm_floor(double); +LJ_ASMF double lj_vm_ceil(double); +#endif + +#if LJ_HASJIT +#if LJ_TARGET_X86ORX64 +LJ_ASMF void lj_vm_floor_sse(void); +LJ_ASMF void lj_vm_ceil_sse(void); +LJ_ASMF void lj_vm_trunc_sse(void); +LJ_ASMF void lj_vm_exp_x87(void); +LJ_ASMF void lj_vm_exp2_x87(void); +LJ_ASMF void lj_vm_pow_sse(void); +LJ_ASMF void lj_vm_powi_sse(void); +#else +LJ_ASMF double lj_vm_trunc(double); +LJ_ASMF double lj_vm_powi(double, int32_t); +#if defined(__ANDROID__) || defined(__symbian__) +LJ_ASMF double lj_vm_log2(double); +#else +#define lj_vm_log2 log2 +#endif +#if defined(__symbian__) +LJ_ASMF double lj_vm_exp2(double); +#else +#define lj_vm_exp2 exp2 +#endif +#endif +LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t); +#endif + +/* Continuations for metamethods. */ +LJ_ASMF void lj_cont_cat(void); /* Continue with concatenation. */ +LJ_ASMF void lj_cont_ra(void); /* Store result in RA from instruction. 
*/ +LJ_ASMF void lj_cont_nop(void); /* Do nothing, just continue execution. */ +LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */ +LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */ +LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */ + +/* Start of the ASM code. */ +LJ_ASMF char lj_vm_asm_begin[]; + +/* Bytecode offsets are relative to lj_vm_asm_begin. */ +#define makeasmfunc(ofs) ((ASMFunction)(lj_vm_asm_begin + (ofs))) + +#endif diff --git a/src/luajit2/src/lj_vmevent.c b/src/luajit2/src/lj_vmevent.c new file mode 100644 index 0000000000..67cc6dc14e --- /dev/null +++ b/src/luajit2/src/lj_vmevent.c @@ -0,0 +1,56 @@ +/* +** VM event handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#include <stdio.h> + +#define lj_vmevent_c +#define LUA_CORE + +#include "lj_obj.h" +#include "lj_str.h" +#include "lj_tab.h" +#include "lj_state.h" +#include "lj_dispatch.h" +#include "lj_vm.h" +#include "lj_vmevent.h" + +ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev) +{ + global_State *g = G(L); + GCstr *s = lj_str_newlit(L, LJ_VMEVENTS_REGKEY); + cTValue *tv = lj_tab_getstr(tabV(registry(L)), s); + if (tvistab(tv)) { + int hash = VMEVENT_HASH(ev); + tv = lj_tab_getint(tabV(tv), hash); + if (tv && tvisfunc(tv)) { + lj_state_checkstack(L, LUA_MINSTACK); + setfuncV(L, L->top++, funcV(tv)); + return savestack(L, L->top); + } + } + g->vmevmask &= ~VMEVENT_MASK(ev); /* No handler: cache this fact. */ + return 0; +} + +void lj_vmevent_call(lua_State *L, ptrdiff_t argbase) +{ + global_State *g = G(L); + uint8_t oldmask = g->vmevmask; + uint8_t oldh = hook_save(g); + int status; + g->vmevmask = 0; /* Disable all events. */ + hook_vmevent(g); + status = lj_vm_pcall(L, restorestack(L, argbase), 0+1, 0); + if (LJ_UNLIKELY(status)) { + /* Really shouldn't use stderr here, but where else to complain? */ + L->top--; + fprintf(stderr, "VM handler failed: %s\n", + tvisstr(L->top) ? strVdata(L->top) : "?"); + } + hook_restore(g, oldh); + if (g->vmevmask != VMEVENT_NOCACHE) + g->vmevmask = oldmask; /* Restore event mask, but not if not modified. */ +} + diff --git a/src/luajit2/src/lj_vmevent.h b/src/luajit2/src/lj_vmevent.h new file mode 100644 index 0000000000..857e5be527 --- /dev/null +++ b/src/luajit2/src/lj_vmevent.h @@ -0,0 +1,49 @@ +/* +** VM event handling. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LJ_VMEVENT_H +#define _LJ_VMEVENT_H + +#include "lj_obj.h" + +/* Registry key for VM event handler table. */ +#define LJ_VMEVENTS_REGKEY "_VMEVENTS" +#define LJ_VMEVENTS_HSIZE 4 + +#define VMEVENT_MASK(ev) ((uint8_t)1 << ((int)(ev) & 7)) +#define VMEVENT_HASH(ev) ((int)(ev) & ~7) +#define VMEVENT_HASHIDX(h) ((int)(h) << 3) +#define VMEVENT_NOCACHE 255 + +#define VMEVENT_DEF(name, hash) \ + LJ_VMEVENT_##name##_, \ + LJ_VMEVENT_##name = ((LJ_VMEVENT_##name##_) & 7)|((hash) << 3) + +/* VM event IDs. 
*/ +typedef enum { + VMEVENT_DEF(BC, 0x00003883), + VMEVENT_DEF(TRACE, 0xb2d91467), + VMEVENT_DEF(RECORD, 0x9284bf4f), + VMEVENT_DEF(TEXIT, 0xb29df2b0), + LJ_VMEVENT__MAX +} VMEvent; + +#ifdef LUAJIT_DISABLE_VMEVENT +#define lj_vmevent_send(L, ev, args) UNUSED(L) +#else +#define lj_vmevent_send(L, ev, args) \ + if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \ + ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \ + if (argbase) { \ + args \ + lj_vmevent_call(L, argbase); \ + } \ + } + +LJ_FUNC ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev); +LJ_FUNC void lj_vmevent_call(lua_State *L, ptrdiff_t argbase); +#endif + +#endif diff --git a/src/luajit2/src/lj_vmmath.c b/src/luajit2/src/lj_vmmath.c new file mode 100644 index 0000000000..8ae8863bc8 --- /dev/null +++ b/src/luajit2/src/lj_vmmath.c @@ -0,0 +1,111 @@ +/* +** Math helper functions for assembler VM. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#define lj_vmmath_c +#define LUA_CORE + +#include <math.h> + +#include "lj_obj.h" +#if LJ_HASJIT +#include "lj_ir.h" +#endif +#include "lj_vm.h" + +/* -- Helper functions for generated machine code ------------------------- */ + +#if LJ_TARGET_X86ORX64 +/* Wrapper functions to avoid linker issues on OSX. */ +LJ_FUNCA double lj_vm_sinh(double x) { return sinh(x); } +LJ_FUNCA double lj_vm_cosh(double x) { return cosh(x); } +LJ_FUNCA double lj_vm_tanh(double x) { return tanh(x); } +#endif + +#if LJ_HASJIT + +#if defined(__ANDROID__) || defined(__symbian__) +double lj_vm_log2(double a) +{ + return log(a) * 1.4426950408889634074; +} +#endif + +#if defined(__symbian__) +double lj_vm_exp2(double a) +{ + return exp(a * 0.6931471805599453); +} +#endif + +#if !LJ_TARGET_ARM +int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b) +{ + uint32_t y, ua, ub; + lua_assert(b != 0); /* This must be checked before using this function. */ + ua = a < 0 ? (uint32_t)-a : (uint32_t)a; + ub = b < 0 ? (uint32_t)-b : (uint32_t)b; + y = ua % ub; + if (y != 0 && (a^b) < 0) y = y - ub; + if (((int32_t)y^b) < 0) y = (uint32_t)-(int32_t)y; + return (int32_t)y; +} +#endif + +#if !LJ_TARGET_X86ORX64 +/* Unsigned x^k. */ +static double lj_vm_powui(double x, uint32_t k) +{ + double y; + lua_assert(k != 0); + for (; (k & 1) == 0; k >>= 1) x *= x; + y = x; + if ((k >>= 1) != 0) { + for (;;) { + x *= x; + if (k == 1) break; + if (k & 1) y *= x; + k >>= 1; + } + y *= x; + } + return y; +} + +/* Signed x^k. */ +double lj_vm_powi(double x, int32_t k) +{ + if (k > 1) + return lj_vm_powui(x, (uint32_t)k); + else if (k == 1) + return x; + else if (k == 0) + return 1; + else + return 1.0 / lj_vm_powui(x, (uint32_t)-k); +} + +/* Computes fpm(x) for extended math functions. 
*/ +double lj_vm_foldfpm(double x, int fpm) +{ + switch (fpm) { + case IRFPM_FLOOR: return lj_vm_floor(x); + case IRFPM_CEIL: return lj_vm_ceil(x); + case IRFPM_TRUNC: return lj_vm_trunc(x); + case IRFPM_SQRT: return sqrt(x); + case IRFPM_EXP: return exp(x); + case IRFPM_EXP2: return lj_vm_exp2(x); + case IRFPM_LOG: return log(x); + case IRFPM_LOG2: return lj_vm_log2(x); + case IRFPM_LOG10: return log10(x); + case IRFPM_SIN: return sin(x); + case IRFPM_COS: return cos(x); + case IRFPM_TAN: return tan(x); + default: lua_assert(0); + } + return 0; +} +#endif + +#endif diff --git a/src/luajit2/src/lua.h b/src/luajit2/src/lua.h new file mode 100644 index 0000000000..0e98b3744c --- /dev/null +++ b/src/luajit2/src/lua.h @@ -0,0 +1,388 @@ +/* +** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $ +** Lua - An Extensible Extension Language +** Lua.org, PUC-Rio, Brazil (http://www.lua.org) +** See Copyright Notice at the end of this file +*/ + + +#ifndef lua_h +#define lua_h + +#include <stdarg.h> +#include <stddef.h> + + +#include "luaconf.h" + + +#define LUA_VERSION "Lua 5.1" +#define LUA_RELEASE "Lua 5.1.4" +#define LUA_VERSION_NUM 501 +#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio" +#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes" + + +/* mark for precompiled code (`<esc>Lua') */ +#define LUA_SIGNATURE "\033Lua" + +/* option for multiple returns in `lua_pcall' and `lua_call' */ +#define LUA_MULTRET (-1) + + +/* +** pseudo-indices +*/ +#define LUA_REGISTRYINDEX (-10000) +#define LUA_ENVIRONINDEX (-10001) +#define LUA_GLOBALSINDEX (-10002) +#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i)) + + +/* thread status; 0 is OK */ +#define LUA_YIELD 1 +#define LUA_ERRRUN 2 +#define LUA_ERRSYNTAX 3 +#define LUA_ERRMEM 4 +#define LUA_ERRERR 5 + + +typedef struct lua_State lua_State; + +typedef int (*lua_CFunction) (lua_State *L); + + +/* +** functions that read/write blocks when loading/dumping Lua chunks +*/ +typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz); + +typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud); + + +/* +** prototype for memory-allocation functions +*/ +typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize); + + +/* +** basic types +*/ +#define LUA_TNONE (-1) + +#define LUA_TNIL 0 +#define LUA_TBOOLEAN 1 +#define LUA_TLIGHTUSERDATA 2 +#define LUA_TNUMBER 3 +#define LUA_TSTRING 4 +#define LUA_TTABLE 5 +#define LUA_TFUNCTION 6 +#define LUA_TUSERDATA 7 +#define LUA_TTHREAD 8 + + + +/* minimum Lua stack available to a C function */ +#define LUA_MINSTACK 20 + + +/* +** generic extra include file +*/ +#if defined(LUA_USER_H) +#include LUA_USER_H +#endif + + +/* type of numbers in Lua */ +typedef LUA_NUMBER lua_Number; + + +/* type for integer functions */ +typedef LUA_INTEGER lua_Integer; + + + +/* +** state manipulation +*/ +LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud); +LUA_API void (lua_close) (lua_State *L); +LUA_API lua_State *(lua_newthread) (lua_State *L); + +LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf); + + +/* +** basic stack manipulation +*/ +LUA_API int (lua_gettop) (lua_State *L); +LUA_API void (lua_settop) (lua_State *L, int idx); +LUA_API void (lua_pushvalue) (lua_State *L, int idx); +LUA_API void (lua_remove) (lua_State *L, int idx); +LUA_API void (lua_insert) (lua_State *L, int idx); +LUA_API void (lua_replace) (lua_State *L, int idx); +LUA_API int (lua_checkstack) (lua_State *L, int sz); + +LUA_API void (lua_xmove) 
(lua_State *from, lua_State *to, int n); + + +/* +** access functions (stack -> C) +*/ + +LUA_API int (lua_isnumber) (lua_State *L, int idx); +LUA_API int (lua_isstring) (lua_State *L, int idx); +LUA_API int (lua_iscfunction) (lua_State *L, int idx); +LUA_API int (lua_isuserdata) (lua_State *L, int idx); +LUA_API int (lua_type) (lua_State *L, int idx); +LUA_API const char *(lua_typename) (lua_State *L, int tp); + +LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2); + +LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx); +LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx); +LUA_API int (lua_toboolean) (lua_State *L, int idx); +LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len); +LUA_API size_t (lua_objlen) (lua_State *L, int idx); +LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx); +LUA_API void *(lua_touserdata) (lua_State *L, int idx); +LUA_API lua_State *(lua_tothread) (lua_State *L, int idx); +LUA_API const void *(lua_topointer) (lua_State *L, int idx); + + +/* +** push functions (C -> stack) +*/ +LUA_API void (lua_pushnil) (lua_State *L); +LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n); +LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n); +LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l); +LUA_API void (lua_pushstring) (lua_State *L, const char *s); +LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt, + va_list argp); +LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...); +LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n); +LUA_API void (lua_pushboolean) (lua_State *L, int b); +LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p); +LUA_API int (lua_pushthread) (lua_State *L); + + +/* +** get functions (Lua -> stack) +*/ +LUA_API void (lua_gettable) (lua_State *L, int idx); +LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawget) (lua_State *L, int idx); +LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n); +LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec); +LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz); +LUA_API int (lua_getmetatable) (lua_State *L, int objindex); +LUA_API void (lua_getfenv) (lua_State *L, int idx); + + +/* +** set functions (stack -> Lua) +*/ +LUA_API void (lua_settable) (lua_State *L, int idx); +LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawset) (lua_State *L, int idx); +LUA_API void (lua_rawseti) (lua_State *L, int idx, int n); +LUA_API int (lua_setmetatable) (lua_State *L, int objindex); +LUA_API int (lua_setfenv) (lua_State *L, int idx); + + +/* +** `load' and `call' functions (load and run Lua code) +*/ +LUA_API void (lua_call) (lua_State *L, int nargs, int nresults); +LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc); +LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud); +LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt, + const char *chunkname); + +LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data); + + +/* +** coroutine functions +*/ +LUA_API int (lua_yield) (lua_State *L, int nresults); +LUA_API int (lua_resume) (lua_State *L, int narg); +LUA_API int (lua_status) (lua_State *L); + +/* +** garbage-collection function and options +*/ + +#define LUA_GCSTOP 0 +#define 
LUA_GCRESTART 1 +#define LUA_GCCOLLECT 2 +#define LUA_GCCOUNT 3 +#define LUA_GCCOUNTB 4 +#define LUA_GCSTEP 5 +#define LUA_GCSETPAUSE 6 +#define LUA_GCSETSTEPMUL 7 + +LUA_API int (lua_gc) (lua_State *L, int what, int data); + + +/* +** miscellaneous functions +*/ + +LUA_API int (lua_error) (lua_State *L); + +LUA_API int (lua_next) (lua_State *L, int idx); + +LUA_API void (lua_concat) (lua_State *L, int n); + +LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud); +LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud); + + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define lua_pop(L,n) lua_settop(L, -(n)-1) + +#define lua_newtable(L) lua_createtable(L, 0, 0) + +#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n))) + +#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0) + +#define lua_strlen(L,i) lua_objlen(L, (i)) + +#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION) +#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE) +#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA) +#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL) +#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN) +#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD) +#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE) +#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0) + +#define lua_pushliteral(L, s) \ + lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1) + +#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s)) +#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s)) + +#define lua_tostring(L,i) lua_tolstring(L, (i), NULL) + + + +/* +** compatibility macros and functions +*/ + +#define lua_open() luaL_newstate() + +#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX) + +#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0) + +#define lua_Chunkreader lua_Reader +#define lua_Chunkwriter lua_Writer + + +/* hack */ +LUA_API void lua_setlevel (lua_State *from, lua_State *to); + + +/* +** {====================================================================== +** Debug API +** ======================================================================= +*/ + + +/* +** Event codes +*/ +#define LUA_HOOKCALL 0 +#define LUA_HOOKRET 1 +#define LUA_HOOKLINE 2 +#define LUA_HOOKCOUNT 3 +#define LUA_HOOKTAILRET 4 + + +/* +** Event masks +*/ +#define LUA_MASKCALL (1 << LUA_HOOKCALL) +#define LUA_MASKRET (1 << LUA_HOOKRET) +#define LUA_MASKLINE (1 << LUA_HOOKLINE) +#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT) + +typedef struct lua_Debug lua_Debug; /* activation record */ + + +/* Functions to be called by the debuger in specific events */ +typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar); + + +LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar); +LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar); +LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n); +LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n); + +LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count); +LUA_API lua_Hook lua_gethook (lua_State *L); +LUA_API int lua_gethookmask (lua_State *L); +LUA_API int lua_gethookcount (lua_State *L); + + +struct lua_Debug { + int 
event; + const char *name; /* (n) */ + const char *namewhat; /* (n) `global', `local', `field', `method' */ + const char *what; /* (S) `Lua', `C', `main', `tail' */ + const char *source; /* (S) */ + int currentline; /* (l) */ + int nups; /* (u) number of upvalues */ + int linedefined; /* (S) */ + int lastlinedefined; /* (S) */ + char short_src[LUA_IDSIZE]; /* (S) */ + /* private part */ + int i_ci; /* active function */ +}; + +/* }====================================================================== */ + + +/****************************************************************************** +* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to deal in the Software without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Software, and to +* permit persons to whom the Software is furnished to do so, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +******************************************************************************/ + + +#endif diff --git a/src/luajit2/src/luaconf.h b/src/luajit2/src/luaconf.h new file mode 100644 index 0000000000..4a243ffbf2 --- /dev/null +++ b/src/luajit2/src/luaconf.h @@ -0,0 +1,131 @@ +/* +** Configuration header. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef luaconf_h +#define luaconf_h + +#include <limits.h> +#include <stddef.h> + +/* Default path for loading Lua and C modules with require(). */ +#if defined(_WIN32) +/* +** In Windows, any exclamation mark ('!') in the path is replaced by the +** path of the directory of the executable file of the current process. +*/ +#define LUA_LDIR "!\\lua\\" +#define LUA_CDIR "!\\" +#define LUA_PATH_DEFAULT \ + ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;" +#define LUA_CPATH_DEFAULT \ + ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll" +#else +#define LUA_ROOT "/usr/local/" +#define LUA_LDIR LUA_ROOT "share/lua/5.1/" +#define LUA_CDIR LUA_ROOT "lib/lua/5.1/" +#ifdef LUA_XROOT +#define LUA_JDIR LUA_XROOT "share/luajit-2.0.0-beta8/" +#define LUA_XPATH \ + ";" LUA_XROOT "share/lua/5.1/?.lua;" LUA_XROOT "share/lua/5.1/?/init.lua" +#define LUA_XCPATH LUA_XROOT "lib/lua/5.1/?.so;" +#else +#define LUA_JDIR LUA_ROOT "share/luajit-2.0.0-beta8/" +#define LUA_XPATH +#define LUA_XCPATH +#endif +#define LUA_PATH_DEFAULT \ + "./?.lua;" LUA_JDIR"?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?/init.lua" LUA_XPATH +#define LUA_CPATH_DEFAULT \ + "./?.so;" LUA_CDIR"?.so;" LUA_XCPATH LUA_CDIR"loadall.so" +#endif + +/* Environment variable names for path overrides and initialization code. */ +#define LUA_PATH "LUA_PATH" +#define LUA_CPATH "LUA_CPATH" +#define LUA_INIT "LUA_INIT" + +/* Special file system characters. 
*/ +#if defined(_WIN32) +#define LUA_DIRSEP "\\" +#else +#define LUA_DIRSEP "/" +#endif +#define LUA_PATHSEP ";" +#define LUA_PATH_MARK "?" +#define LUA_EXECDIR "!" +#define LUA_IGMARK "-" +#define LUA_PATH_CONFIG \ + LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \ + LUA_EXECDIR "\n" LUA_IGMARK + +/* Quoting in error messages. */ +#define LUA_QL(x) "'" x "'" +#define LUA_QS LUA_QL("%s") + +/* Various tunables. */ +#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */ +#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */ +#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */ +#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */ +#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */ + +/* Compatibility with older library function names. */ +#define LUA_COMPAT_MOD /* OLD: math.mod, NEW: math.fmod */ +#define LUA_COMPAT_GFIND /* OLD: string.gfind, NEW: string.gmatch */ + +/* Configuration for the frontend (the luajit executable). */ +#if defined(luajit_c) +#define LUA_PROGNAME "luajit" /* Fallback frontend name. */ +#define LUA_PROMPT "> " /* Interactive prompt. */ +#define LUA_PROMPT2 ">> " /* Continuation prompt. */ +#define LUA_MAXINPUT 512 /* Max. input line length. */ +#endif + +/* Note: changing the following defines breaks the Lua 5.1 ABI. */ +#define LUA_INTEGER ptrdiff_t +#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */ +#define LUAL_BUFFERSIZE BUFSIZ /* Size of lauxlib and io.* buffers. */ + +/* The following defines are here only for compatibility with luaconf.h +** from the standard Lua distribution. They must not be changed for LuaJIT. +*/ +#define LUA_NUMBER_DOUBLE +#define LUA_NUMBER double +#define LUAI_UACNUMBER double +#define LUA_NUMBER_SCAN "%lf" +#define LUA_NUMBER_FMT "%.14g" +#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n)) +#define LUAI_MAXNUMBER2STR 32 +#define lua_str2number(s, p) strtod((s), (p)) +#define LUA_INTFRMLEN "l" +#define LUA_INTFRM_T long + +/* Linkage of public API functions. */ +#if defined(LUA_BUILD_AS_DLL) +#if defined(LUA_CORE) || defined(LUA_LIB) +#define LUA_API __declspec(dllexport) +#else +#define LUA_API __declspec(dllimport) +#endif +#else +#define LUA_API extern +#endif + +#define LUALIB_API LUA_API + +/* Support for internal assertions. */ +#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) +#include <assert.h> +#endif +#ifdef LUA_USE_ASSERT +#define lua_assert(x) assert(x) +#endif +#ifdef LUA_USE_APICHECK +#define luai_apicheck(L, o) { (void)L; assert(o); } +#else +#define luai_apicheck(L, o) { (void)L; } +#endif + +#endif diff --git a/src/luajit2/src/luajit.c b/src/luajit2/src/luajit.c new file mode 100644 index 0000000000..17d3b5fc3a --- /dev/null +++ b/src/luajit2/src/luajit.c @@ -0,0 +1,553 @@ +/* +** LuaJIT frontend. Runs commands, scripts, read-eval-print (REPL) etc. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +** +** Major portions taken verbatim or adapted from the Lua interpreter. +** Copyright (C) 1994-2008 Lua.org, PUC-Rio. 
See Copyright Notice in lua.h +*/ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#define luajit_c + +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" +#include "luajit.h" + +#include "lj_arch.h" + +#if LJ_TARGET_POSIX +#include <unistd.h> +#define lua_stdin_is_tty() isatty(0) +#elif LJ_TARGET_WINDOWS +#include <io.h> +#ifdef __BORLANDC__ +#define lua_stdin_is_tty() isatty(_fileno(stdin)) +#else +#define lua_stdin_is_tty() _isatty(_fileno(stdin)) +#endif +#else +#define lua_stdin_is_tty() 1 +#endif + +static lua_State *globalL = NULL; +static const char *progname = LUA_PROGNAME; + +static void lstop(lua_State *L, lua_Debug *ar) +{ + (void)ar; /* unused arg. */ + lua_sethook(L, NULL, 0, 0); + /* Avoid luaL_error -- a C hook doesn't add an extra frame. */ + luaL_where(L, 0); + lua_pushfstring(L, "%sinterrupted!", lua_tostring(L, -1)); + lua_error(L); +} + +static void laction(int i) +{ + signal(i, SIG_DFL); /* if another SIGINT happens before lstop, + terminate process (default action) */ + lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1); +} + +static void print_usage(void) +{ + fprintf(stderr, + "usage: %s [options]... [script [args]...].\n" + "Available options are:\n" + " -e chunk Execute string " LUA_QL("chunk") ".\n" + " -l name Require library " LUA_QL("name") ".\n" + " -b ... Save or list bytecode.\n" + " -j cmd Perform LuaJIT control command.\n" + " -O[opt] Control LuaJIT optimizations.\n" + " -i Enter interactive mode after executing " LUA_QL("script") ".\n" + " -v Show version information.\n" + " -- Stop handling options.\n" + " - Execute stdin and stop handling options.\n" + , + progname); + fflush(stderr); +} + +static void l_message(const char *pname, const char *msg) +{ + if (pname) fprintf(stderr, "%s: ", pname); + fprintf(stderr, "%s\n", msg); + fflush(stderr); +} + +static int report(lua_State *L, int status) +{ + if (status && !lua_isnil(L, -1)) { + const char *msg = lua_tostring(L, -1); + if (msg == NULL) msg = "(error object is not a string)"; + l_message(progname, msg); + lua_pop(L, 1); + } + return status; +} + +static int traceback(lua_State *L) +{ + if (!lua_isstring(L, 1)) /* 'message' not a string? */ + return 1; /* keep it intact */ + lua_getfield(L, LUA_GLOBALSINDEX, "debug"); + if (!lua_istable(L, -1)) { + lua_pop(L, 1); + return 1; + } + lua_getfield(L, -1, "traceback"); + if (!lua_isfunction(L, -1)) { + lua_pop(L, 2); + return 1; + } + lua_pushvalue(L, 1); /* pass error message */ + lua_pushinteger(L, 2); /* skip this function and traceback */ + lua_call(L, 2, 1); /* call debug.traceback */ + return 1; +} + +static int docall(lua_State *L, int narg, int clear) +{ + int status; + int base = lua_gettop(L) - narg; /* function index */ + lua_pushcfunction(L, traceback); /* push traceback function */ + lua_insert(L, base); /* put it under chunk and args */ + signal(SIGINT, laction); + status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base); + signal(SIGINT, SIG_DFL); + lua_remove(L, base); /* remove traceback function */ + /* force a complete garbage collection in case of errors */ + if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0); + return status; +} + +static void print_version(void) +{ + fprintf(stderr, + LUAJIT_VERSION " -- " LUAJIT_COPYRIGHT ". " LUAJIT_URL "\n"); +} + +static void print_jit_status(lua_State *L) +{ + int n; + const char *s; + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, -1, "jit"); /* Get jit.* module table. 
*/ + lua_remove(L, -2); + lua_getfield(L, -1, "status"); + lua_remove(L, -2); + n = lua_gettop(L); + lua_call(L, 0, LUA_MULTRET); + fputs(lua_toboolean(L, n) ? "JIT: ON" : "JIT: OFF", stderr); + for (n++; (s = lua_tostring(L, n)); n++) + fprintf(stderr, " %s", s); + fputs("\n", stderr); +} + +static int getargs(lua_State *L, char **argv, int n) +{ + int narg; + int i; + int argc = 0; + while (argv[argc]) argc++; /* count total number of arguments */ + narg = argc - (n + 1); /* number of arguments to the script */ + luaL_checkstack(L, narg + 3, "too many arguments to script"); + for (i = n+1; i < argc; i++) + lua_pushstring(L, argv[i]); + lua_createtable(L, narg, n + 1); + for (i = 0; i < argc; i++) { + lua_pushstring(L, argv[i]); + lua_rawseti(L, -2, i - n); + } + return narg; +} + +static int dofile(lua_State *L, const char *name) +{ + int status = luaL_loadfile(L, name) || docall(L, 0, 1); + return report(L, status); +} + +static int dostring(lua_State *L, const char *s, const char *name) +{ + int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1); + return report(L, status); +} + +static int dolibrary(lua_State *L, const char *name) +{ + lua_getglobal(L, "require"); + lua_pushstring(L, name); + return report(L, docall(L, 1, 1)); +} + +static void write_prompt(lua_State *L, int firstline) +{ + const char *p; + lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2"); + p = lua_tostring(L, -1); + if (p == NULL) p = firstline ? LUA_PROMPT : LUA_PROMPT2; + fputs(p, stdout); + fflush(stdout); + lua_pop(L, 1); /* remove global */ +} + +static int incomplete(lua_State *L, int status) +{ + if (status == LUA_ERRSYNTAX) { + size_t lmsg; + const char *msg = lua_tolstring(L, -1, &lmsg); + const char *tp = msg + lmsg - (sizeof(LUA_QL("<eof>")) - 1); + if (strstr(msg, LUA_QL("<eof>")) == tp) { + lua_pop(L, 1); + return 1; + } + } + return 0; /* else... */ +} + +static int pushline(lua_State *L, int firstline) +{ + char buf[LUA_MAXINPUT]; + write_prompt(L, firstline); + if (fgets(buf, LUA_MAXINPUT, stdin)) { + size_t len = strlen(buf); + if (len > 0 && buf[len-1] == '\n') + buf[len-1] = '\0'; + if (firstline && buf[0] == '=') + lua_pushfstring(L, "return %s", buf+1); + else + lua_pushstring(L, buf); + return 1; + } + return 0; +} + +static int loadline(lua_State *L) +{ + int status; + lua_settop(L, 0); + if (!pushline(L, 1)) + return -1; /* no input */ + for (;;) { /* repeat until gets a complete line */ + status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin"); + if (!incomplete(L, status)) break; /* cannot try to add lines? */ + if (!pushline(L, 0)) /* no more input? */ + return -1; + lua_pushliteral(L, "\n"); /* add a new line... */ + lua_insert(L, -2); /* ...between the two lines */ + lua_concat(L, 3); /* join them */ + } + lua_remove(L, 1); /* remove line */ + return status; +} + +static void dotty(lua_State *L) +{ + int status; + const char *oldprogname = progname; + progname = NULL; + while ((status = loadline(L)) != -1) { + if (status == 0) status = docall(L, 0, 0); + report(L, status); + if (status == 0 && lua_gettop(L) > 0) { /* any result to print? 
*/ + lua_getglobal(L, "print"); + lua_insert(L, 1); + if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0) + l_message(progname, + lua_pushfstring(L, "error calling " LUA_QL("print") " (%s)", + lua_tostring(L, -1))); + } + } + lua_settop(L, 0); /* clear stack */ + fputs("\n", stdout); + fflush(stdout); + progname = oldprogname; +} + +static int handle_script(lua_State *L, char **argv, int n) +{ + int status; + const char *fname; + int narg = getargs(L, argv, n); /* collect arguments */ + lua_setglobal(L, "arg"); + fname = argv[n]; + if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0) + fname = NULL; /* stdin */ + status = luaL_loadfile(L, fname); + lua_insert(L, -(narg+1)); + if (status == 0) + status = docall(L, narg, 0); + else + lua_pop(L, narg); + return report(L, status); +} + +/* Load add-on module. */ +static int loadjitmodule(lua_State *L) +{ + lua_getglobal(L, "require"); + lua_pushliteral(L, "jit."); + lua_pushvalue(L, -3); + lua_concat(L, 2); + if (lua_pcall(L, 1, 1, 0)) { + const char *msg = lua_tostring(L, -1); + if (msg && !strncmp(msg, "module ", 7)) { + err: + l_message(progname, + "unknown luaJIT command or jit.* modules not installed"); + return 1; + } else { + return report(L, 1); + } + } + lua_getfield(L, -1, "start"); + if (lua_isnil(L, -1)) goto err; + lua_remove(L, -2); /* Drop module table. */ + return 0; +} + +/* Run command with options. */ +static int runcmdopt(lua_State *L, const char *opt) +{ + int narg = 0; + if (opt && *opt) { + for (;;) { /* Split arguments. */ + const char *p = strchr(opt, ','); + narg++; + if (!p) break; + if (p == opt) + lua_pushnil(L); + else + lua_pushlstring(L, opt, (size_t)(p - opt)); + opt = p + 1; + } + if (*opt) + lua_pushstring(L, opt); + else + lua_pushnil(L); + } + return report(L, lua_pcall(L, narg, 0, 0)); +} + +/* JIT engine control command: try jit library first or load add-on module. */ +static int dojitcmd(lua_State *L, const char *cmd) +{ + const char *opt = strchr(cmd, '='); + lua_pushlstring(L, cmd, opt ? (size_t)(opt - cmd) : strlen(cmd)); + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, -1, "jit"); /* Get jit.* module table. */ + lua_remove(L, -2); + lua_pushvalue(L, -2); + lua_gettable(L, -2); /* Lookup library function. */ + if (!lua_isfunction(L, -1)) { + lua_pop(L, 2); /* Drop non-function and jit.* table, keep module name. */ + if (loadjitmodule(L)) + return 1; + } else { + lua_remove(L, -2); /* Drop jit.* table. */ + } + lua_remove(L, -2); /* Drop module name. */ + return runcmdopt(L, opt ? opt+1 : opt); +} + +/* Optimization flags. */ +static int dojitopt(lua_State *L, const char *opt) +{ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, -1, "jit.opt"); /* Get jit.opt.* module table. */ + lua_remove(L, -2); + lua_getfield(L, -1, "start"); + lua_remove(L, -2); + return runcmdopt(L, opt); +} + +/* Save or list bytecode. 
*/ +static int dobytecode(lua_State *L, char **argv) +{ + int narg = 0; + lua_pushliteral(L, "bcsave"); + if (loadjitmodule(L)) + return 1; + if (argv[0][2]) { + narg++; + argv[0][1] = '-'; + lua_pushstring(L, argv[0]+1); + } + for (argv++; *argv != NULL; narg++, argv++) + lua_pushstring(L, *argv); + return report(L, lua_pcall(L, narg, 0, 0)); +} + +/* check that argument has no extra characters at the end */ +#define notail(x) {if ((x)[2] != '\0') return -1;} + +#define FLAGS_INTERACTIVE 1 +#define FLAGS_VERSION 2 +#define FLAGS_EXEC 4 +#define FLAGS_OPTION 8 + +static int collectargs(char **argv, int *flags) +{ + int i; + for (i = 1; argv[i] != NULL; i++) { + if (argv[i][0] != '-') /* Not an option? */ + return i; + switch (argv[i][1]) { /* Check option. */ + case '-': + notail(argv[i]); + return (argv[i+1] != NULL ? i+1 : 0); + case '\0': + return i; + case 'i': + notail(argv[i]); + *flags |= FLAGS_INTERACTIVE; + /* fallthrough */ + case 'v': + notail(argv[i]); + *flags |= FLAGS_VERSION; + break; + case 'e': + *flags |= FLAGS_EXEC; + case 'j': /* LuaJIT extension */ + case 'l': + *flags |= FLAGS_OPTION; + if (argv[i][2] == '\0') { + i++; + if (argv[i] == NULL) return -1; + } + break; + case 'O': break; /* LuaJIT extension */ + case 'b': /* LuaJIT extension */ + if (*flags) return -1; + *flags |= FLAGS_EXEC; + return 0; + default: return -1; /* invalid option */ + } + } + return 0; +} + +static int runargs(lua_State *L, char **argv, int n) +{ + int i; + for (i = 1; i < n; i++) { + if (argv[i] == NULL) continue; + lua_assert(argv[i][0] == '-'); + switch (argv[i][1]) { /* option */ + case 'e': { + const char *chunk = argv[i] + 2; + if (*chunk == '\0') chunk = argv[++i]; + lua_assert(chunk != NULL); + if (dostring(L, chunk, "=(command line)") != 0) + return 1; + break; + } + case 'l': { + const char *filename = argv[i] + 2; + if (*filename == '\0') filename = argv[++i]; + lua_assert(filename != NULL); + if (dolibrary(L, filename)) + return 1; /* stop if file fails */ + break; + } + case 'j': { /* LuaJIT extension */ + const char *cmd = argv[i] + 2; + if (*cmd == '\0') cmd = argv[++i]; + lua_assert(cmd != NULL); + if (dojitcmd(L, cmd)) + return 1; + break; + } + case 'O': /* LuaJIT extension */ + if (dojitopt(L, argv[i] + 2)) + return 1; + break; + case 'b': /* LuaJIT extension */ + return dobytecode(L, argv+i); + default: break; + } + } + return 0; +} + +static int handle_luainit(lua_State *L) +{ + const char *init = getenv(LUA_INIT); + if (init == NULL) + return 0; /* status OK */ + else if (init[0] == '@') + return dofile(L, init+1); + else + return dostring(L, init, "=" LUA_INIT); +} + +struct Smain { + char **argv; + int argc; + int status; +}; + +static int pmain(lua_State *L) +{ + struct Smain *s = (struct Smain *)lua_touserdata(L, 1); + char **argv = s->argv; + int script; + int flags = 0; + globalL = L; + if (argv[0] && argv[0][0]) progname = argv[0]; + LUAJIT_VERSION_SYM(); /* linker-enforced version check */ + lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */ + luaL_openlibs(L); /* open libraries */ + lua_gc(L, LUA_GCRESTART, -1); + s->status = handle_luainit(L); + if (s->status != 0) return 0; + script = collectargs(argv, &flags); + if (script < 0) { /* invalid args? */ + print_usage(); + s->status = 1; + return 0; + } + if ((flags & FLAGS_VERSION)) print_version(); + s->status = runargs(L, argv, (script > 0) ? 
script : s->argc); + if (s->status != 0) return 0; + if (script) + s->status = handle_script(L, argv, script); + if (s->status != 0) return 0; + if ((flags & FLAGS_INTERACTIVE)) { + print_jit_status(L); + dotty(L); + } else if (script == 0 && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) { + if (lua_stdin_is_tty()) { + print_version(); + print_jit_status(L); + dotty(L); + } else { + dofile(L, NULL); /* executes stdin as a file */ + } + } + return 0; +} + +int main(int argc, char **argv) +{ + int status; + struct Smain s; + lua_State *L = lua_open(); /* create state */ + if (L == NULL) { + l_message(argv[0], "cannot create state: not enough memory"); + return EXIT_FAILURE; + } + s.argc = argc; + s.argv = argv; + status = lua_cpcall(L, pmain, &s); + report(L, status); + lua_close(L); + return (status || s.status) ? EXIT_FAILURE : EXIT_SUCCESS; +} + diff --git a/src/luajit2/src/luajit.h b/src/luajit2/src/luajit.h new file mode 100644 index 0000000000..38be73da0e --- /dev/null +++ b/src/luajit2/src/luajit.h @@ -0,0 +1,70 @@ +/* +** LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/ +** +** Copyright (C) 2005-2011 Mike Pall. All rights reserved. +** +** Permission is hereby granted, free of charge, to any person obtaining +** a copy of this software and associated documentation files (the +** "Software"), to deal in the Software without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Software, and to +** permit persons to whom the Software is furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be +** included in all copies or substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +** +** [ MIT license: http://www.opensource.org/licenses/mit-license.php ] +*/ + +#ifndef _LUAJIT_H +#define _LUAJIT_H + +#include "lua.h" + +#define LUAJIT_VERSION "LuaJIT 2.0.0-beta8" +#define LUAJIT_VERSION_NUM 20000 /* Version 2.0.0 = 02.00.00. */ +#define LUAJIT_VERSION_SYM luaJIT_version_2_0_0_beta8 +#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2011 Mike Pall" +#define LUAJIT_URL "http://luajit.org/" + +/* Modes for luaJIT_setmode. */ +#define LUAJIT_MODE_MASK 0x00ff + +enum { + LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */ + LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */ + + LUAJIT_MODE_FUNC, /* Change mode for a function. */ + LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */ + LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */ + + LUAJIT_MODE_TRACE, /* Flush a compiled trace. */ + + LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */ + + LUAJIT_MODE_MAX +}; + +/* Flags or'ed in to the mode. */ +#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */ +#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */ +#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */ + +/* LuaJIT public C API. */ + +/* Control the JIT engine. 
*/ +LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode); + +/* Enforce (dynamic) linker error for version mismatches. Call from main. */ +LUA_API void LUAJIT_VERSION_SYM(void); + +#endif diff --git a/src/luajit2/src/lualib.h b/src/luajit2/src/lualib.h new file mode 100644 index 0000000000..35fb698ac3 --- /dev/null +++ b/src/luajit2/src/lualib.h @@ -0,0 +1,43 @@ +/* +** Standard library header. +** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef _LUALIB_H +#define _LUALIB_H + +#include "lua.h" + +#define LUA_FILEHANDLE "FILE*" + +#define LUA_COLIBNAME "coroutine" +#define LUA_MATHLIBNAME "math" +#define LUA_STRLIBNAME "string" +#define LUA_TABLIBNAME "table" +#define LUA_IOLIBNAME "io" +#define LUA_OSLIBNAME "os" +#define LUA_LOADLIBNAME "package" +#define LUA_DBLIBNAME "debug" +#define LUA_BITLIBNAME "bit" +#define LUA_JITLIBNAME "jit" +#define LUA_FFILIBNAME "ffi" + +LUALIB_API int luaopen_base(lua_State *L); +LUALIB_API int luaopen_math(lua_State *L); +LUALIB_API int luaopen_string(lua_State *L); +LUALIB_API int luaopen_table(lua_State *L); +LUALIB_API int luaopen_io(lua_State *L); +LUALIB_API int luaopen_os(lua_State *L); +LUALIB_API int luaopen_package(lua_State *L); +LUALIB_API int luaopen_debug(lua_State *L); +LUALIB_API int luaopen_bit(lua_State *L); +LUALIB_API int luaopen_jit(lua_State *L); +LUALIB_API int luaopen_ffi(lua_State *L); + +LUALIB_API void luaL_openlibs(lua_State *L); + +#ifndef lua_assert +#define lua_assert(x) ((void)0) +#endif + +#endif diff --git a/src/luajit2/tome.patch b/src/luajit2/tome.patch new file mode 100644 index 0000000000..f465856b25 --- /dev/null +++ b/src/luajit2/tome.patch @@ -0,0 +1,149 @@ +Only in src/: buildvm +diff -u -r /Test/LuaJIT-2.0.0-beta8/src/lib_aux.c src/lib_aux.c +--- /Test/LuaJIT-2.0.0-beta8/src/lib_aux.c 2011-06-23 19:40:00.000000000 +0200 ++++ src/lib_aux.c 2011-06-25 01:11:28.000000000 +0200 +@@ -13,6 +13,8 @@ + #define lib_aux_c + #define LUA_LIB + ++#include "physfs.h" ++ + #include "lua.h" + #include "lauxlib.h" + +@@ -236,7 +238,7 @@ + /* -- Load Lua code ------------------------------------------------------- */ + + typedef struct FileReaderCtx { +- FILE *fp; ++ PHYSFS_File *fp; + char buf[LUAL_BUFFERSIZE]; + } FileReaderCtx; + +@@ -244,8 +246,8 @@ + { + FileReaderCtx *ctx = (FileReaderCtx *)ud; + UNUSED(L); +- if (feof(ctx->fp)) return NULL; +- *size = fread(ctx->buf, 1, sizeof(ctx->buf), ctx->fp); ++ if (PHYSFS_eof(ctx->fp)) return NULL; ++ *size = PHYSFS_read(ctx->fp, ctx->buf, 1, sizeof(ctx->buf)); + return *size > 0 ? ctx->buf : NULL; + } + +@@ -255,28 +257,28 @@ + int status; + const char *chunkname; + if (filename) { +- ctx.fp = fopen(filename, "rb"); ++ ctx.fp = PHYSFS_openRead(filename); + if (ctx.fp == NULL) { + lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno)); + return LUA_ERRFILE; + } + chunkname = lua_pushfstring(L, "@%s", filename); + } else { +- ctx.fp = stdin; +- chunkname = "=stdin"; ++// ctx.fp = stdin; ++// chunkname = "=stdin"; + } + status = lua_load(L, reader_file, &ctx, chunkname); +- if (ferror(ctx.fp)) { ++/* if (ferror(ctx.fp)) { + L->top -= filename ? 
2 : 1; + lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno)); + if (filename) + fclose(ctx.fp); + return LUA_ERRFILE; +- } ++ } */ + if (filename) { + L->top--; + copyTV(L, L->top-1, L->top); +- fclose(ctx.fp); ++ PHYSFS_close(ctx.fp); + } + return status; + } +diff -u -r /Test/LuaJIT-2.0.0-beta8/src/lib_package.c src/lib_package.c +--- /Test/LuaJIT-2.0.0-beta8/src/lib_package.c 2011-06-23 19:40:00.000000000 +0200 ++++ src/lib_package.c 2011-06-25 01:08:37.000000000 +0200 +@@ -17,6 +17,8 @@ + #include "lj_err.h" + #include "lj_lib.h" + ++#include "physfs.h" ++ + /* ------------------------------------------------------------------------ */ + + /* Error codes for ll_loadfunc. */ +@@ -193,10 +195,7 @@ + + static int readable(const char *filename) + { +- FILE *f = fopen(filename, "r"); /* try to open file */ +- if (f == NULL) return 0; /* open failed */ +- fclose(f); +- return 1; ++ return PHYSFS_exists(filename); + } + + static const char *pushnexttemplate(lua_State *L, const char *path) +diff -u -r /Test/LuaJIT-2.0.0-beta8/src/lib_table.c src/lib_table.c +--- /Test/LuaJIT-2.0.0-beta8/src/lib_table.c 2011-06-23 19:40:00.000000000 +0200 ++++ src/lib_table.c 2011-06-25 01:08:37.000000000 +0200 +@@ -178,7 +178,7 @@ + + static int sort_comp(lua_State *L, int a, int b) + { +- if (!lua_isnil(L, 2)) { /* function? */ ++ if (lua_isfunction(L, 2)) { /* function? */ + int res; + lua_pushvalue(L, 2); + lua_pushvalue(L, a-1); /* -1 to compensate function */ +@@ -187,9 +187,20 @@ + res = lua_toboolean(L, -1); + lua_pop(L, 1); + return res; +- } else { /* a < b? */ +- return lua_lessthan(L, a, b); + } ++ else if (!lua_isnil(L, 2)) { /* index? */ ++ int res; ++ lua_pushvalue(L, 2); ++ lua_gettable(L, a-1); ++ ++ lua_pushvalue(L, 2); ++ lua_gettable(L, b-2); ++ res = lua_lessthan(L, -2, -1); ++ lua_pop(L, 2); ++ return res; ++ } ++ else /* a < b? */ ++ return lua_lessthan(L, a, b); + } + + static void auxsort(lua_State *L, int l, int u) +@@ -260,8 +271,8 @@ + GCtab *t = lj_lib_checktab(L, 1); + int32_t n = (int32_t)lj_tab_len(t); + lua_settop(L, 2); +- if (!tvisnil(L->base+1)) +- lj_lib_checkfunc(L, 2); ++/* if (!tvisnil(L->base+1)) ++ lj_lib_checkfunc(L, 2);*/ + auxsort(L, 1, n); + return 0; + } +Only in /Test/LuaJIT-2.0.0-beta8/src/: ljamalg.c +Only in src/: lj_bcdef.h +Only in src/: lj_ffdef.h +Only in src/: lj_folddef.h +Only in src/: lj_libdef.h +Only in src/: lj_recdef.h +Only in src/: lj_vm.s +Only in src/: luajit +Only in /Test/LuaJIT-2.0.0-beta8/src/: Makefile +Only in /Test/LuaJIT-2.0.0-beta8/src/: Makefile.dep +Only in /Test/LuaJIT-2.0.0-beta8/src/: msvcbuild.bat diff --git a/src/luajit2/vmdef.lua b/src/luajit2/vmdef.lua new file mode 100644 index 0000000000..d7e5225030 --- /dev/null +++ b/src/luajit2/vmdef.lua @@ -0,0 +1,284 @@ +-- This is a generated file. DO NOT EDIT! + +module(...) 
+ +bcnames = "ISLT ISGE ISLE ISGT ISEQV ISNEV ISEQS ISNES ISEQN ISNEN ISEQP ISNEP ISTC ISFC IST ISF MOV NOT UNM LEN ADDVN SUBVN MULVN DIVVN MODVN ADDNV SUBNV MULNV DIVNV MODNV ADDVV SUBVV MULVV DIVVV MODVV POW CAT KSTR KCDATAKSHORTKNUM KPRI KNIL UGET USETV USETS USETN USETP UCLO FNEW TNEW TDUP GGET GSET TGETV TGETS TGETB TSETV TSETS TSETB TSETM CALLM CALL CALLMTCALLT ITERC ITERN VARG ISNEXTRETM RET RET0 RET1 FORI JFORI FORL IFORL JFORL ITERL IITERLJITERLLOOP ILOOP JLOOP JMP FUNCF IFUNCFJFUNCFFUNCV IFUNCVJFUNCVFUNCC FUNCCW" + +irnames = "LT GE LE GT ULT UGE ULE UGT EQ NE ABC RETF NOP BASE HIOP LOOP USE PHI RENAMEKPRI KINT KGC KPTR KKPTR KNULL KNUM KINT64KSLOT BNOT BSWAP BAND BOR BXOR BSHL BSHR BSAR BROL BROR ADD SUB MUL DIV MOD POW NEG ABS ATAN2 LDEXP MIN MAX FPMATHADDOV SUBOV MULOV AREF HREFK HREF NEWREFUREFO UREFC FREF STRREFALOAD HLOAD ULOAD FLOAD XLOAD SLOAD VLOAD ASTOREHSTOREUSTOREFSTOREXSTORESNEW XSNEW TNEW TDUP CNEW CNEWI TBAR OBAR XBAR CONV TOBIT TOSTR STRTO CALLN CALLL CALLS CALLXSCARG " + +irfpm = { [0]="floor", "ceil", "trunc", "sqrt", "exp", "exp2", "log", "log2", "log10", "sin", "cos", "tan", "other", } + +irfield = { [0]="str.len", "func.env", "tab.meta", "tab.array", "tab.node", "tab.asize", "tab.hmask", "tab.nomm", "udata.meta", "udata.udtype", "udata.file", "cdata.typeid", "cdata.ptr", "cdata.int64", "cdata.int64hi", } + +ircall = { +[0]="lj_str_cmp", +"lj_str_new", +"lj_str_tonum", +"lj_str_fromint", +"lj_str_fromnum", +"lj_tab_new1", +"lj_tab_dup", +"lj_tab_newkey", +"lj_tab_len", +"lj_gc_step_jit", +"lj_gc_barrieruv", +"lj_mem_newgco", +"lj_math_random_step", +"lj_vm_modi", +"lj_carith_divi64", +"lj_carith_divu64", +"lj_carith_modi64", +"lj_carith_modu64", +"lj_carith_powi64", +"lj_carith_powu64", +"lj_cdata_setfin", +"strlen", +"memcpy", +"memset", +"sinh", +"cosh", +"tanh", +"fputc", +"fwrite", +"fflush", +} + +traceerr = { +[0]="error thrown or hook called during recording", +"trace too long", +"trace too deep", +"too many snapshots", +"blacklisted", +"NYI: bytecode %d", +"leaving loop in root trace", +"inner loop in root trace", +"loop unroll limit reached", +"bad argument type", +"call to JIT-disabled function", +"call unroll limit reached", +"down-recursion, restarting", +"NYI: C function %p", +"NYI: FastFunc %s", +"NYI: unsupported variant of FastFunc %s", +"NYI: return to lower frame", +"store with nil or NaN key", +"missing metamethod", +"looping index lookup", +"NYI: mixed sparse/dense table", +"symbol not in cache", +"NYI: unsupported C type conversion", +"NYI: unsupported C function type", +"guard would always fail", +"too many PHIs", +"persistent type instability", +"failed to allocate mcode memory", +"machine code too long", +"hit mcode limit (retrying)", +"too many spill slots", +"inconsistent register allocation", +"NYI: cannot assemble IR instruction %d", +"NYI: PHI shuffling too complex", +"NYI: register coalescing too complex", +} + +ffnames = { +[0]="Lua", +"C", +"assert", +"type", +"getmetatable", +"setmetatable", +"getfenv", +"setfenv", +"rawget", +"rawset", +"rawequal", +"unpack", +"select", +"tonumber", +"tostring", +"next", +"pairs", +"ipairs_aux", +"ipairs", +"error", +"pcall", +"xpcall", +"loadstring", +"loadfile", +"load", +"dofile", +"gcinfo", +"collectgarbage", +"newproxy", +"print", +"coroutine.status", +"coroutine.running", +"coroutine.create", +"coroutine.yield", +"coroutine.resume", +"coroutine.wrap_aux", +"coroutine.wrap", +"math.abs", +"math.floor", +"math.ceil", +"math.sqrt", +"math.log", +"math.log10", +"math.exp", +"math.sin", 
+"math.cos", +"math.tan", +"math.asin", +"math.acos", +"math.atan", +"math.sinh", +"math.cosh", +"math.tanh", +"math.frexp", +"math.modf", +"math.deg", +"math.rad", +"math.atan2", +"math.pow", +"math.fmod", +"math.ldexp", +"math.min", +"math.max", +"math.random", +"math.randomseed", +"bit.tobit", +"bit.bnot", +"bit.bswap", +"bit.lshift", +"bit.rshift", +"bit.arshift", +"bit.rol", +"bit.ror", +"bit.band", +"bit.bor", +"bit.bxor", +"bit.tohex", +"string.len", +"string.byte", +"string.char", +"string.sub", +"string.rep", +"string.reverse", +"string.lower", +"string.upper", +"string.dump", +"string.find", +"string.match", +"string.gmatch_aux", +"string.gmatch", +"string.gsub", +"string.format", +"table.foreachi", +"table.foreach", +"table.getn", +"table.maxn", +"table.insert", +"table.remove", +"table.concat", +"table.sort", +"io.method.close", +"io.method.read", +"io.method.write", +"io.method.flush", +"io.method.seek", +"io.method.setvbuf", +"io.method.lines", +"io.method.__gc", +"io.method.__tostring", +"io.open", +"io.popen", +"io.tmpfile", +"io.close", +"io.read", +"io.write", +"io.flush", +"io.input", +"io.output", +"io.lines_iter", +"io.lines", +"io.type", +"os.execute", +"os.remove", +"os.rename", +"os.tmpname", +"os.getenv", +"os.exit", +"os.clock", +"os.date", +"os.time", +"os.difftime", +"os.setlocale", +"debug.getregistry", +"debug.getmetatable", +"debug.setmetatable", +"debug.getfenv", +"debug.setfenv", +"debug.getinfo", +"debug.getlocal", +"debug.setlocal", +"debug.getupvalue", +"debug.setupvalue", +"debug.sethook", +"debug.gethook", +"debug.debug", +"debug.traceback", +"jit.on", +"jit.off", +"jit.flush", +"jit.status", +"jit.attach", +"jit.util.funcinfo", +"jit.util.funcbc", +"jit.util.funck", +"jit.util.funcuvname", +"jit.util.traceinfo", +"jit.util.traceir", +"jit.util.tracek", +"jit.util.tracesnap", +"jit.util.tracemc", +"jit.util.traceexitstub", +"jit.util.ircalladdr", +"jit.opt.start", +"ffi.meta.__index", +"ffi.meta.__newindex", +"ffi.meta.__eq", +"ffi.meta.__len", +"ffi.meta.__lt", +"ffi.meta.__le", +"ffi.meta.__concat", +"ffi.meta.__call", +"ffi.meta.__add", +"ffi.meta.__sub", +"ffi.meta.__mul", +"ffi.meta.__div", +"ffi.meta.__mod", +"ffi.meta.__pow", +"ffi.meta.__unm", +"ffi.meta.__tostring", +"ffi.clib.__index", +"ffi.clib.__newindex", +"ffi.clib.__gc", +"ffi.cdef", +"ffi.new", +"ffi.cast", +"ffi.typeof", +"ffi.istype", +"ffi.sizeof", +"ffi.alignof", +"ffi.offsetof", +"ffi.errno", +"ffi.string", +"ffi.copy", +"ffi.fill", +"ffi.abi", +"ffi.metatype", +"ffi.gc", +"ffi.load", +} + diff --git a/src/main.c b/src/main.c index 183da2b690..271ce3d794 100644 --- a/src/main.c +++ b/src/main.c @@ -175,23 +175,30 @@ void stackDump (lua_State *L) { int docall (lua_State *L, int narg, int nret) { -// printf("<===%d\n", lua_gettop(L)); +#if 1 int status; int base = lua_gettop(L) - narg; /* function index */ +// printf("<===%d (%d)\n", base, narg); lua_pushcfunction(L, traceback); /* push traceback function */ lua_insert(L, base); /* put it under chunk and args */ status = lua_pcall(L, narg, nret, base); lua_remove(L, base); /* remove traceback function */ /* force a complete garbage collection in case of errors */ if (status != 0) { lua_pop(L, 1); lua_gc(L, LUA_GCCOLLECT, 0); } -// printf(">===%d\n", lua_gettop(L)); - if (lua_gettop(L) != nret) +// printf(">===%d (%d) [%d]\n", lua_gettop(L), nret, status); + if (lua_gettop(L) != nret + (base - 1)) { stackDump(L); // assert(0); lua_settop(L, base); } return status; +#else + int status=0; + int base = lua_gettop(L) - narg; /* function 
index */ + lua_call(L, narg, nret); + return status; +#endif } /* No print function, does .. nothing */ -- GitLab
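
For reference, a minimal Lua sketch of the table.sort extension introduced by tome.patch above: when the second argument passed to table.sort is neither nil nor a function, the patched sort_comp uses it as a key to index each element, so the elements are ordered by that field. This is a hedged sketch only; the item data below is purely illustrative and assumes the patched lib_table.c is in use.

    -- Sketch only: assumes the patched lib_table.c from tome.patch is applied.
    local items = {
      { name = "sword",  weight = 12 },
      { name = "potion", weight = 1  },
      { name = "shield", weight = 7  },
    }

    -- Stock Lua/LuaJIT form: an explicit comparison function.
    table.sort(items, function(a, b) return a.weight < b.weight end)

    -- Patched form: pass the field name; elements are compared by items[i].weight.
    table.sort(items, "weight")

    for _, it in ipairs(items) do print(it.name, it.weight) end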
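
Likewise, a small hedged sketch of controlling the JIT engine from Lua through the jit library functions enumerated in ffnames above (jit.status, jit.off, jit.on, jit.flush); the printed version string is only an example of what a LuaJIT 2.0.0-beta8 build reports.

    -- Sketch only: uses the standard LuaJIT jit module.
    local jit = require("jit")
    print(jit.version)            -- e.g. "LuaJIT 2.0.0-beta8"
    local enabled = jit.status()  -- first result: whether the JIT is currently on
    print("JIT enabled:", enabled)
    jit.off()                     -- disable compilation for the whole engine
    jit.on()                      -- re-enable it
    jit.flush()                   -- discard all compiled traces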