/*
* This file was generated automatically by gen-mterp.py for 'x86'.
*
* --> DO NOT EDIT <--
*/
/* File: c/header.c */
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* common includes */
#include "Dalvik.h"
#include "interp/InterpDefs.h"
#include "mterp/Mterp.h"
#include <math.h> // needed for fmod, fmodf
#include "mterp/common/FindInterface.h"
/*
* Configuration defines. These affect the C implementations, i.e. the
* portable interpreter(s) and C stubs.
*
* Some defines are controlled by the Makefile, e.g.:
* WITH_INSTR_CHECKS
* WITH_TRACKREF_CHECKS
* EASY_GDB
* NDEBUG
*
* If THREADED_INTERP is not defined, we use a classic "while true / switch"
* interpreter. If it is defined, then the tail end of each instruction
* handler fetches the next instruction and jumps directly to the handler.
* This increases the size of the "Std" interpreter by about 10%, but
* provides a speedup of about the same magnitude.
*
* There's a "hybrid" approach that uses a goto table instead of a switch
* statement, avoiding the "is the opcode in range" tests required for switch.
* The performance is close to the threaded version, and without the 10%
* size increase, but the benchmark results are off enough that it's not
* worth adding as a third option.
*/
#define THREADED_INTERP /* threaded vs. while-loop interpreter */
#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia (slow!) */
# define CHECK_BRANCH_OFFSETS
# define CHECK_REGISTER_INDICES
#endif
/*
* ARM EABI requires 64-bit alignment for access to 64-bit data types. We
* can't just use pointers to copy 64-bit values out of our interpreted
* register set, because gcc will generate ldrd/strd.
*
* The __UNION version copies data in and out of a union. The __MEMCPY
* version uses a memcpy() call to do the transfer; gcc is smart enough to
* not actually call memcpy(). The __UNION version is very bad on ARM;
* it only uses one more instruction than __MEMCPY, but for some reason
* gcc thinks it needs separate storage for every instance of the union.
* On top of that, it feels the need to zero them out at the start of the
* method. Net result is we zero out ~700 bytes of stack space at the top
* of the interpreter using ARM STM instructions.
*/
#if defined(__ARM_EABI__)
//# define NO_UNALIGN_64__UNION
# define NO_UNALIGN_64__MEMCPY
#endif
//#define LOG_INSTR /* verbose debugging */
/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
/*
* Keep a tally of accesses to fields. Currently only works if full DEX
* optimization is disabled.
*/
#ifdef PROFILE_FIELD_ACCESS
# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
#else
# define UPDATE_FIELD_GET(_field) ((void)0)
# define UPDATE_FIELD_PUT(_field) ((void)0)
#endif
/*
* Export another copy of the PC on every instruction; this is largely
* redundant with EXPORT_PC and the debugger code. This value can be
* compared against what we have stored on the stack with EXPORT_PC to
* help ensure that we aren't missing any export calls.
*/
#if WITH_EXTRA_GC_CHECKS > 1
# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
#else
# define EXPORT_EXTRA_PC()
#endif
/*
* Adjust the program counter. "_offset" is a signed int, in 16-bit units.
*
* Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
*
* We don't advance the program counter until we finish an instruction or
* branch, because we do want to have to unroll the PC if there's an
* exception.
*/
#ifdef CHECK_BRANCH_OFFSETS
# define ADJUST_PC(_offset) do { \
int myoff = _offset; /* deref only once */ \
if (pc + myoff < curMethod->insns || \
pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
{ \
char* desc; \
desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n", \
myoff, (int) (pc - curMethod->insns), \
curMethod->clazz->descriptor, curMethod->name, desc); \
free(desc); \
dvmAbort(); \
} \
pc += myoff; \
EXPORT_EXTRA_PC(); \
} while (false)
#else
# define ADJUST_PC(_offset) do { \
pc += _offset; \
EXPORT_EXTRA_PC(); \
} while (false)
#endif
/*
* If enabled, log instructions as we execute them.
*/
#ifdef LOG_INSTR
# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
# define ILOG(_level, ...) do { \
char debugStrBuf[128]; \
snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
if (curMethod != NULL) \
LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n", \
self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
else \
LOG(_level, LOG_TAG"i", "%-2d|####%s\n", \
self->threadId, debugStrBuf); \
} while(false)
void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
static const char kSpacing[] = " ";
#else
# define ILOGD(...) ((void)0)
# define ILOGV(...) ((void)0)
# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
#endif
/* get a long from an array of u4 */
/*
 * Read a 64-bit signed value that is stored in two consecutive u4 slots
 * of the interpreted register array, starting at ptr[idx].
 */
static inline s8 getLongFromArray(const u4* ptr, int idx)
{
#if defined(NO_UNALIGN_64__UNION)
    /* Reassemble the two 32-bit halves through a union so the compiler
     * never emits a direct 64-bit load (see alignment notes above). */
    union { s8 ll; u4 parts[2]; } pun;
    pun.parts[0] = ptr[idx];
    pun.parts[1] = ptr[idx + 1];
    return pun.ll;
#elif defined(NO_UNALIGN_64__MEMCPY)
    /* memcpy does the transfer; the compiler expands it inline. */
    s8 result;
    memcpy(&result, ptr + idx, sizeof(result));
    return result;
#else
    /* Platform tolerates 64-bit access to u4-aligned storage. */
    return *((s8*) (ptr + idx));
#endif
}
/* store a long into an array of u4 */
/*
 * Store a 64-bit signed value into two consecutive u4 slots of the
 * interpreted register array, starting at ptr[idx].
 */
static inline void putLongToArray(u4* ptr, int idx, s8 val)
{
#if defined(NO_UNALIGN_64__UNION)
    /* Split into 32-bit halves via a union; avoids 64-bit stores. */
    union { s8 ll; u4 parts[2]; } pun;
    pun.ll = val;
    ptr[idx] = pun.parts[0];
    ptr[idx + 1] = pun.parts[1];
#elif defined(NO_UNALIGN_64__MEMCPY)
    /* memcpy does the transfer; the compiler expands it inline. */
    memcpy(ptr + idx, &val, sizeof(val));
#else
    /* Platform tolerates 64-bit access to u4-aligned storage. */
    *((s8*) (ptr + idx)) = val;
#endif
}
/* get a double from an array of u4 */
/*
 * Read a double that is stored in two consecutive u4 slots of the
 * interpreted register array, starting at ptr[idx].
 */
static inline double getDoubleFromArray(const u4* ptr, int idx)
{
#if defined(NO_UNALIGN_64__UNION)
    /* Reassemble the two 32-bit halves through a union so the compiler
     * never emits a direct 64-bit load (see alignment notes above). */
    union { double d; u4 parts[2]; } pun;
    pun.parts[0] = ptr[idx];
    pun.parts[1] = ptr[idx + 1];
    return pun.d;
#elif defined(NO_UNALIGN_64__MEMCPY)
    /* memcpy does the transfer; the compiler expands it inline. */
    double result;
    memcpy(&result, ptr + idx, sizeof(result));
    return result;
#else
    /* Platform tolerates 64-bit access to u4-aligned storage. */
    return *((double*) (ptr + idx));
#endif
}
/* store a double into an array of u4 */
/*
 * Store a double into two consecutive u4 slots of the interpreted
 * register array, starting at ptr[idx].
 */
static inline void putDoubleToArray(u4* ptr, int idx, double dval)
{
#if defined(NO_UNALIGN_64__UNION)
    /* Split into 32-bit halves via a union; avoids 64-bit stores. */
    union { double d; u4 parts[2]; } pun;
    pun.d = dval;
    ptr[idx] = pun.parts[0];
    ptr[idx + 1] = pun.parts[1];
#elif defined(NO_UNALIGN_64__MEMCPY)
    /* memcpy does the transfer; the compiler expands it inline. */
    memcpy(ptr + idx, &dval, sizeof(dval));
#else
    /* Platform tolerates 64-bit access to u4-aligned storage. */
    *((double*) (ptr + idx)) = dval;
#endif
}
/*
* If enabled, validate the register number on every access. Otherwise,
* just do an array access.
*
* Assumes the existence of "u4* fp".
*
* "_idx" may be referenced more than once.
*/
#ifdef CHECK_REGISTER_INDICES
# define GET_REGISTER(_idx) \
( (_idx) < curMethod->registersSize ? \
(fp[(_idx)]) : (assert(!"bad reg"),1969) )
# define SET_REGISTER(_idx, _val) \
( (_idx) < curMethod->registersSize ? \
(fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_WIDE(_idx) \
( (_idx) < curMethod->registersSize-1 ? \
getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
# define SET_REGISTER_WIDE(_idx, _val) \
( (_idx) < curMethod->registersSize-1 ? \
putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
# define GET_REGISTER_FLOAT(_idx) \
( (_idx) < curMethod->registersSize ? \
(*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
# define SET_REGISTER_FLOAT(_idx, _val) \
( (_idx) < curMethod->registersSize ? \
(*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
# define GET_REGISTER_DOUBLE(_idx) \
( (_idx) < curMethod->registersSize-1 ? \
getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
# define SET_REGISTER_DOUBLE(_idx, _val) \
( (_idx) < curMethod->registersSize-1 ? \
putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
#else
# define GET_REGISTER(_idx) (fp[(_idx)])
# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
#endif
/*
* Get 16 bits from the specified offset of the program counter. We always
* want to load 16 bits at a time from the instruction stream -- it's more
* efficient than 8 and won't have the alignment problems that 32 might.
*
* Assumes existence of "const u2* pc".
*/
#define FETCH(_offset) (pc[(_offset)])
/*
* Extract instruction byte from 16-bit fetch (_inst is a u2).
*/
#define INST_INST(_inst) ((_inst) & 0xff)
/*
 * Replace the opcode (used when handling breakpoints). _opcode is a u1.
 *
 * _opcode is parenthesized in the expansion so that arguments containing
 * operators of lower precedence than '|' (e.g. a conditional expression)
 * substitute correctly.
 */
#define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | (_opcode))
/*
* Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
*/
#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
#define INST_B(_inst) ((_inst) >> 12)
/*
* Get the 8-bit "vAA" 8-bit register index from the instruction word.
* (_inst is u2)
*/
#define INST_AA(_inst) ((_inst) >> 8)
/*
* The current PC must be available to Throwable constructors, e.g.
* those created by dvmThrowException(), so that the exception stack
* trace can be generated correctly. If we don't do this, the offset
* within the current method won't be shown correctly. See the notes
* in Exception.c.
*
* This is also used to determine the address for precise GC.
*
* Assumes existence of "u4* fp" and "const u2* pc".
*/
#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
/*
* Determine if we need to switch to a different interpreter. "_current"
* is either INTERP_STD or INTERP_DBG. It should be fixed for a given
* interpreter generation file, which should remove the outer conditional
* from the following.
*
* If we're building without debug and profiling support, we never switch.
*/
#if defined(WITH_JIT)
# define NEED_INTERP_SWITCH(_current) ( \
(_current == INTERP_STD) ? \
dvmJitDebuggerOrProfilerActive() : !dvmJitDebuggerOrProfilerActive() )
#else
# define NEED_INTERP_SWITCH(_current) ( \
(_current == INTERP_STD) ? \
dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
#endif
/*
* Check to see if "obj" is NULL. If so, throw an exception. Assumes the
* pc has already been exported to the stack.
*
* Perform additional checks on debug builds.
*
* Use this to check for NULL when the instruction handler calls into
* something that could throw an exception (so we have already called
* EXPORT_PC at the top).
*/
static inline bool checkForNull(Object* obj)
{
    /* Null object: raise NPE and tell the caller to take the exception
     * path. Assumes the PC was already exported (see comment above). */
    if (obj == NULL) {
        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    /* Optional paranoia: verify the pointer refers to a live heap object.
     * Failure is fatal, not an exception. */
    if (!dvmIsValidObject(obj)) {
        LOGE("Invalid object %p\n", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    /* Debug builds only: a null or very-low class pointer (<= 64KB) is
     * treated as probable heap corruption and aborts the VM. */
    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
        /* probable heap corruption */
        LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
        dvmAbort();
    }
#endif
    /* Object passed all enabled checks. */
    return true;
}
/*
* Check to see if "obj" is NULL. If so, export the PC into the stack
* frame and throw an exception.
*
* Perform additional checks on debug builds.
*
* Use this to check for NULL when the instruction handler doesn't do
* anything else that can throw an exception.
*/
/* NOTE: the parameter names "fp" and "pc" are load-bearing — EXPORT_PC()
 * expands to text that references them directly. Do not rename. */
static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
{
    /* Null object: export the PC first (the caller has not done so in
     * this variant), then raise NPE and signal the exception path. */
    if (obj == NULL) {
        EXPORT_PC();
        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    /* Optional paranoia: verify the pointer refers to a live heap object.
     * Failure is fatal, not an exception. */
    if (!dvmIsValidObject(obj)) {
        LOGE("Invalid object %p\n", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    /* Debug builds only: a null or very-low class pointer (<= 64KB) is
     * treated as probable heap corruption and aborts the VM. */
    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
        /* probable heap corruption */
        LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
        dvmAbort();
    }
#endif
    /* Object passed all enabled checks. */
    return true;
}
/* File: cstubs/stubdefs.c */
/* this is a standard (no debug support) interpreter */
#define INTERP_TYPE INTERP_STD
/* Debug/profiling and tracked-ref hooks compile to nothing in the "Std"
 * flavor of the C stubs. */
#define CHECK_DEBUG_AND_PROF() ((void)0)
# define CHECK_TRACKED_REFS() ((void)0)
/* JIT trace-selection hooks are likewise no-ops here: the boolean form
 * reports "no JIT work pending", the void forms expand to nothing. */
#define CHECK_JIT_BOOL() (false)
#define CHECK_JIT_VOID()
#define ABORT_JIT_TSELECT() ((void)0)
/*
* In the C mterp stubs, "goto" is a function call followed immediately
* by a return.
*/
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
StackSaveArea* debugSaveArea;
#define GOTO_TARGET_END }
/*
* Redefine what used to be local variable accesses into MterpGlue struct
* references. (These are undefined down in "footer.c".)
*/
#define retval glue->retval
#define pc glue->pc
#define fp glue->fp
#define curMethod glue->method
#define methodClassDex glue->methodClassDex
#define self glue->self
#define debugTrackedRefStart glue->debugTrackedRefStart
/* ugh */
#define STUB_HACK(x) x
/*
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0);
#define OP_END }
/*
* Like the "portable" FINISH, but don't reload "inst", and return to caller
* when done.
*/
#define FINISH(_offset) { \
ADJUST_PC(_offset); \
CHECK_DEBUG_AND_PROF(); \
CHECK_TRACKED_REFS(); \
return; \
}
/*
* The "goto label" statements turn into function calls followed by
* return statements. Some of the functions take arguments, which in the
* portable interpreter are handled by assigning values to globals.
*/
#define GOTO_exceptionThrown() \
do { \
dvmMterp_exceptionThrown(glue); \
return; \
} while(false)
#define GOTO_returnFromMethod() \
do { \
dvmMterp_returnFromMethod(glue); \
return; \
} while(false)
#define GOTO_invoke(_target, _methodCallRange) \
do { \
dvmMterp_##_target(glue, _methodCallRange); \
return; \
} while(false)
#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
do { \
dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall, \
_vsrc1, _vdst); \
return; \
} while(false)
/*
* As a special case, "goto bail" turns into a longjmp. Use "bail_switch"
* if we need to switch to the other interpreter upon our return.
*/
#define GOTO_bail() \
dvmMterpStdBail(glue, false);
#define GOTO_bail_switch() \
dvmMterpStdBail(glue, true);
/*
* Periodically check for thread suspension.
*
* While we're at it, see if a debugger has attached or the profiler has
* started. If so, switch to a different "goto" table.
*/
#define PERIODIC_CHECKS(_entryPoint, _pcadj) { \
if (dvmCheckSuspendQuick(self)) { \
EXPORT_PC(); /* need for precise GC */ \
dvmCheckSuspendPending(self); \
} \
if (NEED_INTERP_SWITCH(INTERP_TYPE)) { \
ADJUST_PC(_pcadj); \
glue->entryPoint = _entryPoint; \
LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n", \
self->threadId, (_entryPoint), (_pcadj)); \
GOTO_bail_switch(); \
} \
}
/* File: c/opcommon.c */
/* forward declarations of goto targets */
GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
u2 count, u2 regs);
GOTO_TARGET_DECL(returnFromMethod);
GOTO_TARGET_DECL(exceptionThrown);
/*
* ===========================================================================
*
* What follows are opcode definitions shared between multiple opcodes with
* minor substitutions handled by the C pre-processor. These should probably
* use the mterp substitution mechanism instead, with the code here moved
* into common fragment files (like the asm "binop.S"), although it's hard
* to give up the C preprocessor in favor of the much simpler text subst.
*
* ===========================================================================
*/
#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
SET_REGISTER##_totype(vdst, \
GET_REGISTER##_fromtype(vsrc1)); \
FINISH(1);
/*
 * Float/double -> integer conversion with the Dalvik-specified results
 * for +/-inf and NaN. The conversion of "val" is performed only on the
 * in-range branch: converting an out-of-range or NaN floating value to an
 * integer type is undefined behavior in C (the previous unconditional
 * "result = (_tovtype) val;" before the range checks was both UB-prone
 * and a dead store, since every branch below overwrites result).
 */
#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
        _tovtype, _tortype)                                             \
    HANDLE_OPCODE(_opcode /*vA, vB*/)                                   \
    {                                                                   \
        /* spec defines specific handling for +/- inf and NaN values */ \
        _fromvtype val;                                                 \
        _tovtype intMin, intMax, result;                                \
        vdst = INST_A(inst);                                            \
        vsrc1 = INST_B(inst);                                           \
        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                   \
        val = GET_REGISTER##_fromrtype(vsrc1);                          \
        intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1);             \
        intMax = ~intMin;                                               \
        if (val >= intMax)          /* +inf */                          \
            result = intMax;                                            \
        else if (val <= intMin)     /* -inf */                          \
            result = intMin;                                            \
        else if (val != val)        /* NaN */                           \
            result = 0;                                                 \
        else                                                            \
            result = (_tovtype) val;                                    \
        SET_REGISTER##_tortype(vdst, result);                           \
    }                                                                   \
    FINISH(1);
#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
FINISH(1);
/* NOTE: the comparison result is always a signed 4-byte integer */
#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
int result; \
u2 regs; \
_varType val1, val2; \
vdst = INST_AA(inst); \
regs = FETCH(1); \
vsrc1 = regs & 0xff; \
vsrc2 = regs >> 8; \
ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
val1 = GET_REGISTER##_type(vsrc1); \
val2 = GET_REGISTER##_type(vsrc2); \
if (val1 == val2) \
result = 0; \
else if (val1 < val2) \
result = -1; \
else if (val1 > val2) \
result = 1; \
else \
result = (_nanVal); \
ILOGV("+ result=%d\n", result); \
SET_REGISTER(vdst, result); \
} \
FINISH(2);
#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
vsrc1 = INST_A(inst); \
vsrc2 = INST_B(inst); \
if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
int branchOffset = (s2)FETCH(1); /* sign-extended */ \
ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
branchOffset); \
ILOGV("> branch taken"); \
if (branchOffset < 0) \
PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
FINISH(branchOffset); \
} else { \
ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
FINISH(2); \
}
#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
vsrc1 = INST_AA(inst); \
if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
int branchOffset = (s2)FETCH(1); /* sign-extended */ \
ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
ILOGV("> branch taken"); \
if (branchOffset < 0) \
PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
FINISH(branchOffset); \
} else { \
ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
FINISH(2); \
}
#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
FINISH(1);
#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
u2 srcRegs; \
vdst = INST_AA(inst); \
srcRegs = FETCH(1); \
vsrc1 = srcRegs & 0xff; \
vsrc2 = srcRegs >> 8; \
ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
if (_chkdiv != 0) { \
s4 firstVal, secondVal, result; \
firstVal = GET_REGISTER(vsrc1); \
secondVal = GET_REGISTER(vsrc2); \
if (secondVal == 0) { \
EXPORT_PC(); \
dvmThrowException("Ljava/lang/ArithmeticException;", \
"divide by zero"); \
GOTO_exceptionThrown(); \
} \
if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
if (_chkdiv == 1) \
result = firstVal; /* division */ \
else \
result = 0; /* remainder */ \
} else { \
result = firstVal _op secondVal; \
} \
SET_REGISTER(vdst, result); \
} else { \
/* non-div/rem case */ \
SET_REGISTER(vdst, \
(s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
} \
} \
FINISH(2);
#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
u2 srcRegs; \
vdst = INST_AA(inst); \
srcRegs = FETCH(1); \
vsrc1 = srcRegs & 0xff; \
vsrc2 = srcRegs >> 8; \
ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
SET_REGISTER(vdst, \
_cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
} \
FINISH(2);
#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
vsrc2 = FETCH(1); \
ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
(_opname), vdst, vsrc1, vsrc2); \
if (_chkdiv != 0) { \
s4 firstVal, result; \
firstVal = GET_REGISTER(vsrc1); \
if ((s2) vsrc2 == 0) { \
EXPORT_PC(); \
dvmThrowException("Ljava/lang/ArithmeticException;", \
"divide by zero"); \
GOTO_exceptionThrown(); \
} \
if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
/* won't generate /lit16 instr for this; check anyway */ \
if (_chkdiv == 1) \
result = firstVal; /* division */ \
else \
result = 0; /* remainder */ \
} else { \
result = firstVal _op (s2) vsrc2; \
} \
SET_REGISTER(vdst, result); \
} else { \
/* non-div/rem case */ \
SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
} \
FINISH(2);
#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
{ \
u2 litInfo; \
vdst = INST_AA(inst); \
litInfo = FETCH(1); \
vsrc1 = litInfo & 0xff; \
vsrc2 = litInfo >> 8; /* constant */ \
ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
(_opname), vdst, vsrc1, vsrc2); \
if (_chkdiv != 0) { \
s4 firstVal, result; \
firstVal = GET_REGISTER(vsrc1); \
if ((s1) vsrc2 == 0) { \
EXPORT_PC(); \
dvmThrowException("Ljava/lang/ArithmeticException;", \
"divide by zero"); \
GOTO_exceptionThrown(); \
} \
if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
if (_chkdiv == 1) \
result = firstVal; /* division */ \
else \
result = 0; /* remainder */ \
} else { \
result = firstVal _op ((s1) vsrc2); \
} \
SET_REGISTER(vdst, result); \
} else { \
SET_REGISTER(vdst, \
(s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
} \
} \
FINISH(2);
#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
{ \
u2 litInfo; \
vdst = INST_AA(inst); \
litInfo = FETCH(1); \
vsrc1 = litInfo & 0xff; \
vsrc2 = litInfo >> 8; /* constant */ \
ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
(_opname), vdst, vsrc1, vsrc2); \
SET_REGISTER(vdst, \
_cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
} \
FINISH(2);
#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
if (_chkdiv != 0) { \
s4 firstVal, secondVal, result; \
firstVal = GET_REGISTER(vdst); \
secondVal = GET_REGISTER(vsrc1); \
if (secondVal == 0) { \
EXPORT_PC(); \
dvmThrowException("Ljava/lang/ArithmeticException;", \
"divide by zero"); \
GOTO_exceptionThrown(); \
} \
if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
if (_chkdiv == 1) \
result = firstVal; /* division */ \
else \
result = 0; /* remainder */ \
} else { \
result = firstVal _op secondVal; \
} \
SET_REGISTER(vdst, result); \
} else { \
SET_REGISTER(vdst, \
(s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
} \
FINISH(1);
#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
SET_REGISTER(vdst, \
_cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
FINISH(1);
#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
u2 srcRegs; \
vdst = INST_AA(inst); \
srcRegs = FETCH(1); \
vsrc1 = srcRegs & 0xff; \
vsrc2 = srcRegs >> 8; \
ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
if (_chkdiv != 0) { \
s8 firstVal, secondVal, result; \
firstVal = GET_REGISTER_WIDE(vsrc1); \
secondVal = GET_REGISTER_WIDE(vsrc2); \
if (secondVal == 0LL) { \
EXPORT_PC(); \
dvmThrowException("Ljava/lang/ArithmeticException;", \
"divide by zero"); \
GOTO_exceptionThrown(); \
} \
if ((u8)firstVal == 0x8000000000000000ULL && \
secondVal == -1LL) \
{ \
if (_chkdiv == 1) \
result = firstVal; /* division */ \
else \
result = 0; /* remainder */ \
} else { \
result = firstVal _op secondVal; \
} \
SET_REGISTER_WIDE(vdst, result); \
} else { \
SET_REGISTER_WIDE(vdst, \
(s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
} \
} \
FINISH(2);
#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
u2 srcRegs; \
vdst = INST_AA(inst); \
srcRegs = FETCH(1); \
vsrc1 = srcRegs & 0xff; \
vsrc2 = srcRegs >> 8; \
ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
SET_REGISTER_WIDE(vdst, \
_cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
} \
FINISH(2);
#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
if (_chkdiv != 0) { \
s8 firstVal, secondVal, result; \
firstVal = GET_REGISTER_WIDE(vdst); \
secondVal = GET_REGISTER_WIDE(vsrc1); \
if (secondVal == 0LL) { \
EXPORT_PC(); \
dvmThrowException("Ljava/lang/ArithmeticException;", \
"divide by zero"); \
GOTO_exceptionThrown(); \
} \
if ((u8)firstVal == 0x8000000000000000ULL && \
secondVal == -1LL) \
{ \
if (_chkdiv == 1) \
result = firstVal; /* division */ \
else \
result = 0; /* remainder */ \
} else { \
result = firstVal _op secondVal; \
} \
SET_REGISTER_WIDE(vdst, result); \
} else { \
SET_REGISTER_WIDE(vdst, \
(s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
} \
FINISH(1);
#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
SET_REGISTER_WIDE(vdst, \
_cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
FINISH(1);
#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
u2 srcRegs; \
vdst = INST_AA(inst); \
srcRegs = FETCH(1); \
vsrc1 = srcRegs & 0xff; \
vsrc2 = srcRegs >> 8; \
ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
SET_REGISTER_FLOAT(vdst, \
GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
} \
FINISH(2);
#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
u2 srcRegs; \
vdst = INST_AA(inst); \
srcRegs = FETCH(1); \
vsrc1 = srcRegs & 0xff; \
vsrc2 = srcRegs >> 8; \
ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
SET_REGISTER_DOUBLE(vdst, \
GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
} \
FINISH(2);
#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
SET_REGISTER_FLOAT(vdst, \
GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
FINISH(1);
#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
HANDLE_OPCODE(_opcode /*vA, vB*/) \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); \
ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
SET_REGISTER_DOUBLE(vdst, \
GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
FINISH(1);
#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
ArrayObject* arrayObj; \
u2 arrayInfo; \
EXPORT_PC(); \
vdst = INST_AA(inst); \
arrayInfo = FETCH(1); \
vsrc1 = arrayInfo & 0xff; /* array ptr */ \
vsrc2 = arrayInfo >> 8; /* index */ \
ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
if (!checkForNull((Object*) arrayObj)) \
GOTO_exceptionThrown(); \
if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
LOGV("Invalid array access: %p %d (len=%d)\n", \
arrayObj, vsrc2, arrayObj->length); \
dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
NULL); \
GOTO_exceptionThrown(); \
} \
SET_REGISTER##_regsize(vdst, \
((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]); \
ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
} \
FINISH(2);
#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
{ \
ArrayObject* arrayObj; \
u2 arrayInfo; \
EXPORT_PC(); \
vdst = INST_AA(inst); /* AA: source value */ \
arrayInfo = FETCH(1); \
vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
vsrc2 = arrayInfo >> 8; /* CC: index */ \
ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
if (!checkForNull((Object*) arrayObj)) \
GOTO_exceptionThrown(); \
if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
NULL); \
GOTO_exceptionThrown(); \
} \
ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] = \
GET_REGISTER##_regsize(vdst); \
} \
FINISH(2);
/*
* It's possible to get a bad value out of a field with sub-32-bit stores
* because the -quick versions always operate on 32 bits. Consider:
* short foo = -1 (sets a 32-bit register to 0xffffffff)
* iput-quick foo (writes all 32 bits to the field)
* short bar = 1 (sets a 32-bit register to 0x00000001)
* iput-short (writes the low 16 bits to the field)
* iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
* This can only happen when optimized and non-optimized code has interleaved
* access to the same field. This is unlikely but possible.
*
* The easiest way to fix this is to always read/write 32 bits at a time. On
* a device with a 16-bit data bus this is sub-optimal. (The alternative
* approach is to have sub-int versions of iget-quick, but now we're wasting
* Dalvik instruction space and making it less likely that handler code will
* already be in the CPU i-cache.)
*/
#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
{ \
InstField* ifield; \
Object* obj; \
EXPORT_PC(); \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); /* object ptr */ \
ref = FETCH(1); /* field ref */ \
ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
obj = (Object*) GET_REGISTER(vsrc1); \
if (!checkForNull(obj)) \
GOTO_exceptionThrown(); \
ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
if (ifield == NULL) { \
ifield = dvmResolveInstField(curMethod->clazz, ref); \
if (ifield == NULL) \
GOTO_exceptionThrown(); \
} \
SET_REGISTER##_regsize(vdst, \
dvmGetField##_ftype(obj, ifield->byteOffset)); \
ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
(u8) GET_REGISTER##_regsize(vdst)); \
UPDATE_FIELD_GET(&ifield->field); \
} \
FINISH(2);
/*
 * Expansion for the iget-quick family.  Unlike HANDLE_IGET_X, "ref" here
 * is already a raw byte offset into the object (pre-resolved by the
 * optimizer), so no field resolution is needed; the only check is that
 * the object register is non-null (checkForNullExportPC exports the PC
 * itself, since this path never EXPORT_PC()s up front).
 */
#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
{ \
Object* obj; \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); /* object ptr */ \
ref = FETCH(1); /* field offset */ \
ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
(_opname), vdst, vsrc1, ref); \
obj = (Object*) GET_REGISTER(vsrc1); \
if (!checkForNullExportPC(obj, fp, pc)) \
GOTO_exceptionThrown(); \
SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
ILOGV("+ IGETQ %d=0x%08llx", ref, \
(u8) GET_REGISTER##_regsize(vdst)); \
} \
FINISH(2);
/*
 * Expansion for the iput family (iput, iput-wide, iput-object, ...).
 * Mirror image of HANDLE_IGET_X: resolves the instance field if not yet
 * cached, then stores register vA into the field of the object in vB.
 * Null object or failed resolution jumps to exceptionThrown.
 */
#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
{ \
InstField* ifield; \
Object* obj; \
EXPORT_PC(); \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); /* object ptr */ \
ref = FETCH(1); /* field ref */ \
ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
obj = (Object*) GET_REGISTER(vsrc1); \
if (!checkForNull(obj)) \
GOTO_exceptionThrown(); \
ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
if (ifield == NULL) { \
ifield = dvmResolveInstField(curMethod->clazz, ref); \
if (ifield == NULL) \
GOTO_exceptionThrown(); \
} \
dvmSetField##_ftype(obj, ifield->byteOffset, \
GET_REGISTER##_regsize(vdst)); \
ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
(u8) GET_REGISTER##_regsize(vdst)); \
UPDATE_FIELD_PUT(&ifield->field); \
} \
FINISH(2);
/*
 * Expansion for the iput-quick family.  As with HANDLE_IGET_X_QUICK,
 * "ref" is a raw byte offset into the object rather than a field index,
 * so it is logged as "field@+%u" (the previous "field@0x%04x" format
 * mislabeled the offset as a field reference, inconsistent with the
 * iget-quick handler above).  No resolution is required; only a null
 * check on the object register (checkForNullExportPC exports the PC).
 */
#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
{ \
Object* obj; \
vdst = INST_A(inst); \
vsrc1 = INST_B(inst); /* object ptr */ \
ref = FETCH(1); /* field offset */ \
ILOGV("|iput%s-quick v%d,v%d,field@+%u", \
(_opname), vdst, vsrc1, ref); \
obj = (Object*) GET_REGISTER(vsrc1); \
if (!checkForNullExportPC(obj, fp, pc)) \
GOTO_exceptionThrown(); \
dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
ILOGV("+ IPUTQ %d=0x%08llx", ref, \
(u8) GET_REGISTER##_regsize(vdst)); \
} \
FINISH(2);
/*
* The JIT needs dvmDexGetResolvedField() to return non-null.
* Since we use the portable interpreter to build the trace, the extra
* checks in HANDLE_SGET_X and HANDLE_SPUT_X are not needed for mterp.
*/
/*
 * Expansion for the sget family.  Resolves the static field on first use
 * and loads its value into register vAA.  If resolution succeeds but the
 * DexFile cache is still empty (class initialization ongoing), the JIT
 * trace being built is aborted (see the comment above about the JIT
 * needing dvmDexGetResolvedField() to return non-null).
 */
#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
{ \
StaticField* sfield; \
vdst = INST_AA(inst); \
ref = FETCH(1); /* field ref */ \
ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
if (sfield == NULL) { \
EXPORT_PC(); \
sfield = dvmResolveStaticField(curMethod->clazz, ref); \
if (sfield == NULL) \
GOTO_exceptionThrown(); \
if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
ABORT_JIT_TSELECT(); \
} \
} \
SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
ILOGV("+ SGET '%s'=0x%08llx", \
sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
UPDATE_FIELD_GET(&sfield->field); \
} \
FINISH(2);
/*
 * Expansion for the sput family.  Mirror image of HANDLE_SGET_X: resolves
 * the static field if needed, then stores register vAA into it.  Note
 * "vdst" is reused here to hold the *source* register index (generated
 * code convention).  Aborts JIT trace selection if the resolved field is
 * not yet cached (class initialization still ongoing).
 */
#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
{ \
StaticField* sfield; \
vdst = INST_AA(inst); \
ref = FETCH(1); /* field ref */ \
ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
if (sfield == NULL) { \
EXPORT_PC(); \
sfield = dvmResolveStaticField(curMethod->clazz, ref); \
if (sfield == NULL) \
GOTO_exceptionThrown(); \
if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
ABORT_JIT_TSELECT(); \
} \
} \
dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
ILOGV("+ SPUT '%s'=0x%08llx", \
sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
UPDATE_FIELD_PUT(&sfield->field); \
} \
FINISH(2);
/*
 * Instantiate the 64-bit volatile field opcodes.  These route through the
 * "LongVolatile" get/set accessor helpers rather than the plain Long ones.
 */
/* File: c/OP_IGET_WIDE_VOLATILE.c */
HANDLE_IGET_X(OP_IGET_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
OP_END
/* File: c/OP_IPUT_WIDE_VOLATILE.c */
HANDLE_IPUT_X(OP_IPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
OP_END
/* File: c/OP_SGET_WIDE_VOLATILE.c */
HANDLE_SGET_X(OP_SGET_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
OP_END
/* File: c/OP_SPUT_WIDE_VOLATILE.c */
HANDLE_SPUT_X(OP_SPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
OP_END
/* File: c/OP_EXECUTE_INLINE_RANGE.c */
/*
 * execute-inline/range: call one of the VM's "inline native" replacement
 * methods (e.g. String.length) identified by "ref", passing up to four
 * consecutive registers starting at vdst.  The debug interpreter build
 * routes through the Dbg variant so method profiling still sees the call.
 */
HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
{
u4 arg0, arg1, arg2, arg3;
arg0 = arg1 = arg2 = arg3 = 0; /* placate gcc */
EXPORT_PC();
vsrc1 = INST_AA(inst); /* #of args */
ref = FETCH(1); /* inline call "ref" */
vdst = FETCH(2); /* range base */
ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
assert(vsrc1 <= 4);
/* gather up to four args; deliberate cascade, cases fall through */
switch (vsrc1) {
case 4:
arg3 = GET_REGISTER(vdst+3);
/* fall through */
case 3:
arg2 = GET_REGISTER(vdst+2);
/* fall through */
case 2:
arg1 = GET_REGISTER(vdst+1);
/* fall through */
case 1:
arg0 = GET_REGISTER(vdst+0);
/* fall through */
default: // case 0
;
}
#if INTERP_TYPE == INTERP_DBG
if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
#else
if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
GOTO_exceptionThrown();
#endif
}
FINISH(3);
OP_END
/* File: c/gotoTargets.c */
/*
* C footer. This has some common code shared by the various targets.
*/
/*
* Everything from here on is a "goto target". In the basic interpreter
* we jump into these targets and then jump directly to the handler for
* next instruction. Here, these are subroutines that return to the caller.
*/
/*
 * filled-new-array / filled-new-array-range: allocate an array of the
 * resolved class and populate it from registers.  Only 'L', '[' and 'I'
 * element types are supported (category-2 primitives are rejected, other
 * sub-int primitives are unimplemented).  Result lands in retval.l;
 * callers pick it up via move-result-object.
 */
GOTO_TARGET(filledNewArray, bool methodCallRange)
{
ClassObject* arrayClass;
ArrayObject* newArray;
u4* contents;
char typeCh;
int i;
u4 arg5;
EXPORT_PC();
ref = FETCH(1); /* class ref */
vdst = FETCH(2); /* first 4 regs -or- range base */
if (methodCallRange) {
vsrc1 = INST_AA(inst); /* #of elements */
arg5 = -1; /* silence compiler warning */
ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
} else {
arg5 = INST_A(inst); /* 5th register index, used only if vsrc1 == 5 */
vsrc1 = INST_B(inst); /* #of elements */
ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
vsrc1, ref, vdst, arg5);
}
/*
 * Resolve the array class.
 */
arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
if (arrayClass == NULL) {
arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
if (arrayClass == NULL)
GOTO_exceptionThrown();
}
/*
if (!dvmIsArrayClass(arrayClass)) {
dvmThrowException("Ljava/lang/RuntimeError;",
"filled-new-array needs array class");
GOTO_exceptionThrown();
}
*/
/* verifier guarantees this is an array class */
assert(dvmIsArrayClass(arrayClass));
assert(dvmIsClassInitialized(arrayClass));
/*
 * Create an array of the specified type.
 */
LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
typeCh = arrayClass->descriptor[1]; /* element type char after leading '[' */
if (typeCh == 'D' || typeCh == 'J') {
/* category 2 primitives not allowed */
dvmThrowException("Ljava/lang/RuntimeError;",
"bad filled array req");
GOTO_exceptionThrown();
} else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
/* TODO: requires multiple "fill in" loops with different widths */
LOGE("non-int primitives not implemented\n");
dvmThrowException("Ljava/lang/InternalError;",
"filled-new-array not implemented for anything but 'int'");
GOTO_exceptionThrown();
}
newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
if (newArray == NULL)
GOTO_exceptionThrown();
/*
 * Fill in the elements. It's legal for vsrc1 to be zero.
 */
contents = (u4*) newArray->contents;
if (methodCallRange) {
for (i = 0; i < vsrc1; i++)
contents[i] = GET_REGISTER(vdst+i);
} else {
assert(vsrc1 <= 5);
if (vsrc1 == 5) {
contents[4] = GET_REGISTER(arg5);
vsrc1--;
}
/* remaining register indices are packed 4 bits each in vdst */
for (i = 0; i < vsrc1; i++) {
contents[i] = GET_REGISTER(vdst & 0x0f);
vdst >>= 4;
}
}
if (typeCh == 'L' || typeCh == '[') {
/* reference elements were stored raw; notify the GC write barrier */
dvmWriteBarrierArray(newArray, 0, newArray->length);
}
retval.l = newArray;
}
FINISH(3);
GOTO_TARGET_END
/*
 * invoke-virtual / invoke-virtual-range: resolve the statically-typed
 * method, then dispatch through the receiver's vtable slot to find the
 * actual override to call.  Falls into invokeMethod for the common
 * arg-copy / frame-push path.
 */
GOTO_TARGET(invokeVirtual, bool methodCallRange)
{
Method* baseMethod;
Object* thisPtr;
EXPORT_PC();
vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
ref = FETCH(1); /* method ref */
vdst = FETCH(2); /* 4 regs -or- first reg */
/*
 * The object against which we are executing a method is always
 * in the first argument.
 */
if (methodCallRange) {
assert(vsrc1 > 0);
ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
thisPtr = (Object*) GET_REGISTER(vdst);
} else {
assert((vsrc1>>4) > 0);
ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
}
if (!checkForNull(thisPtr))
GOTO_exceptionThrown();
/*
 * Resolve the method. This is the correct method for the static
 * type of the object. We also verify access permissions here.
 */
baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
if (baseMethod == NULL) {
baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
if (baseMethod == NULL) {
ILOGV("+ unknown method or access denied\n");
GOTO_exceptionThrown();
}
}
/*
 * Combine the object we found with the vtable offset in the
 * method.
 */
assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
#if defined(WITH_JIT) && (INTERP_TYPE == INTERP_DBG)
callsiteClass = thisPtr->clazz;
#endif
#if 0
if (dvmIsAbstractMethod(methodToCall)) {
/*
 * This can happen if you create two classes, Base and Sub, where
 * Sub is a sub-class of Base. Declare a protected abstract
 * method foo() in Base, and invoke foo() from a method in Base.
 * Base is an "abstract base class" and is never instantiated
 * directly. Now, override foo() in Sub, and use Sub. This
 * works fine unless Sub stops providing an implementation of
 * the method.
 */
dvmThrowException("Ljava/lang/AbstractMethodError;",
"abstract method not implemented");
GOTO_exceptionThrown();
}
#else
assert(!dvmIsAbstractMethod(methodToCall) ||
methodToCall->nativeFunc != NULL);
#endif
LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
baseMethod->clazz->descriptor, baseMethod->name,
(u4) baseMethod->methodIndex,
methodToCall->clazz->descriptor, methodToCall->name);
assert(methodToCall != NULL);
#if 0
if (vsrc1 != methodToCall->insSize) {
LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
baseMethod->clazz->descriptor, baseMethod->name,
(u4) baseMethod->methodIndex,
methodToCall->clazz->descriptor, methodToCall->name);
//dvmDumpClass(baseMethod->clazz);
//dvmDumpClass(methodToCall->clazz);
dvmDumpAllClasses(0);
}
#endif
GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
}
GOTO_TARGET_END
/*
 * invoke-super / invoke-super-range: resolve the method, then dispatch
 * through the vtable of the *current method's class'* superclass (not the
 * receiver's superclass — see comment below).  Falls into invokeMethod.
 */
GOTO_TARGET(invokeSuper, bool methodCallRange)
{
Method* baseMethod;
u2 thisReg;
EXPORT_PC();
vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
ref = FETCH(1); /* method ref */
vdst = FETCH(2); /* 4 regs -or- first reg */
if (methodCallRange) {
ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
thisReg = vdst;
} else {
ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
thisReg = vdst & 0x0f;
}
/* impossible in well-formed code, but we must check nevertheless */
if (!checkForNull((Object*) GET_REGISTER(thisReg)))
GOTO_exceptionThrown();
/*
 * Resolve the method. This is the correct method for the static
 * type of the object. We also verify access permissions here.
 * The first arg to dvmResolveMethod() is just the referring class
 * (used for class loaders and such), so we don't want to pass
 * the superclass into the resolution call.
 */
baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
if (baseMethod == NULL) {
baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
if (baseMethod == NULL) {
ILOGV("+ unknown method or access denied\n");
GOTO_exceptionThrown();
}
}
/*
 * Combine the object we found with the vtable offset in the
 * method's class.
 *
 * We're using the current method's class' superclass, not the
 * superclass of "this". This is because we might be executing
 * in a method inherited from a superclass, and we want to run
 * in that class' superclass.
 */
if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
/*
 * Method does not exist in the superclass. Could happen if
 * superclass gets updated.
 */
dvmThrowException("Ljava/lang/NoSuchMethodError;",
baseMethod->name);
GOTO_exceptionThrown();
}
methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
#if 0
if (dvmIsAbstractMethod(methodToCall)) {
dvmThrowException("Ljava/lang/AbstractMethodError;",
"abstract method not implemented");
GOTO_exceptionThrown();
}
#else
assert(!dvmIsAbstractMethod(methodToCall) ||
methodToCall->nativeFunc != NULL);
#endif
LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
baseMethod->clazz->descriptor, baseMethod->name,
methodToCall->clazz->descriptor, methodToCall->name);
assert(methodToCall != NULL);
GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
}
GOTO_TARGET_END
/*
 * invoke-interface / invoke-interface-range: look up the concrete Method*
 * implementing the interface method on the receiver's class (via the
 * interface-method cache), then fall into invokeMethod.
 */
GOTO_TARGET(invokeInterface, bool methodCallRange)
{
Object* thisPtr;
ClassObject* thisClass;
EXPORT_PC();
vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
ref = FETCH(1); /* method ref */
vdst = FETCH(2); /* 4 regs -or- first reg */
/*
 * The object against which we are executing a method is always
 * in the first argument.
 */
if (methodCallRange) {
assert(vsrc1 > 0);
ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
thisPtr = (Object*) GET_REGISTER(vdst);
} else {
assert((vsrc1>>4) > 0);
ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
}
if (!checkForNull(thisPtr))
GOTO_exceptionThrown();
thisClass = thisPtr->clazz;
#if defined(WITH_JIT) && (INTERP_TYPE == INTERP_DBG)
callsiteClass = thisClass;
#endif
/*
 * Given a class and a method index, find the Method* with the
 * actual code we want to execute.
 */
methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
methodClassDex);
if (methodToCall == NULL) {
/* lookup failure leaves an exception raised (e.g. ICCE/NSME) */
assert(dvmCheckException(self));
GOTO_exceptionThrown();
}
GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
}
GOTO_TARGET_END
/*
 * invoke-direct / invoke-direct-range: call a non-virtual method
 * (constructor or private method) — no vtable dispatch, just resolution.
 * Falls into invokeMethod.
 */
GOTO_TARGET(invokeDirect, bool methodCallRange)
{
u2 thisReg;
vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
ref = FETCH(1); /* method ref */
vdst = FETCH(2); /* 4 regs -or- first reg */
EXPORT_PC();
if (methodCallRange) {
ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
thisReg = vdst;
} else {
ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
thisReg = vdst & 0x0f;
}
if (!checkForNull((Object*) GET_REGISTER(thisReg)))
GOTO_exceptionThrown();
methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
if (methodToCall == NULL) {
methodToCall = dvmResolveMethod(curMethod->clazz, ref,
METHOD_DIRECT);
if (methodToCall == NULL) {
ILOGV("+ unknown direct method\n"); // should be impossible
GOTO_exceptionThrown();
}
}
GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
}
GOTO_TARGET_END
/*
 * invoke-static / invoke-static-range: no receiver, so no null check;
 * resolve the method and fall into invokeMethod.  Aborts JIT trace
 * selection if resolution succeeded but the cache is still empty
 * (class initialization ongoing) — see comment below.
 */
GOTO_TARGET(invokeStatic, bool methodCallRange)
vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
ref = FETCH(1); /* method ref */
vdst = FETCH(2); /* 4 regs -or- first reg */
EXPORT_PC();
if (methodCallRange)
ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
else
ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
if (methodToCall == NULL) {
methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
if (methodToCall == NULL) {
ILOGV("+ unknown method\n");
GOTO_exceptionThrown();
}
/*
 * The JIT needs dvmDexGetResolvedMethod() to return non-null.
 * Since we use the portable interpreter to build the trace, this extra
 * check is not needed for mterp.
 */
if (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL) {
/* Class initialization is still ongoing */
ABORT_JIT_TSELECT();
}
}
GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
GOTO_TARGET_END
/*
 * invoke-virtual-quick / -range: the optimizer has already replaced the
 * method reference with a vtable index ("ref"), so we skip resolution and
 * index the receiver's vtable directly.  Falls into invokeMethod.
 */
GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
{
Object* thisPtr;
EXPORT_PC();
vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
ref = FETCH(1); /* vtable index */
vdst = FETCH(2); /* 4 regs -or- first reg */
/*
 * The object against which we are executing a method is always
 * in the first argument.
 */
if (methodCallRange) {
assert(vsrc1 > 0);
ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
thisPtr = (Object*) GET_REGISTER(vdst);
} else {
assert((vsrc1>>4) > 0);
ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
}
if (!checkForNull(thisPtr))
GOTO_exceptionThrown();
#if defined(WITH_JIT) && (INTERP_TYPE == INTERP_DBG)
callsiteClass = thisPtr->clazz;
#endif
/*
 * Combine the object we found with the vtable offset in the
 * method.
 */
assert(ref < thisPtr->clazz->vtableCount);
methodToCall = thisPtr->clazz->vtable[ref];
#if 0
if (dvmIsAbstractMethod(methodToCall)) {
dvmThrowException("Ljava/lang/AbstractMethodError;",
"abstract method not implemented");
GOTO_exceptionThrown();
}
#else
assert(!dvmIsAbstractMethod(methodToCall) ||
methodToCall->nativeFunc != NULL);
#endif
LOGVV("+++ virtual[%d]=%s.%s\n",
ref, methodToCall->clazz->descriptor, methodToCall->name);
assert(methodToCall != NULL);
GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
}
GOTO_TARGET_END
/*
 * invoke-super-quick / -range: like invokeSuper but "ref" is already a
 * vtable index, so we index the current method's class' superclass vtable
 * directly with no resolution.  Falls into invokeMethod.
 */
GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
{
u2 thisReg;
EXPORT_PC();
vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
ref = FETCH(1); /* vtable index */
vdst = FETCH(2); /* 4 regs -or- first reg */
if (methodCallRange) {
ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
vsrc1, ref, vdst, vdst+vsrc1-1);
thisReg = vdst;
} else {
ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
thisReg = vdst & 0x0f;
}
/* impossible in well-formed code, but we must check nevertheless */
if (!checkForNull((Object*) GET_REGISTER(thisReg)))
GOTO_exceptionThrown();
#if 0 /* impossible in optimized + verified code */
if (ref >= curMethod->clazz->super->vtableCount) {
dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
GOTO_exceptionThrown();
}
#else
assert(ref < curMethod->clazz->super->vtableCount);
#endif
/*
 * Combine the object we found with the vtable offset in the
 * method's class.
 *
 * We're using the current method's class' superclass, not the
 * superclass of "this". This is because we might be executing
 * in a method inherited from a superclass, and we want to run
 * in the method's class' superclass.
 */
methodToCall = curMethod->clazz->super->vtable[ref];
#if 0
if (dvmIsAbstractMethod(methodToCall)) {
dvmThrowException("Ljava/lang/AbstractMethodError;",
"abstract method not implemented");
GOTO_exceptionThrown();
}
#else
assert(!dvmIsAbstractMethod(methodToCall) ||
methodToCall->nativeFunc != NULL);
#endif
LOGVV("+++ super-virtual[%d]=%s.%s\n",
ref, methodToCall->clazz->descriptor, methodToCall->name);
assert(methodToCall != NULL);
GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
}
GOTO_TARGET_END
/*
* General handling for return-void, return, and return-wide. Put the
* return value in "retval" before jumping here.
*/
GOTO_TARGET(returnFromMethod)
{
StackSaveArea* saveArea;
/*
 * We must do this BEFORE we pop the previous stack frame off, so
 * that the GC can see the return value (if any) in the local vars.
 *
 * Since this is now an interpreter switch point, we must do it before
 * we do anything at all.
 */
PERIODIC_CHECKS(kInterpEntryReturn, 0);
ILOGV("> retval=0x%llx (leaving %s.%s %s)",
retval.j, curMethod->clazz->descriptor, curMethod->name,
curMethod->shorty);
//DUMP_REGS(curMethod, fp);
saveArea = SAVEAREA_FROM_FP(fp);
#ifdef EASY_GDB
debugSaveArea = saveArea;
#endif
#if (INTERP_TYPE == INTERP_DBG)
TRACE_METHOD_EXIT(self, curMethod);
#endif
/* back up to previous frame and see if we hit a break */
fp = saveArea->prevFrame;
assert(fp != NULL);
if (dvmIsBreakFrame(fp)) {
/* bail without popping the method frame from stack */
LOGVV("+++ returned into break frame\n");
#if defined(WITH_JIT)
/* Let the Jit know the return is terminating normally */
CHECK_JIT_VOID();
#endif
GOTO_bail();
}
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
curMethod->name, curMethod->shorty);
/* use FINISH on the caller's invoke instruction */
//u2 invokeInstr = INST_INST(FETCH(0));
if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
invokeInstr <= OP_INVOKE_INTERFACE*/)
{
/* advance past the caller's invoke instruction (3 code units;
 * the invoke handlers above all FETCH operands at offsets 1-2) */
FINISH(3);
} else {
//LOGE("Unknown invoke instr %02x at %d\n",
// invokeInstr, (int) (pc - curMethod->insns));
assert(false);
}
}
GOTO_TARGET_END
/*
* Jump here when the code throws an exception.
*
* By the time we get here, the Throwable has been created and the stack
* trace has been saved off.
*/
GOTO_TARGET(exceptionThrown)
{
Object* exception;
int catchRelPc;
/*
 * Since this is now an interpreter switch point, we must do it before
 * we do anything at all.
 */
PERIODIC_CHECKS(kInterpEntryThrow, 0);
#if defined(WITH_JIT)
// Something threw during trace selection - abort the current trace
ABORT_JIT_TSELECT();
#endif
/*
 * We save off the exception and clear the exception status. While
 * processing the exception we might need to load some Throwable
 * classes, and we don't want class loader exceptions to get
 * confused with this one.
 */
assert(dvmCheckException(self));
exception = dvmGetException(self);
/* keep the exception alive across any GC during unwinding;
 * released below on every exit path */
dvmAddTrackedAlloc(exception, self);
dvmClearException(self);
LOGV("Handling exception %s at %s:%d\n",
exception->clazz->descriptor, curMethod->name,
dvmLineNumFromPC(curMethod, pc - curMethod->insns));
#if (INTERP_TYPE == INTERP_DBG)
/*
 * Tell the debugger about it.
 *
 * TODO: if the exception was thrown by interpreted code, control
 * fell through native, and then back to us, we will report the
 * exception at the point of the throw and again here. We can avoid
 * this by not reporting exceptions when we jump here directly from
 * the native call code above, but then we won't report exceptions
 * that were thrown *from* the JNI code (as opposed to *through* it).
 *
 * The correct solution is probably to ignore from-native exceptions
 * here, and have the JNI exception code do the reporting to the
 * debugger.
 */
if (gDvm.debuggerActive) {
void* catchFrame;
catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
exception, true, &catchFrame);
dvmDbgPostException(fp, pc - curMethod->insns, catchFrame,
catchRelPc, exception);
}
#endif
/*
 * We need to unroll to the catch block or the nearest "break"
 * frame.
 *
 * A break frame could indicate that we have reached an intermediate
 * native call, or have gone off the top of the stack and the thread
 * needs to exit. Either way, we return from here, leaving the
 * exception raised.
 *
 * If we do find a catch block, we want to transfer execution to
 * that point.
 *
 * Note this can cause an exception while resolving classes in
 * the "catch" blocks.
 */
catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
exception, false, (void*)&fp);
/*
 * Restore the stack bounds after an overflow. This isn't going to
 * be correct in all circumstances, e.g. if JNI code devours the
 * exception this won't happen until some other exception gets
 * thrown. If the code keeps pushing the stack bounds we'll end
 * up aborting the VM.
 *
 * Note we want to do this *after* the call to dvmFindCatchBlock,
 * because that may need extra stack space to resolve exception
 * classes (e.g. through a class loader).
 *
 * It's possible for the stack overflow handling to cause an
 * exception (specifically, class resolution in a "catch" block
 * during the call above), so we could see the thread's overflow
 * flag raised but actually be running in a "nested" interpreter
 * frame. We don't allow doubled-up StackOverflowErrors, so
 * we can check for this by just looking at the exception type
 * in the cleanup function. Also, we won't unroll past the SOE
 * point because the more-recent exception will hit a break frame
 * as it unrolls to here.
 */
if (self->stackOverflowed)
dvmCleanupStackOverflow(self, exception);
if (catchRelPc < 0) {
/* falling through to JNI code or off the bottom of the stack */
#if DVM_SHOW_EXCEPTION >= 2
LOGD("Exception %s from %s:%d not caught locally\n",
exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
dvmLineNumFromPC(curMethod, pc - curMethod->insns));
#endif
/* re-raise for the caller and bail out of the interpreter loop */
dvmSetException(self, exception);
dvmReleaseTrackedAlloc(exception, self);
GOTO_bail();
}
#if DVM_SHOW_EXCEPTION >= 3
{
const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
LOGD("Exception %s thrown from %s:%d to %s:%d\n",
exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
dvmLineNumFromPC(curMethod, pc - curMethod->insns),
dvmGetMethodSourceFile(catchMethod),
dvmLineNumFromPC(catchMethod, catchRelPc));
}
#endif
/*
 * Adjust local variables to match self->curFrame and the
 * updated PC.
 */
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
curMethod->name, curMethod->shorty);
DUMP_REGS(curMethod, fp, false); // show all regs
/*
 * Restore the exception if the handler wants it.
 *
 * The Dalvik spec mandates that, if an exception handler wants to
 * do something with the exception, the first instruction executed
 * must be "move-exception". We can pass the exception along
 * through the thread struct, and let the move-exception instruction
 * clear it for us.
 *
 * If the handler doesn't call move-exception, we don't want to
 * finish here with an exception still pending.
 */
if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
dvmSetException(self, exception);
dvmReleaseTrackedAlloc(exception, self);
FINISH(0);
}
GOTO_TARGET_END
/*
* General handling for invoke-{virtual,super,direct,static,interface},
* including "quick" variants.
*
* Set "methodToCall" to the Method we're calling, and "methodCallRange"
* depending on whether this is a "/range" instruction.
*
* For a range call:
* "vsrc1" holds the argument count (8 bits)
* "vdst" holds the first argument in the range
* For a non-range call:
* "vsrc1" holds the argument count (4 bits) and the 5th argument index
* "vdst" holds four 4-bit register indices
*
* The caller must EXPORT_PC before jumping here, because any method
* call can throw a stack overflow exception.
*/
GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
u2 count, u2 regs)
{
/* in the C-stub build the parameters arrive here; map them onto the
 * interpreter's working variables */
STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);
//printf("range=%d call=%p count=%d regs=0x%04x\n",
// methodCallRange, methodToCall, count, regs);
//printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
// methodToCall->name, methodToCall->shorty);
u4* outs;
int i;
/*
 * Copy args. This may corrupt vsrc1/vdst.
 */
if (methodCallRange) {
// could use memcpy or a "Duff's device"; most functions have
// so few args it won't matter much
assert(vsrc1 <= curMethod->outsSize);
assert(vsrc1 == methodToCall->insSize);
outs = OUTS_FROM_FP(fp, vsrc1);
for (i = 0; i < vsrc1; i++)
outs[i] = GET_REGISTER(vdst+i);
} else {
u4 count = vsrc1 >> 4;
assert(count <= curMethod->outsSize);
assert(count == methodToCall->insSize);
assert(count <= 5);
outs = OUTS_FROM_FP(fp, count);
#if 0
if (count == 5) {
outs[4] = GET_REGISTER(vsrc1 & 0x0f);
count--;
}
for (i = 0; i < (int) count; i++) {
outs[i] = GET_REGISTER(vdst & 0x0f);
vdst >>= 4;
}
#else
// This version executes fewer instructions but is larger
// overall. Seems to be a teensy bit faster.
assert((vdst >> 16) == 0); // 16 bits -or- high 16 bits clear
/* deliberate fall-through cascade: case N copies args N-1..0 */
switch (count) {
case 5:
outs[4] = GET_REGISTER(vsrc1 & 0x0f);
/* fall through */
case 4:
outs[3] = GET_REGISTER(vdst >> 12);
/* fall through */
case 3:
outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
/* fall through */
case 2:
outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
/* fall through */
case 1:
outs[0] = GET_REGISTER(vdst & 0x0f);
/* fall through */
default:
;
}
#endif
}
}
/*
 * (This was originally a "goto" target; I've kept it separate from the
 * stuff above in case we want to refactor things again.)
 *
 * At this point, we have the arguments stored in the "outs" area of
 * the current method's stack frame, and the method to call in
 * "methodToCall". Push a new stack frame.
 */
{
StackSaveArea* newSaveArea;
u4* newFp;
ILOGV("> %s%s.%s %s",
dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
methodToCall->clazz->descriptor, methodToCall->name,
methodToCall->shorty);
/* frames grow downward: new frame sits below the current save area */
newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
newSaveArea = SAVEAREA_FROM_FP(newFp);
/* verify that we have enough space */
if (true) {
u1* bottom;
bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
if (bottom < self->interpStackEnd) {
/* stack overflow */
LOGV("Stack overflow on method call (start=%p end=%p newBot=%p(%d) size=%d '%s')\n",
self->interpStackStart, self->interpStackEnd, bottom,
(u1*) fp - bottom, self->interpStackSize,
methodToCall->name);
dvmHandleStackOverflow(self, methodToCall);
assert(dvmCheckException(self));
GOTO_exceptionThrown();
}
//LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
// fp, newFp, newSaveArea, bottom);
}
#ifdef LOG_INSTR
if (methodToCall->registersSize > methodToCall->insSize) {
/*
 * This makes valgrind quiet when we print registers that
 * haven't been initialized. Turn it off when the debug
 * messages are disabled -- we want valgrind to report any
 * used-before-initialized issues.
 */
memset(newFp, 0xcc,
(methodToCall->registersSize - methodToCall->insSize) * 4);
}
#endif
#ifdef EASY_GDB
newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
#endif
newSaveArea->prevFrame = fp;
newSaveArea->savedPc = pc;
#if defined(WITH_JIT)
newSaveArea->returnAddr = 0;
#endif
newSaveArea->method = methodToCall;
if (!dvmIsNativeMethod(methodToCall)) {
/*
 * "Call" interpreted code. Reposition the PC, update the
 * frame pointer and other local state, and continue.
 */
curMethod = methodToCall;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
fp = self->curFrame = newFp;
#ifdef EASY_GDB
debugSaveArea = SAVEAREA_FROM_FP(newFp);
#endif
#if INTERP_TYPE == INTERP_DBG
debugIsMethodEntry = true; // profiling, debugging
#endif
ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
curMethod->name, curMethod->shorty);
DUMP_REGS(curMethod, fp, true); // show input args
FINISH(0); // jump to method start
} else {
/* set this up for JNI locals, even if not a JNI native */
#ifdef USE_INDIRECT_REF
newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
#else
newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.nextEntry;
#endif
self->curFrame = newFp;
DUMP_REGS(methodToCall, newFp, true); // show input args
#if (INTERP_TYPE == INTERP_DBG)
if (gDvm.debuggerActive) {
dvmDbgPostLocationEvent(methodToCall, -1,
dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
}
#endif
#if (INTERP_TYPE == INTERP_DBG)
TRACE_METHOD_ENTER(self, methodToCall);
#endif
{
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
methodToCall->name, methodToCall->shorty);
}
#if defined(WITH_JIT)
/* Allow the Jit to end any pending trace building */
CHECK_JIT_VOID();
#endif
/*
 * Jump through native call bridge. Because we leave no
 * space for locals on native calls, "newFp" points directly
 * to the method arguments.
 */
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
#if (INTERP_TYPE == INTERP_DBG)
if (gDvm.debuggerActive) {
dvmDbgPostLocationEvent(methodToCall, -1,
dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
}
#endif
#if (INTERP_TYPE == INTERP_DBG)
TRACE_METHOD_EXIT(self, methodToCall);
#endif
/* pop frame off */
dvmPopJniLocals(self, newSaveArea);
self->curFrame = fp;
/*
 * If the native code threw an exception, or interpreted code
 * invoked by the native call threw one and nobody has cleared
 * it, jump to our local exception handling.
 */
if (dvmCheckException(self)) {
LOGV("Exception thrown by/below native code\n");
GOTO_exceptionThrown();
}
ILOGD("> retval=0x%llx (leaving native)", retval.j);
ILOGD("> (return from native %s.%s to %s.%s %s)",
methodToCall->clazz->descriptor, methodToCall->name,
curMethod->clazz->descriptor, curMethod->name,
curMethod->shorty);
//u2 invokeInstr = INST_INST(FETCH(0));
if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
invokeInstr <= OP_INVOKE_INTERFACE*/)
{
/* advance past the 3-code-unit invoke instruction */
FINISH(3);
} else {
//LOGE("Unknown invoke instr %02x at %d\n",
// invokeInstr, (int) (pc - curMethod->insns));
assert(false);
}
}
}
assert(false); // should not get here
GOTO_TARGET_END
/* File: cstubs/enddefs.c */
/* undefine "magic" name remapping */
#undef retval
#undef pc
#undef fp
#undef curMethod
#undef methodClassDex
#undef self
#undef debugTrackedRefStart