      code = assembler.GenerateCode();                                       \
      size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale); \
      dispatch_table_[index] = code->entry();                                \
      TraceCodegen(code);                                                    \
      PROFILE(                                                               \
          isolate_,                                                          \
          CodeCreateEvent(                                                   \
              CodeEventListener::BYTECODE_HANDLER_TAG,                       \
              AbstractCode::cast(*code),                                     \
              Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
    }                                                                        \
  }
    BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
  }

  // Fill unused entries with the illegal bytecode handler.
  size_t illegal_index =
      GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle);
  for (size_t index = 0; index < arraysize(dispatch_table_); ++index) {
    if (dispatch_table_[index] == nullptr) {
      dispatch_table_[index] = dispatch_table_[illegal_index];
    }
  }
}

Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
                                      OperandScale operand_scale) {
  DCHECK(IsDispatchTableInitialized());
  DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
  Address code_entry = dispatch_table_[index];
  return Code::GetCodeFromTargetAddress(code_entry);
}

// static
size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
                                          OperandScale operand_scale) {
  static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
  size_t index = static_cast<size_t>(bytecode);
  switch (operand_scale) {
    case OperandScale::kSingle:
      return index;
    case OperandScale::kDouble:
      return index + kEntriesPerOperandScale;
    case OperandScale::kQuadruple:
      return index + 2 * kEntriesPerOperandScale;
  }
  UNREACHABLE();
  return 0;
}

void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
  for (int i = 0; i < kDispatchTableSize; i++) {
    Address code_entry = dispatch_table_[i];
    Object* code = code_entry == nullptr
                       ? nullptr
                       : Code::GetCodeFromTargetAddress(code_entry);
    Object* old_code = code;
    v->VisitPointer(&code);
    if (code != old_code) {
      dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
    }
  }
}
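// A brief sketch of the dispatch table layout implied by
// GetDispatchTableIndex() above. The 256-entry stride follows from
// kEntriesPerOperandScale (1 << kBitsPerByte); the concrete bytecode value
// used in the example is illustrative:
//
//   [  0 ..  255]  handlers for OperandScale::kSingle
//   [256 ..  511]  handlers for OperandScale::kDouble
//   [512 ..  767]  handlers for OperandScale::kQuadruple
//
// e.g. the kDouble handler for a bytecode with byte value 0x21 lives at
// index 0x21 + 256.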
// static
int Interpreter::InterruptBudget() {
  // TODO(ignition): Tune code size multiplier.
  const int kCodeSizeMultiplier = 32;
  return FLAG_interrupt_budget * kCodeSizeMultiplier;
}

bool Interpreter::MakeBytecode(CompilationInfo* info) {
  RuntimeCallTimerScope runtimeTimer(info->isolate(),
                                     &RuntimeCallStats::CompileIgnition);
  TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
  TRACE_EVENT0("v8", "V8.CompileIgnition");

  if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
    OFStream os(stdout);
    base::SmartArrayPointer<char> name = info->GetDebugName();
    os << "[generating bytecode for function: " << name.get() << "]"
       << std::endl
       << std::flush;
  }

#ifdef DEBUG
  if (info->parse_info() && FLAG_print_source) {
    OFStream os(stdout);
    os << "--- Source from AST ---" << std::endl
       << PrettyPrinter(info->isolate()).PrintProgram(info->literal())
       << std::endl
       << std::flush;
  }

  if (info->parse_info() && FLAG_print_ast) {
    OFStream os(stdout);
    os << "--- AST ---" << std::endl
       << AstPrinter(info->isolate()).PrintProgram(info->literal())
       << std::endl
       << std::flush;
  }
#endif  // DEBUG

  BytecodeGenerator generator(info);
  Handle<BytecodeArray> bytecodes = generator.MakeBytecode();

  if (generator.HasStackOverflow()) return false;

  if (FLAG_print_bytecode) {
    OFStream os(stdout);
    bytecodes->Print(os);
    os << std::flush;
  }

  info->SetBytecodeArray(bytecodes);
  info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
  return true;
}

bool Interpreter::IsDispatchTableInitialized() {
  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
      FLAG_trace_ignition_dispatches) {
    // Regenerate table to add bytecode tracing operations, print the assembly
    // code generated by TurboFan or instrument handlers with dispatch
    // counters.
    return false;
  }
  return dispatch_table_[0] != nullptr;
}

void Interpreter::TraceCodegen(Handle<Code> code) {
#ifdef ENABLE_DISASSEMBLER
  if (FLAG_trace_ignition_codegen) {
    OFStream os(stdout);
    code->Disassemble(nullptr, os);
    os << std::flush;
  }
#endif  // ENABLE_DISASSEMBLER
}

const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
#ifdef ENABLE_DISASSEMBLER
#define RETURN_NAME(Name, ...)                                 \
  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
      code->entry()) {                                         \
    return #Name;                                              \
  }
  BYTECODE_LIST(RETURN_NAME)
#undef RETURN_NAME
#endif  // ENABLE_DISASSEMBLER
  return nullptr;
}

uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
  int from_index = Bytecodes::ToByte(from);
  int to_index = Bytecodes::ToByte(to);
  return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes +
                                           to_index];
}

Local<v8::Object> Interpreter::GetDispatchCountersObject() {
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  Local<v8::Context> context = isolate->GetCurrentContext();

  Local<v8::Object> counters_map = v8::Object::New(isolate);

  // Output is a JSON-encoded object of objects.
  //
  // The keys of the top-level object are source bytecodes, and the
  // corresponding values are objects. The keys of these inner objects are the
  // destinations of the dispatch, and each associated value is a counter for
  // the corresponding source-destination dispatch pair.
  //
  // Only non-zero counters are written to file, but an entry in the top-level
  // object is always present, even if the value is empty because all counters
  // for that source are zero.
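  //
  // For example (bytecode names and counter values here are purely
  // illustrative):
  //
  //   {
  //     "LdaZero": { "Star": 21, "Return": 2 },
  //     "Star":    { "Ldar": 54, "LdaZero": 21 },
  //     "Return":  {}
  //   }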
  for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
    Bytecode from_bytecode = Bytecodes::FromByte(from_index);
    Local<v8::Object> counters_row = v8::Object::New(isolate);

    for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
      Bytecode to_bytecode = Bytecodes::FromByte(to_index);
      uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);

      if (counter > 0) {
        std::string to_name = Bytecodes::ToString(to_bytecode);
        Local<v8::String> to_name_object =
            v8::String::NewFromUtf8(isolate, to_name.c_str(),
                                    NewStringType::kNormal)
                .ToLocalChecked();
        Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
        CHECK(counters_row
                  ->DefineOwnProperty(context, to_name_object, counter_object)
                  .IsJust());
      }
    }

    std::string from_name = Bytecodes::ToString(from_bytecode);
    Local<v8::String> from_name_object =
        v8::String::NewFromUtf8(isolate, from_name.c_str(),
                                NewStringType::kNormal)
            .ToLocalChecked();

    CHECK(counters_map
              ->DefineOwnProperty(context, from_name_object, counters_row)
              .IsJust());
  }

  return counters_map;
}

// LdaZero
//
// Load literal '0' into the accumulator.
void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
  Node* zero_value = __ NumberConstant(0.0);
  __ SetAccumulator(zero_value);
  __ Dispatch();
}

// LdaSmi <imm>
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
  Node* raw_int = __ BytecodeOperandImm(0);
  Node* smi_int = __ SmiTag(raw_int);
  __ SetAccumulator(smi_int);
  __ Dispatch();
}

// LdaConstant <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  __ SetAccumulator(constant);
  __ Dispatch();
}

// LdaUndefined
//
// Load Undefined into the accumulator.
void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  __ SetAccumulator(undefined_value);
  __ Dispatch();
}

// LdrUndefined <reg>
//
// Loads undefined into the accumulator and |reg|.
void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* destination = __ BytecodeOperandReg(0);
  __ StoreRegister(undefined_value, destination);
  __ Dispatch();
}

// LdaNull
//
// Load Null into the accumulator.
void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  __ SetAccumulator(null_value);
  __ Dispatch();
}

// LdaTheHole
//
// Load TheHole into the accumulator.
void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  __ SetAccumulator(the_hole_value);
  __ Dispatch();
}

// LdaTrue
//
// Load True into the accumulator.
void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
  Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
  __ SetAccumulator(true_value);
  __ Dispatch();
}

// LdaFalse
//
// Load False into the accumulator.
void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
  Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
  __ SetAccumulator(false_value);
  __ Dispatch();
}
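// As a rough illustration of the accumulator-based design (register names
// and exact bytecode selection are illustrative), a declaration like
// 'var x = true;' lowers to something like:
//
//   LdaTrue    ;; accumulator = true
//   Star r0    ;; r0 = accumulator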
// Ldar <src>
//
// Load accumulator with value from register <src>.
void Interpreter::DoLdar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* value = __ LoadRegister(reg_index);
  __ SetAccumulator(value);
  __ Dispatch();
}

// Star <dst>
//
// Store accumulator to register <dst>.
void Interpreter::DoStar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* accumulator = __ GetAccumulator();
  __ StoreRegister(accumulator, reg_index);
  __ Dispatch();
}

// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
void Interpreter::DoMov(InterpreterAssembler* assembler) {
  Node* src_index = __ BytecodeOperandReg(0);
  Node* src_value = __ LoadRegister(src_index);
  Node* dst_index = __ BytecodeOperandReg(1);
  __ StoreRegister(src_value, dst_index);
  __ Dispatch();
}

Node* Interpreter::BuildLoadGlobal(Callable ic,
                                   InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();

  // Load the global via the LoadGlobalIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* raw_slot = __ BytecodeOperandIdx(0);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  return __ CallStub(ic.descriptor(), code_target, context, smi_slot,
                     type_feedback_vector);
}

// LdaGlobal <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> outside of a typeof.
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrGlobal <slot> <reg>
//
// Load the global with name in constant pool entry <name_index> into
// register <reg> using FeedbackVector slot <slot> outside of a typeof.
void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  Node* destination = __ BytecodeOperandReg(1);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

// LdaGlobalInsideTypeof <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();
  Node* native_context =
      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);

  // Store the global via the StoreIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* constant_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}
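// Sketch of how the feedback slot operands above are used (slot and
// constant-pool indices are illustrative): a global read and write might
// encode as
//
//   LdaGlobal [3]              ;; load global, feedback slot 3
//   StaGlobalSloppy [0], [5]   ;; name at constant 0, feedback slot 5
//
// where each slot indexes the function's TypeFeedbackVector.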
// StaGlobalSloppy <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStaGlobal(ic, assembler);
}

// StaGlobalStrict <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStaGlobal(ic, assembler);
}

compiler::Node* Interpreter::BuildLoadContextSlot(
    InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  return __ LoadContextSlot(context, slot_index);
}

// LdaContextSlot <context> <slot_index>
//
// Load the object in |slot_index| of |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
  Node* result = BuildLoadContextSlot(assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrContextSlot <context> <slot_index> <reg>
//
// Load the object in <slot_index> of <context> into register <reg>.
void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
  Node* result = BuildLoadContextSlot(assembler);
  Node* destination = __ BytecodeOperandReg(2);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  __ StoreContextSlot(context, slot_index, value);
  __ Dispatch();
}

void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
                                  InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, name);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaLookupSlot <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupSlotInsideTypeof <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
                                  InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(is_strict(language_mode)
                                    ? Runtime::kStoreLookupSlot_Strict
                                    : Runtime::kStoreLookupSlot_Sloppy,
                                context, name, value);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StaLookupSlotSloppy <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
}
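// Illustrative use of the context-slot bytecodes above (register and slot
// numbers are illustrative): for a closure reading a captured variable 'a',
// the inner function might execute
//
//   LdaContextSlot r0, [4]   ;; r0 holds the context, 'a' lives in slot 4
//
// while the defining function initialises it with StaContextSlot r0, [4].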
// StaLookupSlotStrict <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::STRICT, assembler);
}

Node* Interpreter::BuildLoadNamedProperty(Callable ic,
                                          InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* register_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(register_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  return __ CallStub(ic.descriptor(), code_target, context, object, name,
                     smi_slot, type_feedback_vector);
}

// LdaNamedProperty <object> <name_index> <slot>
//
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadNamedProperty(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrNamedProperty <object> <name_index> <slot> <reg>
//
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index> and puts the result into register <reg>.
void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadNamedProperty(ic, assembler);
  Node* destination = __ BytecodeOperandReg(3);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
                                          InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* name = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  return __ CallStub(ic.descriptor(), code_target, context, object, name,
                     smi_slot, type_feedback_vector);
}

// LdaKeyedProperty <object> <slot>
//
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the
// key in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadKeyedProperty(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}
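// For example (operand numbering illustrative), 'o.x' and 'o[k]' lower to
// the named and keyed variants respectively:
//
//   LdaNamedProperty r0, [0], [3]   ;; o in r0, "x" at constant 0, slot 3
//   LdaKeyedProperty r0, [5]        ;; o in r0, key in the accumulator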
// LdrKeyedProperty <object> <slot> <reg>
//
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the
// key in the accumulator and puts the result in register <reg>.
void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadKeyedProperty(ic, assembler);
  Node* destination = __ BytecodeOperandReg(2);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaNamedPropertySloppy <object> <name_index> <slot>
//
// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object>
// and the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStoreIC(ic, assembler);
}

// StaNamedPropertyStrict <object> <name_index> <slot>
//
// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object>
// and the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStoreIC(ic, assembler);
}

void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* name_reg_index = __ BytecodeOperandReg(1);
  Node* name = __ LoadRegister(name_reg_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaKeyedPropertySloppy <object> <key> <slot>
//
// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
  DoKeyedStoreIC(ic, assembler);
}

// StaKeyedPropertyStrict <object> <key> <slot>
//
// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
  DoKeyedStoreIC(ic, assembler);
}

// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* new_context = __ GetAccumulator();
  Node* old_context = __ GetContext();
  __ StoreRegister(old_context, reg_index);
  __ SetContext(new_context);
  __ Dispatch();
}
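// PushContext/PopContext are emitted as a pair around scopes that allocate
// their own context; roughly (register choice illustrative):
//
//   ;; ... new context ends up in the accumulator ...
//   PushContext r5   ;; save the old context in r5, make the new one current
//   ;; ... code that runs inside the new scope ...
//   PopContext r5    ;; restore the saved context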
// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  __ SetContext(context);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = Generator::Generate(assembler, lhs, rhs, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
  DoBinaryOp<AddStub>(assembler);
}

// Sub <src>
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
  DoBinaryOp<SubtractStub>(assembler);
}

// Mul <src>
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
  DoBinaryOp<MultiplyStub>(assembler);
}

// Div <src>
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
  DoBinaryOp<DivideStub>(assembler);
}

// Mod <src>
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
  DoBinaryOp<ModulusStub>(assembler);
}

// BitwiseOr <src>
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseOrStub>(assembler);
}

// BitwiseXor <src>
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseXorStub>(assembler);
}

// BitwiseAnd <src>
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseAndStub>(assembler);
}

// ShiftLeft <src>
//
// Left shifts register <src> by the count specified in the accumulator.
// Register <src> is converted to an int32 and the accumulator to uint32
// before the operation. 5 lsb bits from the accumulator are used as count
// i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftLeftStub>(assembler);
}

// ShiftRight <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// Result is sign extended. Register <src> is converted to an int32 and the
// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
// are used as count i.e. <src> >> (accumulator & 0x1F).
void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftRightStub>(assembler);
}

// ShiftRightLogical <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// Result is zero-filled. The accumulator and register <src> are converted to
// uint32 before the operation. 5 lsb bits from the accumulator are used as
// count i.e. <src> >>> (accumulator & 0x1F).
void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftRightLogicalStub>(assembler);
}

void Interpreter::DoUnaryOp(Callable callable,
                            InterpreterAssembler* assembler) {
  Node* target = __ HeapConstant(callable.code());
  Node* accumulator = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result =
      __ CallStub(callable.descriptor(), target, context, accumulator);
  __ SetAccumulator(result);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = Generator::Generate(assembler, value, context);
  __ SetAccumulator(result);
  __ Dispatch();
}
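// Binary operators take their left operand from a register and their right
// operand from the accumulator. Roughly (registers illustrative), 'a + b'
// becomes:
//
//   Ldar a    ;; accumulator = a
//   Star r0   ;; spill the lhs to r0
//   Ldar b    ;; accumulator = b (rhs)
//   Add r0    ;; accumulator = r0 + accumulator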
// ToName
//
// Cast the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToName(isolate_), assembler);
}

// ToNumber
//
// Cast the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
}

// ToObject
//
// Cast the object referenced by the accumulator to a JSObject.
void Interpreter::DoToObject(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToObject(isolate_), assembler);
}

// Inc
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
  DoUnaryOp<IncStub>(assembler);
}

// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
  DoUnaryOp<DecStub>(assembler);
}

Node* Interpreter::BuildToBoolean(Node* value,
                                  InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  return ToBooleanStub::Generate(assembler, value, context);
}

Node* Interpreter::BuildLogicalNot(Node* value,
                                   InterpreterAssembler* assembler) {
  Variable result(assembler, MachineRepresentation::kTagged);
  Label if_true(assembler), if_false(assembler), end(assembler);
  Node* true_value = __ BooleanConstant(true);
  Node* false_value = __ BooleanConstant(false);
  __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
  __ Bind(&if_true);
  {
    result.Bind(false_value);
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    if (FLAG_debug_code) {
      __ AbortIfWordNotEqual(value, false_value,
                             BailoutReason::kExpectedBooleanValue);
    }
    result.Bind(true_value);
    __ Goto(&end);
  }
  __ Bind(&end);
  return result.value();
}

// ToBooleanLogicalNot
//
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(value, assembler);
  Node* result = BuildLogicalNot(to_boolean_value, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LogicalNot
//
// Perform logical-not on the accumulator, which must already be a boolean
// value.
void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* result = BuildLogicalNot(value, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// TypeOf
//
// Load the accumulator with the string representing type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::Typeof(isolate_), assembler);
}

void Interpreter::DoDelete(Runtime::FunctionId function_id,
                           InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* key = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, object, key);
  __ SetAccumulator(result);
  __ Dispatch();
}

// DeletePropertyStrict
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following strict mode semantics.
void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Strict, assembler);
}
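// The two logical-not bytecodes let the generator skip the ToBoolean step
// when the operand is statically known to be a boolean. Illustratively:
//
//   !x      =>  ToBooleanLogicalNot   ;; x may be any value
//   !done   =>  LogicalNot            ;; when 'done' is provably boolean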
// DeletePropertySloppy
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following sloppy mode semantics.
void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
}

void Interpreter::DoJSCall(InterpreterAssembler* assembler,
                           TailCallMode tail_call_mode) {
  Node* function_reg = __ BytecodeOperandReg(0);
  Node* function = __ LoadRegister(function_reg);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* receiver_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
  Node* context = __ GetContext();
  // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
  Node* result =
      __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Call <callable> <receiver> <arg_count>
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
void Interpreter::DoCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kDisallow);
}

// TailCall <callable> <receiver> <arg_count>
//
// Tail call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kAllow);
}

void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers.
void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
  DoCallRuntimeCommon(assembler);
}

// InvokeIntrinsic <function_id> <first_arg> <arg_count>
//
// Implements the semantic equivalent of calling the runtime function
// |function_id| with the first argument in |first_arg| and |arg_count|
// arguments in subsequent registers.
void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandIntrinsicId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* arg_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  IntrinsicsHelper helper(assembler);
  Node* result =
      helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
  __ SetAccumulator(result);
  __ Dispatch();
}
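// Call layout sketch for DoJSCall above (registers illustrative): for
// 'f(x, y)' with f in r0 and the receiver in r1, the arguments occupy the
// registers that follow the receiver, and the count operand includes the
// receiver:
//
//   Call r0, r1, #3   ;; callable, receiver, receiver-plus-args count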
void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
  // Call the runtime function.
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result_pair =
      __ CallRuntimeN(function_id, context, first_arg, args_count, 2);

  // Store the results in <first_return> and <first_return + 1>.
  Node* first_return_reg = __ BytecodeOperandReg(3);
  Node* second_return_reg = __ NextRegister(first_return_reg);
  Node* result0 = __ Projection(0, result_pair);
  Node* result1 = __ Projection(1, result_pair);
  __ StoreRegister(result0, first_return_reg);
  __ StoreRegister(result1, second_return_reg);
  __ Dispatch();
}

// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
//
// Call the runtime function |function_id| which returns a pair, with the
// first argument in register |first_arg| and |arg_count| arguments in
// subsequent registers. Returns the result in <first_return> and
// <first_return + 1>.
void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
  DoCallRuntimeForPairCommon(assembler);
}

void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
  Node* context_index = __ BytecodeOperandIdx(0);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);

  // Get the function to call from the native context.
  Node* context = __ GetContext();
  Node* native_context =
      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
  Node* function = __ LoadContextSlot(native_context, context_index);

  // Call the function.
  Node* result = __ CallJS(function, context, first_arg, args_count,
                           TailCallMode::kDisallow);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallJSRuntime <context_index> <receiver> <arg_count>
//
// Call the JS runtime function that has the |context_index| with the receiver
// in register |receiver| and |arg_count| arguments in subsequent registers.
void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
  DoCallJSRuntimeCommon(assembler);
}

void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
  Node* new_target = __ GetAccumulator();
  Node* constructor_reg = __ BytecodeOperandReg(0);
  Node* constructor = __ LoadRegister(constructor_reg);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ CallConstruct(constructor, context, new_target, first_arg,
                                  args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers. The new.target is in the accumulator.
void Interpreter::DoNew(InterpreterAssembler* assembler) {
  DoCallConstruct(assembler);
}

// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<EqualStub>(assembler);
}

// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<NotEqualStub>(assembler);
}
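// Pair-returning runtime calls fan out into two consecutive registers.
// Illustratively (runtime function name and registers illustrative):
//
//   CallRuntimeForPair [LoadLookupSlotForCall], r0, #1, r1
//
// leaves the first result in r1 and the second in r2.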
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the
// accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
  DoBinaryOp<StrictEqualStub>(assembler);
}

// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
  DoBinaryOp<LessThanStub>(assembler);
}

// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
  DoBinaryOp<GreaterThanStub>(assembler);
}

// TestLessThanOrEqual <src>
//
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<LessThanOrEqualStub>(assembler);
}

// TestGreaterThanOrEqual <src>
//
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<GreaterThanOrEqualStub>(assembler);
}

// TestIn <src>
//
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
  DoBinaryOp<HasPropertyStub>(assembler);
}

// TestInstanceOf <src>
//
// Test if the object referenced by the <src> register is an instance of type
// referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
  DoBinaryOp<InstanceOfStub>(assembler);
}

// Jump <imm>
//
// Jump by number of bytes represented by the immediate operand |imm|.
void Interpreter::DoJump(InterpreterAssembler* assembler) {
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ Jump(relative_jump);
}

// JumpConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ Jump(relative_jump);
}

// JumpIfTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains true.
void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(accumulator, true_value, relative_jump);
}

// JumpIfTrueConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains true.
void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(accumulator, true_value, relative_jump);
}

// JumpIfFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains false.
void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
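// Jump offsets are relative to the current bytecode. A simple 'if' lowers
// roughly to (registers and offsets illustrative):
//
//   TestEqual r0       ;; accumulator = (r0 == accumulator)
//   JumpIfFalse [12]   ;; skip the then-block when the test failed
//   ;; ... then-block ...
//   Jump [7]           ;; skip the else-block
//   ;; ... else-block ...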
// JumpIfFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains false.
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
}

// JumpIfToBooleanTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}

// JumpIfToBooleanTrueConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is true when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
    InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}

// JumpIfToBooleanFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}

// JumpIfToBooleanFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is false when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
    InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}

// JumpIfNull <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
// JumpIfNullConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}

// JumpIfUndefined <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfUndefinedConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfNotHole <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the hole.
void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

// JumpIfNotHoleConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is not the hole constant.
void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
  Callable callable = CodeFactory::FastCloneRegExp(isolate_);
  Node* target = __ HeapConstant(callable.code());
  Node* index = __ BytecodeOperandIdx(0);
  Node* pattern = __ LoadConstantPoolEntry(index);
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* flags_raw = __ BytecodeOperandFlag(2);
  Node* flags = __ SmiTag(flags_raw);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallStub(callable.descriptor(), target, context, closure,
                             literal_index, pattern, flags);
  __ SetAccumulator(result);
  __ Dispatch();
}
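// JumpIfNotHole is the fast path for TDZ checks on 'let'/'const' bindings:
// the slot is pre-initialised with the hole. Sketch (registers and offsets
// illustrative):
//
//   LdaContextSlot r0, [4]
//   JumpIfNotHole [5]   ;; initialised: skip the error path
//   ;; ... throw a ReferenceError ...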
// CreateArrayLiteral <element_idx> <literal_idx> <flags>
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant_elements = __ LoadConstantPoolEntry(index);
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* flags_raw = __ BytecodeOperandFlag(2);
  Node* flags = __ SmiTag(flags_raw);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
                                literal_index, constant_elements, flags);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* bytecode_flags = __ BytecodeOperandFlag(2);
  Node* closure = __ LoadRegister(Register::function_closure());

  // Check if we can do a fast clone or have to call the runtime.
  Label if_fast_clone(assembler),
      if_not_fast_clone(assembler, Label::kDeferred);
  Node* fast_clone_properties_count = __ BitFieldDecode<
      CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
  __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);

  __ Bind(&if_fast_clone);
  {
    // If we can do a fast clone do the fast-path in
    // FastCloneShallowObjectStub.
    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
        assembler, &if_not_fast_clone, closure, literal_index,
        fast_clone_properties_count);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_not_fast_clone);
  {
    // If we can't do a fast clone, call into the runtime.
    Node* index = __ BytecodeOperandIdx(0);
    Node* constant_elements = __ LoadConstantPoolEntry(index);
    Node* context = __ GetContext();

    STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
    Node* flags_raw = __ Word32And(
        bytecode_flags,
        __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
    Node* flags = __ SmiTag(flags_raw);

    Node* result =
        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
                       literal_index, constant_elements, flags);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// CreateClosure <index> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
  // calling into the runtime.
  Node* index = __ BytecodeOperandIdx(0);
  Node* shared = __ LoadConstantPoolEntry(index);
  Node* tenured_raw = __ BytecodeOperandFlag(1);
  Node* tenured = __ SmiTag(tenured_raw);
  Node* context = __ GetContext();
  Node* result =
      __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
  __ SetAccumulator(result);
  __ Dispatch();
}
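// Literal creation sketch (indices illustrative): 'var a = {x: 1};' can
// encode as
//
//   CreateObjectLiteral [0], [2], #<flags>
//
// where constant 0 holds the boilerplate description, 2 is the literal
// index, and the flags byte decides between the fast clone path and the
// runtime call above.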
// CreateMappedArguments
//
// Creates a new mapped arguments object.
void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();

  Label if_duplicate_parameters(assembler, Label::kDeferred);
  Label if_not_duplicate_parameters(assembler);

  // Check if function has duplicate parameters.
  // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
  // duplicate parameters.
  Node* shared_info =
      __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
  Node* compiler_hints = __ LoadObjectField(
      shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
      MachineType::Uint8());
  Node* duplicate_parameters_bit = __ Int32Constant(
      1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
  Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
  __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);

  __ Bind(&if_not_duplicate_parameters);
  {
    // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan
    // stub.
    Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
    Node* target = __ HeapConstant(callable.code());
    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_duplicate_parameters);
  {
    Node* result =
        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// CreateUnmappedArguments
//
// Creates a new unmapped arguments object.
void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
  Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
  Node* target = __ HeapConstant(callable.code());
  Node* context = __ GetContext();
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateRestParameter
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
  Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
  Node* target = __ HeapConstant(callable.code());
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StackCheck
//
// Performs a stack guard check.
void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);

  Node* interrupt = __ StackCheckTriggeredInterrupt();
  __ BranchIf(interrupt, &stack_check_interrupt, &ok);

  __ Bind(&ok);
  __ Dispatch();

  __ Bind(&stack_check_interrupt);
  {
    Node* context = __ GetContext();
    __ CallRuntime(Runtime::kStackGuard, context);
    __ Dispatch();
  }
}

// Throw
//
// Throws the exception in the accumulator.
void Interpreter::DoThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// ReThrow
//
// Re-throws the exception in the accumulator.
void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kReThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// Return
//
// Return the value in the accumulator.
void Interpreter::DoReturn(InterpreterAssembler* assembler) {
  __ UpdateInterruptBudgetOnReturn();
  Node* accumulator = __ GetAccumulator();
  __ Return(accumulator);
}
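// Note on the budget: UpdateInterruptBudgetOnReturn() above debits the same
// counter that InterruptBudget() sizes (FLAG_interrupt_budget scaled by the
// code size multiplier); when the budget is exhausted an interrupt is raised,
// which is used, among other things, for profiling and tier-up decisions.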
// Debugger
//
// Call runtime to handle debugger statement.
void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
  __ Dispatch();
}

// DebugBreak
//
// Call runtime to handle a debug break.
#define DEBUG_BREAK(Name, ...)                                                \
  void Interpreter::Do##Name(InterpreterAssembler* assembler) {               \
    Node* context = __ GetContext();                                          \
    Node* accumulator = __ GetAccumulator();                                  \
    Node* original_handler =                                                  \
        __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
    __ DispatchToBytecodeHandler(original_handler);                           \
  }
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK

// ForInPrepare <cache_info_triple>
//
// Returns state for for..in loop execution based on the object in the
// accumulator. The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
  Node* object = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);

  // Set output registers:
  //   0 == cache_type, 1 == cache_array, 2 == cache_length
  Node* output_register = __ BytecodeOperandReg(0);
  for (int i = 0; i < 3; i++) {
    Node* cache_info = __ Projection(i, result_triple);
    __ StoreRegister(cache_info, output_register);
    output_register = __ NextRegister(output_register);
  }
  __ Dispatch();
}

// ForInNext <receiver> <index> <cache_info_pair>
//
// Returns the next enumerable property in the accumulator.
void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
  Node* receiver_reg = __ BytecodeOperandReg(0);
  Node* receiver = __ LoadRegister(receiver_reg);
  Node* index_reg = __ BytecodeOperandReg(1);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_type_reg = __ BytecodeOperandReg(2);
  Node* cache_type = __ LoadRegister(cache_type_reg);
  Node* cache_array_reg = __ NextRegister(cache_type_reg);
  Node* cache_array = __ LoadRegister(cache_array_reg);

  // Load the next key from the enumeration array.
  Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
                                       CodeStubAssembler::SMI_PARAMETERS);

  // Check if we can use the for-in fast path potentially using the enum cache.
  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
  Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
  Node* condition = __ WordEqual(receiver_map, cache_type);
  __ BranchIf(condition, &if_fast, &if_slow);
  __ Bind(&if_fast);
  {
    // Enum cache in use for {receiver}, the {key} is definitely valid.
    __ SetAccumulator(key);
    __ Dispatch();
  }
  __ Bind(&if_slow);
  {
    // Record the fact that we hit the for-in slow path.
    Node* vector_index = __ BytecodeOperandIdx(3);
    Node* type_feedback_vector = __ LoadTypeFeedbackVector();
    Node* megamorphic_sentinel =
        __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
    __ StoreFixedArrayElement(type_feedback_vector, vector_index,
                              megamorphic_sentinel, SKIP_WRITE_BARRIER);

    // Need to filter the {key} for the {receiver}.
    Node* context = __ GetContext();
    Node* result =
        __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}
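// A for..in loop stitches these bytecodes together roughly as follows
// (registers, feedback slot, and label syntax are illustrative):
//
//   ForInPrepare r4             ;; r4..r6 = cache_type, cache_array, length
//   LdaZero
//   Star r7                     ;; index = 0
// loop:
//   ForInDone r7, r6            ;; accumulator = (index == length)
//   JumpIfTrue [done]
//   ForInNext r0, r7, r4, [3]   ;; next key, feedback slot 3
//   ;; ... loop body, then: ForInStep r7; Star r7; Jump [loop]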
// ForInDone <index> <cache_length>
//
// Returns true if the end of the enumerable properties has been reached.
void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_length_reg = __ BytecodeOperandReg(1);
  Node* cache_length = __ LoadRegister(cache_length_reg);

  // Check if {index} is at {cache_length} already.
  Label if_true(assembler), if_false(assembler), end(assembler);
  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
  __ Bind(&if_true);
  {
    __ SetAccumulator(__ BooleanConstant(true));
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    __ SetAccumulator(__ BooleanConstant(false));
    __ Goto(&end);
  }
  __ Bind(&end);
  __ Dispatch();
}

// ForInStep <index>
//
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* one = __ SmiConstant(Smi::FromInt(1));
  Node* result = __ SmiAdd(index, one);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Wide
//
// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
void Interpreter::DoWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kDouble);
}

// ExtraWide
//
// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kQuadruple);
}

// Illegal
//
// An invalid bytecode aborting execution if dispatched.
void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
  __ Abort(kInvalidBytecode);
}

// Nop
//
// No operation.
void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }

// SuspendGenerator
(code)->entry(); } } } // static int Interpreter::InterruptBudget() { // TODO(ignition): Tune code size multiplier. const int kCodeSizeMultiplier = 32; return FLAG_interrupt_budget * kCodeSizeMultiplier; } bool Interpreter::MakeBytecode(CompilationInfo* info) { RuntimeCallTimerScope runtimeTimer(info->isolate(), &RuntimeCallStats::CompileIgnition); TimerEventScope timer(info->isolate()); TRACE_EVENT0("v8", "V8.CompileIgnition"); if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) { OFStream os(stdout); base::SmartArrayPointer name = info->GetDebugName(); os << "[generating bytecode for function: " << info->GetDebugName().get() << "]" << std::endl << std::flush; } #ifdef DEBUG if (info->parse_info() && FLAG_print_source) { OFStream os(stdout); os << "--- Source from AST ---" << std::endl << PrettyPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl << std::flush; } if (info->parse_info() && FLAG_print_ast) { OFStream os(stdout); os << "--- AST ---" << std::endl << AstPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl << std::flush; } #endif // DEBUG BytecodeGenerator generator(info); Handle bytecodes = generator.MakeBytecode(); if (generator.HasStackOverflow()) return false; if (FLAG_print_bytecode) { OFStream os(stdout); bytecodes->Print(os); os << std::flush; } info->SetBytecodeArray(bytecodes); info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline()); return true; } bool Interpreter::IsDispatchTableInitialized() { if (FLAG_trace_ignition || FLAG_trace_ignition_codegen || FLAG_trace_ignition_dispatches) { // Regenerate table to add bytecode tracing operations, print the assembly // code generated by TurboFan or instrument handlers with dispatch counters. return false; } return dispatch_table_[0] != nullptr; } void Interpreter::TraceCodegen(Handle code) { #ifdef ENABLE_DISASSEMBLER if (FLAG_trace_ignition_codegen) { OFStream os(stdout); code->Disassemble(nullptr, os); os << std::flush; } #endif // ENABLE_DISASSEMBLER } const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) { #ifdef ENABLE_DISASSEMBLER #define RETURN_NAME(Name, ...) \ if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \ code->entry()) { \ return #Name; \ } BYTECODE_LIST(RETURN_NAME) #undef RETURN_NAME #endif // ENABLE_DISASSEMBLER return nullptr; } uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const { int from_index = Bytecodes::ToByte(from); int to_index = Bytecodes::ToByte(to); return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes + to_index]; } Local Interpreter::GetDispatchCountersObject() { v8::Isolate* isolate = reinterpret_cast(isolate_); Local context = isolate->GetCurrentContext(); Local counters_map = v8::Object::New(isolate); // Output is a JSON-encoded object of objects. // // The keys on the top level object are source bytecodes, // and corresponding value are objects. Keys on these last are the // destinations of the dispatch and the value associated is a counter for // the correspondent source-destination dispatch chain. // // Only non-zero counters are written to file, but an entry in the top-level // object is always present, even if the value is empty because all counters // for that source are zero. 
  for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
    Bytecode from_bytecode = Bytecodes::FromByte(from_index);
    Local<v8::Object> counters_row = v8::Object::New(isolate);

    for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
      Bytecode to_bytecode = Bytecodes::FromByte(to_index);
      uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);

      if (counter > 0) {
        std::string to_name = Bytecodes::ToString(to_bytecode);
        Local<v8::String> to_name_object =
            v8::String::NewFromUtf8(isolate, to_name.c_str(),
                                    NewStringType::kNormal)
                .ToLocalChecked();
        Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
        CHECK(counters_row
                  ->DefineOwnProperty(context, to_name_object, counter_object)
                  .IsJust());
      }
    }

    std::string from_name = Bytecodes::ToString(from_bytecode);
    Local<v8::String> from_name_object =
        v8::String::NewFromUtf8(isolate, from_name.c_str(),
                                NewStringType::kNormal)
            .ToLocalChecked();

    CHECK(
        counters_map->DefineOwnProperty(context, from_name_object, counters_row)
            .IsJust());
  }

  return counters_map;
}

// LdaZero
//
// Load literal '0' into the accumulator.
void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
  Node* zero_value = __ NumberConstant(0.0);
  __ SetAccumulator(zero_value);
  __ Dispatch();
}

// LdaSmi <imm>
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
  Node* raw_int = __ BytecodeOperandImm(0);
  Node* smi_int = __ SmiTag(raw_int);
  __ SetAccumulator(smi_int);
  __ Dispatch();
}

// LdaConstant <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  __ SetAccumulator(constant);
  __ Dispatch();
}

// LdaUndefined
//
// Load Undefined into the accumulator.
void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  __ SetAccumulator(undefined_value);
  __ Dispatch();
}

// LdrUndefined <reg>
//
// Loads undefined into the accumulator and |reg|.
void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* destination = __ BytecodeOperandReg(0);
  __ StoreRegister(undefined_value, destination);
  __ Dispatch();
}

// LdaNull
//
// Load Null into the accumulator.
void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  __ SetAccumulator(null_value);
  __ Dispatch();
}

// LdaTheHole
//
// Load TheHole into the accumulator.
void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  __ SetAccumulator(the_hole_value);
  __ Dispatch();
}

// LdaTrue
//
// Load True into the accumulator.
void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
  Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
  __ SetAccumulator(true_value);
  __ Dispatch();
}

// LdaFalse
//
// Load False into the accumulator.
void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
  Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
  __ SetAccumulator(false_value);
  __ Dispatch();
}
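// Example (hand-written for illustration; register name hypothetical): the
// constant-loading bytecodes above only write the accumulator, so a simple
// initializer like `var x = 7;` loads and then stores:
//
//   LdaSmi #7    ; Smi literal into the accumulator
//   Star r_x     ; spill into the local's register
//
// while `var x = 0;` can use the operand-free LdaZero.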
// Ldar <src>
//
// Load accumulator with value from register <src>.
void Interpreter::DoLdar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* value = __ LoadRegister(reg_index);
  __ SetAccumulator(value);
  __ Dispatch();
}

// Star <dst>
//
// Store accumulator to register <dst>.
void Interpreter::DoStar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* accumulator = __ GetAccumulator();
  __ StoreRegister(accumulator, reg_index);
  __ Dispatch();
}

// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
void Interpreter::DoMov(InterpreterAssembler* assembler) {
  Node* src_index = __ BytecodeOperandReg(0);
  Node* src_value = __ LoadRegister(src_index);
  Node* dst_index = __ BytecodeOperandReg(1);
  __ StoreRegister(src_value, dst_index);
  __ Dispatch();
}
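// Example (hand-written, register names hypothetical): for `b = a;` with
// `a` in r0 and `b` in r1, the generator can emit either
//
//   Ldar r0     ; accumulator <- r0
//   Star r1     ; r1 <- accumulator
//
// or the accumulator-preserving single bytecode
//
//   Mov r0, r1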
Node* Interpreter::BuildLoadGlobal(Callable ic,
                                   InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();

  // Load the global via the LoadGlobalIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* raw_slot = __ BytecodeOperandIdx(0);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  return __ CallStub(ic.descriptor(), code_target, context, smi_slot,
                     type_feedback_vector);
}

// LdaGlobal <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedBackVector slot <slot> outside of a typeof.
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrGlobal <slot> <reg>
//
// Load the global with name in constant pool entry <name_index> into
// register <reg> using FeedBackVector slot <slot> outside of a typeof.
void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  Node* destination = __ BytecodeOperandReg(1);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

// LdaGlobalInsideTypeof <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedBackVector slot <slot> inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();
  Node* native_context =
      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);

  // Store the global via the StoreIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* constant_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaGlobalSloppy <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStaGlobal(ic, assembler);
}

// StaGlobalStrict <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedBackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStaGlobal(ic, assembler);
}

compiler::Node* Interpreter::BuildLoadContextSlot(
    InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  return __ LoadContextSlot(context, slot_index);
}

// LdaContextSlot <context> <slot_index>
//
// Load the object in |slot_index| of |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
  Node* result = BuildLoadContextSlot(assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrContextSlot <context> <slot_index> <reg>
//
// Load the object in <slot_index> of <context> into register <reg>.
void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
  Node* result = BuildLoadContextSlot(assembler);
  Node* destination = __ BytecodeOperandReg(2);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  __ StoreContextSlot(context, slot_index, value);
  __ Dispatch();
}
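// Example (hand-written, register and slot numbers hypothetical): a closure
// variable such as `x` in
//
//   function outer() { var x = 1; return function inner() { return x; }; }
//
// is read by `inner` roughly as
//
//   LdaContextSlot <context>, [4]
//
// where <context> designates the register holding outer's context object and
// 4 is the slot allocated for `x`.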
void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
                                  InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, name);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaLookupSlot <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupSlotInsideTypeof <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
                                  InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(is_strict(language_mode)
                                    ? Runtime::kStoreLookupSlot_Strict
                                    : Runtime::kStoreLookupSlot_Sloppy,
                                context, name, value);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StaLookupSlotSloppy <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
}

// StaLookupSlotStrict <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::STRICT, assembler);
}

Node* Interpreter::BuildLoadNamedProperty(Callable ic,
                                          InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* register_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(register_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  return __ CallStub(ic.descriptor(), code_target, context, object, name,
                     smi_slot, type_feedback_vector);
}

// LdaNamedProperty <object> <name_index> <slot>
//
// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadNamedProperty(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrNamedProperty <object> <name_index> <slot> <reg>
//
// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
// constant pool entry <name_index> and puts the result into register <reg>.
void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadNamedProperty(ic, assembler);
  Node* destination = __ BytecodeOperandReg(3);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
                                          InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* name = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  return __ CallStub(ic.descriptor(), code_target, context, object, name,
                     smi_slot, type_feedback_vector);
}

// KeyedLoadIC <object> <slot>
//
// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
// in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadKeyedProperty(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}
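// Example (hand-written, register/index names hypothetical): `o.x` compiles
// to a named load and `o[k]` to a keyed load:
//
//   LdaNamedProperty r_o, [x_name_index], [slot]  ; name from constant pool
//
//   Ldar r_k                                      ; key into the accumulator
//   LdaKeyedProperty r_o, [slot]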
// LdrKeyedProperty <object> <slot> <reg>
//
// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
// in the accumulator and puts the result in register <reg>.
void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadKeyedProperty(ic, assembler);
  Node* destination = __ BytecodeOperandReg(2);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaNamedPropertySloppy <object> <name_index> <slot>
//
// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStoreIC(ic, assembler);
}

// StaNamedPropertyStrict <object> <name_index> <slot>
//
// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStoreIC(ic, assembler);
}

void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* name_reg_index = __ BytecodeOperandReg(1);
  Node* name = __ LoadRegister(name_reg_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaKeyedPropertySloppy <object> <key> <slot>
//
// Calls the sloppy mode KeyedStoreIC at FeedBackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
  DoKeyedStoreIC(ic, assembler);
}

// StaKeyedPropertyStrict <object> <key> <slot>
//
// Calls the strict mode KeyedStoreIC at FeedBackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
  DoKeyedStoreIC(ic, assembler);
}
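// Example (hand-written, names hypothetical): `o.x = v` and `o[k] = v` both
// take the stored value from the accumulator:
//
//   Ldar r_v
//   StaNamedPropertySloppy r_o, [x_name_index], [slot]
//
//   Ldar r_v
//   StaKeyedPropertySloppy r_o, r_k, [slot]
//
// The strict variants differ only in which IC stub the handler calls.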
// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* new_context = __ GetAccumulator();
  Node* old_context = __ GetContext();
  __ StoreRegister(old_context, reg_index);
  __ SetContext(new_context);
  __ Dispatch();
}

// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  __ SetContext(context);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = Generator::Generate(assembler, lhs, rhs, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
  DoBinaryOp<AddStub>(assembler);
}

// Sub <src>
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
  DoBinaryOp<SubtractStub>(assembler);
}

// Mul <src>
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
  DoBinaryOp<MultiplyStub>(assembler);
}

// Div <src>
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
  DoBinaryOp<DivideStub>(assembler);
}

// Mod <src>
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
  DoBinaryOp<ModulusStub>(assembler);
}

// BitwiseOr <src>
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseOrStub>(assembler);
}

// BitwiseXor <src>
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseXorStub>(assembler);
}

// BitwiseAnd <src>
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseAndStub>(assembler);
}

// ShiftLeft <src>
//
// Left shifts register <src> by the count specified in the accumulator.
// Register <src> is converted to an int32 and the accumulator to uint32
// before the operation. 5 lsb bits from the accumulator are used as count
// i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftLeftStub>(assembler);
}

// ShiftRight <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// Result is sign extended. Register <src> is converted to an int32 and the
// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
// are used as count i.e. <src> >> (accumulator & 0x1F).
void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftRightStub>(assembler);
}

// ShiftRightLogical <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// Result is zero-filled. The accumulator and register <src> are converted to
// uint32 before the operation. 5 lsb bits from the accumulator are used as
// count i.e. <src> >>> (accumulator & 0x1F).
void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftRightLogicalStub>(assembler);
}
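// Worked example: as the comments above note, only the 5 least significant
// bits of the shift count are used, matching JavaScript semantics. For
// `1 << 33`, the effective count is 33 & 0x1F == 1, so the result is
// 1 << 1 == 2.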
void Interpreter::DoUnaryOp(Callable callable,
                            InterpreterAssembler* assembler) {
  Node* target = __ HeapConstant(callable.code());
  Node* accumulator = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result =
      __ CallStub(callable.descriptor(), target, context, accumulator);
  __ SetAccumulator(result);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = Generator::Generate(assembler, value, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

// ToName
//
// Cast the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToName(isolate_), assembler);
}

// ToNumber
//
// Cast the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
}

// ToObject
//
// Cast the object referenced by the accumulator to a JSObject.
void Interpreter::DoToObject(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToObject(isolate_), assembler);
}

// Inc
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
  DoUnaryOp<IncStub>(assembler);
}

// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
  DoUnaryOp<DecStub>(assembler);
}

Node* Interpreter::BuildToBoolean(Node* value,
                                  InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  return ToBooleanStub::Generate(assembler, value, context);
}

Node* Interpreter::BuildLogicalNot(Node* value,
                                   InterpreterAssembler* assembler) {
  Variable result(assembler, MachineRepresentation::kTagged);
  Label if_true(assembler), if_false(assembler), end(assembler);
  Node* true_value = __ BooleanConstant(true);
  Node* false_value = __ BooleanConstant(false);
  __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
  __ Bind(&if_true);
  {
    result.Bind(false_value);
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    if (FLAG_debug_code) {
      __ AbortIfWordNotEqual(value, false_value,
                             BailoutReason::kExpectedBooleanValue);
    }
    result.Bind(true_value);
    __ Goto(&end);
  }
  __ Bind(&end);
  return result.value();
}

// ToBooleanLogicalNot
//
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(value, assembler);
  Node* result = BuildLogicalNot(to_boolean_value, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LogicalNot
//
// Perform logical-not on the accumulator, which must already be a boolean
// value.
void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* result = BuildLogicalNot(value, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}
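// Example (hand-written): for `!x` with `x` of unknown type the generator
// must emit ToBooleanLogicalNot, while for an expression already known to
// produce a boolean, such as `!(a < b)`, the cheaper LogicalNot suffices;
// the FLAG_debug_code check in BuildLogicalNot guards that assumption.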
// TypeOf
//
// Load the accumulator with the string representing the type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::Typeof(isolate_), assembler);
}

void Interpreter::DoDelete(Runtime::FunctionId function_id,
                           InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* key = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, object, key);
  __ SetAccumulator(result);
  __ Dispatch();
}

// DeletePropertyStrict
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following strict mode semantics.
void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Strict, assembler);
}

// DeletePropertySloppy
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following sloppy mode semantics.
void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
}

void Interpreter::DoJSCall(InterpreterAssembler* assembler,
                           TailCallMode tail_call_mode) {
  Node* function_reg = __ BytecodeOperandReg(0);
  Node* function = __ LoadRegister(function_reg);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* receiver_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
  Node* context = __ GetContext();
  // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
  Node* result =
      __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Call <callable> <receiver> <arg_count>
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
void Interpreter::DoCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kDisallow);
}

// TailCall <callable> <receiver> <arg_count>
//
// Tail call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kAllow);
}

void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers.
void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
  DoCallRuntimeCommon(assembler);
}

// InvokeIntrinsic <function_id> <first_arg> <arg_count>
//
// Implements the semantic equivalent of calling the runtime function
// |function_id| with the first argument in |first_arg| and |arg_count|
// arguments in subsequent registers.
void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandIntrinsicId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* arg_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  IntrinsicsHelper helper(assembler);
  Node* result =
      helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
  __ SetAccumulator(result);
  __ Dispatch();
}
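// Example (hand-written, register names hypothetical): `f(x, y)` places the
// receiver and arguments in consecutive registers and emits
//
//   Call r_f, r_receiver, #3   ; 3 = receiver plus two arguments
//
// DoJSCall subtracts the implicit receiver from the count before CallJS.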
void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
  // Call the runtime function.
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result_pair =
      __ CallRuntimeN(function_id, context, first_arg, args_count, 2);

  // Store the results in <first_return> and <first_return + 1>.
  Node* first_return_reg = __ BytecodeOperandReg(3);
  Node* second_return_reg = __ NextRegister(first_return_reg);
  Node* result0 = __ Projection(0, result_pair);
  Node* result1 = __ Projection(1, result_pair);
  __ StoreRegister(result0, first_return_reg);
  __ StoreRegister(result1, second_return_reg);
  __ Dispatch();
}

// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
//
// Call the runtime function |function_id| which returns a pair, with the
// first argument in register |first_arg| and |arg_count| arguments in
// subsequent registers. Returns the result in <first_return> and
// <first_return + 1>.
void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
  DoCallRuntimeForPairCommon(assembler);
}

void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
  Node* context_index = __ BytecodeOperandIdx(0);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);

  // Get the function to call from the native context.
  Node* context = __ GetContext();
  Node* native_context =
      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
  Node* function = __ LoadContextSlot(native_context, context_index);

  // Call the function.
  Node* result = __ CallJS(function, context, first_arg, args_count,
                           TailCallMode::kDisallow);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallJSRuntime <context_index> <receiver> <arg_count>
//
// Call the JS runtime function that has the |context_index| with the receiver
// in register |receiver| and |arg_count| arguments in subsequent registers.
void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
  DoCallJSRuntimeCommon(assembler);
}

void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
  Node* new_target = __ GetAccumulator();
  Node* constructor_reg = __ BytecodeOperandReg(0);
  Node* constructor = __ LoadRegister(constructor_reg);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ CallConstruct(constructor, context, new_target, first_arg,
                                  args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers. The new.target is in the accumulator.
void Interpreter::DoNew(InterpreterAssembler* assembler) {
  DoCallConstruct(assembler);
}

// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<EqualStub>(assembler);
}

// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<NotEqualStub>(assembler);
}
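// Example (hand-written, register names hypothetical): every Test* bytecode
// takes its left operand from a register and its right operand from the
// accumulator, leaving a boolean in the accumulator, e.g. `a == b`:
//
//   Ldar r_b         ; rhs in the accumulator
//   TestEqual r_a    ; accumulator <- (r_a == accumulator)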
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the
// accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
  DoBinaryOp<StrictEqualStub>(assembler);
}

// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
  DoBinaryOp<LessThanStub>(assembler);
}

// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
  DoBinaryOp<GreaterThanStub>(assembler);
}

// TestLessThanOrEqual <src>
//
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<LessThanOrEqualStub>(assembler);
}

// TestGreaterThanOrEqual <src>
//
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<GreaterThanOrEqualStub>(assembler);
}

// TestIn <src>
//
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
  DoBinaryOp<HasPropertyStub>(assembler);
}

// TestInstanceOf <src>
//
// Test if the object referenced by the register is an instance of the type
// referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
  DoBinaryOp<InstanceOfStub>(assembler);
}

// Jump <imm>
//
// Jump by number of bytes represented by the immediate operand |imm|.
void Interpreter::DoJump(InterpreterAssembler* assembler) {
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ Jump(relative_jump);
}

// JumpConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ Jump(relative_jump);
}

// JumpIfTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains true.
void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(accumulator, true_value, relative_jump);
}

// JumpIfTrueConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains true.
void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(accumulator, true_value, relative_jump);
}

// JumpIfFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains false.
void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
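// Note (illustrative): each conditional jump has a *Constant twin because a
// jump delta may not fit the immediate operand; the generator then stores
// the delta as a Smi in the constant pool, e.g.
//
//   JumpIfTrue #12          ; small delta, immediate operand
//   JumpIfTrueConstant [5]  ; large delta, read from constant pool entry 5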
// JumpIfFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains false.
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
}

// JumpIfToBooleanTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}

// JumpIfToBooleanTrueConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is true when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
    InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}

// JumpIfToBooleanFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}

// JumpIfToBooleanFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is false when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
    InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}

// JumpIfNull <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
// JumpIfNullConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}

// JumpIfUndefined <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfUndefinedConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfNotHole <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the hole.
void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

// JumpIfNotHoleConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is not the hole constant.
void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
  Callable callable = CodeFactory::FastCloneRegExp(isolate_);
  Node* target = __ HeapConstant(callable.code());
  Node* index = __ BytecodeOperandIdx(0);
  Node* pattern = __ LoadConstantPoolEntry(index);
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* flags_raw = __ BytecodeOperandFlag(2);
  Node* flags = __ SmiTag(flags_raw);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallStub(callable.descriptor(), target, context, closure,
                             literal_index, pattern, flags);
  __ SetAccumulator(result);
  __ Dispatch();
}
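// Example (hand-written, indices hypothetical): the literal `/ab+c/gi`
// stores the pattern string "ab+c" in the constant pool and emits
//
//   CreateRegExpLiteral [pattern_idx], [literal_idx], #flags
//
// with the flags operand encoding 'g' and 'i'.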
// CreateArrayLiteral <element_idx> <literal_idx> <flags>
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant_elements = __ LoadConstantPoolEntry(index);
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* flags_raw = __ BytecodeOperandFlag(2);
  Node* flags = __ SmiTag(flags_raw);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
                                literal_index, constant_elements, flags);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* bytecode_flags = __ BytecodeOperandFlag(2);
  Node* closure = __ LoadRegister(Register::function_closure());

  // Check if we can do a fast clone or have to call the runtime.
  Label if_fast_clone(assembler),
      if_not_fast_clone(assembler, Label::kDeferred);
  Node* fast_clone_properties_count =
      __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
          bytecode_flags);
  __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);

  __ Bind(&if_fast_clone);
  {
    // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
        assembler, &if_not_fast_clone, closure, literal_index,
        fast_clone_properties_count);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_not_fast_clone);
  {
    // If we can't do a fast clone, call into the runtime.
    Node* index = __ BytecodeOperandIdx(0);
    Node* constant_elements = __ LoadConstantPoolEntry(index);
    Node* context = __ GetContext();

    STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
    Node* flags_raw = __ Word32And(
        bytecode_flags,
        __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
    Node* flags = __ SmiTag(flags_raw);

    Node* result =
        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
                       literal_index, constant_elements, flags);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// CreateClosure <index> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
  // calling into the runtime.
  Node* index = __ BytecodeOperandIdx(0);
  Node* shared = __ LoadConstantPoolEntry(index);
  Node* tenured_raw = __ BytecodeOperandFlag(1);
  Node* tenured = __ SmiTag(tenured_raw);
  Node* context = __ GetContext();
  Node* result =
      __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
  __ SetAccumulator(result);
  __ Dispatch();
}
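// Example (hand-written, index and register names hypothetical): a function
// expression such as
//
//   var f = function() { return 1; };
//
// compiles to roughly
//
//   CreateClosure [shared_info_index], #0   ; #0 = not tenured
//   Star r_f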
// CreateMappedArguments
//
// Creates a new mapped arguments object.
void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();

  Label if_duplicate_parameters(assembler, Label::kDeferred);
  Label if_not_duplicate_parameters(assembler);

  // Check if function has duplicate parameters.
  // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
  // duplicate parameters.
  Node* shared_info =
      __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
  Node* compiler_hints = __ LoadObjectField(
      shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
      MachineType::Uint8());
  Node* duplicate_parameters_bit = __ Int32Constant(
      1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
  Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
  __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);

  __ Bind(&if_not_duplicate_parameters);
  {
    // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
    Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
    Node* target = __ HeapConstant(callable.code());
    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_duplicate_parameters);
  {
    Node* result =
        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// CreateUnmappedArguments
//
// Creates a new unmapped arguments object.
void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
  Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
  Node* target = __ HeapConstant(callable.code());
  Node* context = __ GetContext();
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateRestParameter
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
  Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
  Node* target = __ HeapConstant(callable.code());
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StackCheck
//
// Performs a stack guard check.
void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);

  Node* interrupt = __ StackCheckTriggeredInterrupt();
  __ BranchIf(interrupt, &stack_check_interrupt, &ok);

  __ Bind(&ok);
  __ Dispatch();

  __ Bind(&stack_check_interrupt);
  {
    Node* context = __ GetContext();
    __ CallRuntime(Runtime::kStackGuard, context);
    __ Dispatch();
  }
}

// Throw
//
// Throws the exception in the accumulator.
void Interpreter::DoThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// ReThrow
//
// Re-throws the exception in the accumulator.
void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kReThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// Return
//
// Return the value in the accumulator.
void Interpreter::DoReturn(InterpreterAssembler* assembler) {
  __ UpdateInterruptBudgetOnReturn();
  Node* accumulator = __ GetAccumulator();
  __ Return(accumulator);
}
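// Example (hand-written, register name hypothetical): `return x;` compiles
// to
//
//   Ldar r_x
//   Return
//
// with the return value travelling in the accumulator.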
// Debugger
//
// Call runtime to handle debugger statement.
void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
  __ Dispatch();
}

// DebugBreak
//
// Call runtime to handle a debug break.
#define DEBUG_BREAK(Name, ...)                                              \
  void Interpreter::Do##Name(InterpreterAssembler* assembler) {             \
    Node* context = __ GetContext();                                        \
    Node* accumulator = __ GetAccumulator();                                \
    Node* original_handler =                                                \
        __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
    __ DispatchToBytecodeHandler(original_handler);                         \
  }
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK

// ForInPrepare <cache_info_triple>
//
// Returns state for for..in loop execution based on the object in the
// accumulator. The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
  Node* object = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);

  // Set output registers:
  //   0 == cache_type, 1 == cache_array, 2 == cache_length
  Node* output_register = __ BytecodeOperandReg(0);
  for (int i = 0; i < 3; i++) {
    Node* cache_info = __ Projection(i, result_triple);
    __ StoreRegister(cache_info, output_register);
    output_register = __ NextRegister(output_register);
  }
  __ Dispatch();
}

// ForInNext <receiver> <index> <cache_info_pair>
//
// Returns the next enumerable property in the accumulator.
void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
  Node* receiver_reg = __ BytecodeOperandReg(0);
  Node* receiver = __ LoadRegister(receiver_reg);
  Node* index_reg = __ BytecodeOperandReg(1);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_type_reg = __ BytecodeOperandReg(2);
  Node* cache_type = __ LoadRegister(cache_type_reg);
  Node* cache_array_reg = __ NextRegister(cache_type_reg);
  Node* cache_array = __ LoadRegister(cache_array_reg);

  // Load the next key from the enumeration array.
  Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
                                       CodeStubAssembler::SMI_PARAMETERS);

  // Check if we can use the for-in fast path potentially using the enum cache.
  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
  Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
  Node* condition = __ WordEqual(receiver_map, cache_type);
  __ BranchIf(condition, &if_fast, &if_slow);
  __ Bind(&if_fast);
  {
    // Enum cache in use for {receiver}, the {key} is definitely valid.
    __ SetAccumulator(key);
    __ Dispatch();
  }
  __ Bind(&if_slow);
  {
    // Record the fact that we hit the for-in slow path.
    Node* vector_index = __ BytecodeOperandIdx(3);
    Node* type_feedback_vector = __ LoadTypeFeedbackVector();
    Node* megamorphic_sentinel =
        __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
    __ StoreFixedArrayElement(type_feedback_vector, vector_index,
                              megamorphic_sentinel, SKIP_WRITE_BARRIER);

    // Need to filter the {key} for the {receiver}.
    Node* context = __ GetContext();
    Node* result =
        __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}
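// Sketch (hand-written, register names hypothetical) of the bytecode shape
// for `for (var key in obj) { ... }`, using the handlers above and the
// ForInDone/ForInStep handlers that follow:
//
//   ForInPrepare r_cache          ; r_cache..r_cache+2 <- type/array/length
//   LdaZero
//   Star r_index
// loop:
//   ForInDone r_index, r_cache_length
//   JumpIfTrue done
//   ForInNext r_obj, r_index, r_cache_type, [slot]
//   JumpIfUndefined step          ; key was filtered out
//   Star r_key
//   ... loop body ...
// step:
//   ForInStep r_index
//   Star r_index
//   Jump loop
// done: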
// ForInDone <index> <cache_length>
//
// Returns true if the end of the enumerable properties has been reached.
void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_length_reg = __ BytecodeOperandReg(1);
  Node* cache_length = __ LoadRegister(cache_length_reg);

  // Check if {index} is at {cache_length} already.
  Label if_true(assembler), if_false(assembler), end(assembler);
  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
  __ Bind(&if_true);
  {
    __ SetAccumulator(__ BooleanConstant(true));
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    __ SetAccumulator(__ BooleanConstant(false));
    __ Goto(&end);
  }
  __ Bind(&end);
  __ Dispatch();
}

// ForInStep <index>
//
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* one = __ SmiConstant(Smi::FromInt(1));
  Node* result = __ SmiAdd(index, one);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Wide
//
// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
void Interpreter::DoWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kDouble);
}

// ExtraWide
//
// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kQuadruple);
}

// Illegal
//
// An invalid bytecode aborting execution if dispatched.
void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
  __ Abort(kInvalidBytecode);
}

// Nop
//
// No operation.
void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }

// SuspendGenerator
code) { #ifdef ENABLE_DISASSEMBLER if (FLAG_trace_ignition_codegen) { OFStream os(stdout); code->Disassemble(nullptr, os); os << std::flush; } #endif // ENABLE_DISASSEMBLER } const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) { #ifdef ENABLE_DISASSEMBLER #define RETURN_NAME(Name, ...) \ if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \ code->entry()) { \ return #Name; \ } BYTECODE_LIST(RETURN_NAME) #undef RETURN_NAME #endif // ENABLE_DISASSEMBLER return nullptr; } uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const { int from_index = Bytecodes::ToByte(from); int to_index = Bytecodes::ToByte(to); return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes + to_index]; } Local Interpreter::GetDispatchCountersObject() { v8::Isolate* isolate = reinterpret_cast(isolate_); Local context = isolate->GetCurrentContext(); Local counters_map = v8::Object::New(isolate); // Output is a JSON-encoded object of objects. // // The keys on the top level object are source bytecodes, // and corresponding value are objects. Keys on these last are the // destinations of the dispatch and the value associated is a counter for // the correspondent source-destination dispatch chain. // // Only non-zero counters are written to file, but an entry in the top-level // object is always present, even if the value is empty because all counters // for that source are zero. for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) { Bytecode from_bytecode = Bytecodes::FromByte(from_index); Local counters_row = v8::Object::New(isolate); for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) { Bytecode to_bytecode = Bytecodes::FromByte(to_index); uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode); if (counter > 0) { std::string to_name = Bytecodes::ToString(to_bytecode); Local to_name_object = v8::String::NewFromUtf8(isolate, to_name.c_str(), NewStringType::kNormal) .ToLocalChecked(); Local counter_object = v8::Number::New(isolate, counter); CHECK(counters_row ->DefineOwnProperty(context, to_name_object, counter_object) .IsJust()); } } std::string from_name = Bytecodes::ToString(from_bytecode); Local from_name_object = v8::String::NewFromUtf8(isolate, from_name.c_str(), NewStringType::kNormal) .ToLocalChecked(); CHECK( counters_map->DefineOwnProperty(context, from_name_object, counters_row) .IsJust()); } return counters_map; } // LdaZero // // Load literal '0' into the accumulator. void Interpreter::DoLdaZero(InterpreterAssembler* assembler) { Node* zero_value = __ NumberConstant(0.0); __ SetAccumulator(zero_value); __ Dispatch(); } // LdaSmi // // Load an integer literal into the accumulator as a Smi. void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) { Node* raw_int = __ BytecodeOperandImm(0); Node* smi_int = __ SmiTag(raw_int); __ SetAccumulator(smi_int); __ Dispatch(); } // LdaConstant // // Load constant literal at |idx| in the constant pool into the accumulator. void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) { Node* index = __ BytecodeOperandIdx(0); Node* constant = __ LoadConstantPoolEntry(index); __ SetAccumulator(constant); __ Dispatch(); } // LdaUndefined // // Load Undefined into the accumulator. void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) { Node* undefined_value = __ HeapConstant(isolate_->factory()->undefined_value()); __ SetAccumulator(undefined_value); __ Dispatch(); } // LdrUndefined // // Loads undefined into the accumulator and |reg|. 
void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) { Node* undefined_value = __ HeapConstant(isolate_->factory()->undefined_value()); Node* destination = __ BytecodeOperandReg(0); __ StoreRegister(undefined_value, destination); __ Dispatch(); } // LdaNull // // Load Null into the accumulator. void Interpreter::DoLdaNull(InterpreterAssembler* assembler) { Node* null_value = __ HeapConstant(isolate_->factory()->null_value()); __ SetAccumulator(null_value); __ Dispatch(); } // LdaTheHole // // Load TheHole into the accumulator. void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) { Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value()); __ SetAccumulator(the_hole_value); __ Dispatch(); } // LdaTrue // // Load True into the accumulator. void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) { Node* true_value = __ HeapConstant(isolate_->factory()->true_value()); __ SetAccumulator(true_value); __ Dispatch(); } // LdaFalse // // Load False into the accumulator. void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) { Node* false_value = __ HeapConstant(isolate_->factory()->false_value()); __ SetAccumulator(false_value); __ Dispatch(); } // Ldar // // Load accumulator with value from register . void Interpreter::DoLdar(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* value = __ LoadRegister(reg_index); __ SetAccumulator(value); __ Dispatch(); } // Star // // Store accumulator to register . void Interpreter::DoStar(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* accumulator = __ GetAccumulator(); __ StoreRegister(accumulator, reg_index); __ Dispatch(); } // Mov // // Stores the value of register to register . void Interpreter::DoMov(InterpreterAssembler* assembler) { Node* src_index = __ BytecodeOperandReg(0); Node* src_value = __ LoadRegister(src_index); Node* dst_index = __ BytecodeOperandReg(1); __ StoreRegister(src_value, dst_index); __ Dispatch(); } Node* Interpreter::BuildLoadGlobal(Callable ic, InterpreterAssembler* assembler) { // Get the global object. Node* context = __ GetContext(); // Load the global via the LoadGlobalIC. Node* code_target = __ HeapConstant(ic.code()); Node* raw_slot = __ BytecodeOperandIdx(0); Node* smi_slot = __ SmiTag(raw_slot); Node* type_feedback_vector = __ LoadTypeFeedbackVector(); return __ CallStub(ic.descriptor(), code_target, context, smi_slot, type_feedback_vector); } // LdaGlobal // // Load the global with name in constant pool entry into the // accumulator using FeedBackVector slot outside of a typeof. void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) { Callable ic = CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF); Node* result = BuildLoadGlobal(ic, assembler); __ SetAccumulator(result); __ Dispatch(); } // LdrGlobal // // Load the global with name in constant pool entry into // register using FeedBackVector slot outside of a typeof. void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) { Callable ic = CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF); Node* result = BuildLoadGlobal(ic, assembler); Node* destination = __ BytecodeOperandReg(1); __ StoreRegister(result, destination); __ Dispatch(); } // LdaGlobalInsideTypeof // // Load the global with name in constant pool entry into the // accumulator using FeedBackVector slot inside of a typeof. 
// LdaGlobalInsideTypeof <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using TypeFeedbackVector slot <slot> inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();
  Node* native_context =
      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);

  // Store the global via the StoreIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* constant_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaGlobalSloppy <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using TypeFeedbackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStaGlobal(ic, assembler);
}

// StaGlobalStrict <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using TypeFeedbackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStaGlobal(ic, assembler);
}

compiler::Node* Interpreter::BuildLoadContextSlot(
    InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  return __ LoadContextSlot(context, slot_index);
}

// LdaContextSlot <context> <slot_index>
//
// Load the object in |slot_index| of |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
  Node* result = BuildLoadContextSlot(assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrContextSlot <context> <slot_index> <reg>
//
// Load the object in <slot_index> of <context> into register <reg>.
void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
  Node* result = BuildLoadContextSlot(assembler);
  Node* destination = __ BytecodeOperandReg(2);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  __ StoreContextSlot(context, slot_index, value);
  __ Dispatch();
}

void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
                                  InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, name);
  __ SetAccumulator(result);
  __ Dispatch();
}
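// Editorial note (hedged): the lookup-slot handlers that follow differ from
// the context-slot handlers above in when the variable is resolved. A
// captured variable whose context slot is known at compile time uses
// LdaContextSlot/StaContextSlot directly, while code such as
//
//   with (o) { return x; }   // x may resolve to o.x or to an outer binding
//
// cannot be resolved statically and falls back to LdaLookupSlot, which calls
// into the runtime (Runtime::kLoadLookupSlot) to walk the scope chain. The
// example is illustrative, not taken from the original file.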
// LdaLookupSlot <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupSlotInsideTypeof <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
                                  InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(is_strict(language_mode)
                                    ? Runtime::kStoreLookupSlot_Strict
                                    : Runtime::kStoreLookupSlot_Sloppy,
                                context, name, value);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StaLookupSlotSloppy <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
}

// StaLookupSlotStrict <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::STRICT, assembler);
}

Node* Interpreter::BuildLoadNamedProperty(Callable ic,
                                          InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* register_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(register_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  return __ CallStub(ic.descriptor(), code_target, context, object, name,
                     smi_slot, type_feedback_vector);
}

// LdaNamedProperty <object> <name_index> <slot>
//
// Calls the LoadIC at TypeFeedbackVector slot <slot> for <object> and the
// name at constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadNamedProperty(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrNamedProperty <object> <name_index> <slot> <reg>
//
// Calls the LoadIC at TypeFeedbackVector slot <slot> for <object> and the
// name at constant pool entry <name_index> and puts the result into
// register <reg>.
void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadNamedProperty(ic, assembler);
  Node* destination = __ BytecodeOperandReg(3);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
                                          InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* name = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  return __ CallStub(ic.descriptor(), code_target, context, object, name,
                     smi_slot, type_feedback_vector);
}
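// Editorial sketch (not from the original source): the two builders above
// mirror the two property-access forms in JS. Assuming registers and feedback
// slots as shown:
//
//   o.x   =>  LdaNamedProperty r0, [name_index], [slot]  ;; name from pool
//   o[k]  =>  Ldar r1                                    ;; key to accumulator
//             LdaKeyedProperty r0, [slot]                ;; key from accumulator
//
// Both route through a CallStub to the (Keyed)LoadIC so the IC can record
// type feedback in the named slot.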
// LdaKeyedProperty <object> <slot>
//
// Calls the KeyedLoadIC at TypeFeedbackVector slot <slot> for <object> and
// the key in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadKeyedProperty(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrKeyedProperty <object> <slot> <reg>
//
// Calls the KeyedLoadIC at TypeFeedbackVector slot <slot> for <object> and
// the key in the accumulator and puts the result in register <reg>.
void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadKeyedProperty(ic, assembler);
  Node* destination = __ BytecodeOperandReg(2);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaNamedPropertySloppy <object> <name_index> <slot>
//
// Calls the sloppy mode StoreIC at TypeFeedbackVector slot <slot> for
// <object> and the name in constant pool entry <name_index> with the value
// in the accumulator.
void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStoreIC(ic, assembler);
}

// StaNamedPropertyStrict <object> <name_index> <slot>
//
// Calls the strict mode StoreIC at TypeFeedbackVector slot <slot> for
// <object> and the name in constant pool entry <name_index> with the value
// in the accumulator.
void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStoreIC(ic, assembler);
}

void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* name_reg_index = __ BytecodeOperandReg(1);
  Node* name = __ LoadRegister(name_reg_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaKeyedPropertySloppy <object> <key> <slot>
//
// Calls the sloppy mode KeyedStoreIC at TypeFeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
  DoKeyedStoreIC(ic, assembler);
}

// StaKeyedPropertyStrict <object> <key> <slot>
//
// Calls the strict mode KeyedStoreIC at TypeFeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
  DoKeyedStoreIC(ic, assembler);
}
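// Editorial note (hedged): the store handlers are symmetric with the loads:
// the value always travels in the accumulator, the object (and, for keyed
// stores, the key) in registers, and the trailing index operand names the
// TypeFeedbackVector slot the IC records into. Illustratively:
//
//   o.x = v  =>  Ldar r_v
//                StaNamedPropertySloppy r_o, [name_index], [slot]
//
// Register names here are placeholders, not actual generator output.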
// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* new_context = __ GetAccumulator();
  Node* old_context = __ GetContext();
  __ StoreRegister(old_context, reg_index);
  __ SetContext(new_context);
  __ Dispatch();
}

// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  __ SetContext(context);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = Generator::Generate(assembler, lhs, rhs, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
  DoBinaryOp<AddStub>(assembler);
}

// Sub <src>
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
  DoBinaryOp<SubtractStub>(assembler);
}

// Mul <src>
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
  DoBinaryOp<MultiplyStub>(assembler);
}

// Div <src>
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
  DoBinaryOp<DivideStub>(assembler);
}

// Mod <src>
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
  DoBinaryOp<ModulusStub>(assembler);
}

// BitwiseOr <src>
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseOrStub>(assembler);
}

// BitwiseXor <src>
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseXorStub>(assembler);
}

// BitwiseAnd <src>
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseAndStub>(assembler);
}

// ShiftLeft <src>
//
// Left shifts register <src> by the count specified in the accumulator.
// Register <src> is converted to an int32 and the accumulator to uint32
// before the operation. 5 lsb bits from the accumulator are used as count
// i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftLeftStub>(assembler);
}

// ShiftRight <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// Result is sign extended. Register <src> is converted to an int32 and the
// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
// are used as count i.e. <src> >> (accumulator & 0x1F).
void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftRightStub>(assembler);
}
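// Editorial worked example (not from the original source): the five-bit mask
// described in the shift comments matches JS semantics. With the accumulator
// holding 33 and register <src> holding 8:
//
//   count = 33 & 0x1F = 1, so ShiftLeft computes 8 << 1 = 16,
//
// which is exactly what `8 << 33` evaluates to in JavaScript.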
// ShiftRightLogical <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// Result is zero-filled. The accumulator and register <src> are converted to
// uint32 before the operation. 5 lsb bits from the accumulator are used as
// count i.e. <src> >>> (accumulator & 0x1F).
void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftRightLogicalStub>(assembler);
}

void Interpreter::DoUnaryOp(Callable callable,
                            InterpreterAssembler* assembler) {
  Node* target = __ HeapConstant(callable.code());
  Node* accumulator = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result =
      __ CallStub(callable.descriptor(), target, context, accumulator);
  __ SetAccumulator(result);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = Generator::Generate(assembler, value, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

// ToName
//
// Cast the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToName(isolate_), assembler);
}

// ToNumber
//
// Cast the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
}

// ToObject
//
// Cast the object referenced by the accumulator to a JSObject.
void Interpreter::DoToObject(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToObject(isolate_), assembler);
}

// Inc
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
  DoUnaryOp<IncStub>(assembler);
}

// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
  DoUnaryOp<DecStub>(assembler);
}

Node* Interpreter::BuildToBoolean(Node* value,
                                  InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  return ToBooleanStub::Generate(assembler, value, context);
}

Node* Interpreter::BuildLogicalNot(Node* value,
                                   InterpreterAssembler* assembler) {
  Variable result(assembler, MachineRepresentation::kTagged);
  Label if_true(assembler), if_false(assembler), end(assembler);
  Node* true_value = __ BooleanConstant(true);
  Node* false_value = __ BooleanConstant(false);
  __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
  __ Bind(&if_true);
  {
    result.Bind(false_value);
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    if (FLAG_debug_code) {
      __ AbortIfWordNotEqual(value, false_value,
                             BailoutReason::kExpectedBooleanValue);
    }
    result.Bind(true_value);
    __ Goto(&end);
  }
  __ Bind(&end);
  return result.value();
}

// ToBooleanLogicalNot
//
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(value, assembler);
  Node* result = BuildLogicalNot(to_boolean_value, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LogicalNot
//
// Perform logical-not on the accumulator, which must already be a boolean
// value.
void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* result = BuildLogicalNot(value, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}
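// Editorial note (hedged): the bytecode generator is expected to emit
// ToBooleanLogicalNot for a generic `!x`, and the plain LogicalNot only when
// the operand is statically known to be a boolean (for instance the result of
// a comparison). That invariant is why BuildLogicalNot can assert under
// --debug-code, via AbortIfWordNotEqual, that the value is exactly true or
// false.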
// TypeOf
//
// Load the accumulator with the string representing the type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::Typeof(isolate_), assembler);
}

void Interpreter::DoDelete(Runtime::FunctionId function_id,
                           InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* key = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, object, key);
  __ SetAccumulator(result);
  __ Dispatch();
}

// DeletePropertyStrict
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following strict mode semantics.
void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Strict, assembler);
}

// DeletePropertySloppy
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following sloppy mode semantics.
void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
}

void Interpreter::DoJSCall(InterpreterAssembler* assembler,
                           TailCallMode tail_call_mode) {
  Node* function_reg = __ BytecodeOperandReg(0);
  Node* function = __ LoadRegister(function_reg);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* receiver_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
  Node* context = __ GetContext();
  // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
  Node* result =
      __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Call <callable> <receiver> <arg_count>
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
void Interpreter::DoCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kDisallow);
}

// TailCall <callable> <receiver> <arg_count>
//
// Tail call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kAllow);
}

void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers.
void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
  DoCallRuntimeCommon(assembler);
}
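// Editorial sketch (not from the original source): for a call like `f(a, b)`
// the generator materializes the callee, receiver and arguments in
// consecutive registers, so the Call bytecode carries a register plus a
// count. Since the count operand includes the implicit receiver, DoJSCall's
//
//   args_count = receiver_args_count - 1
//
// recovers the user-visible argument count (3 - 1 = 2 for `f(a, b)`).
// Operand encodings differ across V8 versions; this is illustrative only.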
// InvokeIntrinsic <function_id> <first_arg> <arg_count>
//
// Implements the semantic equivalent of calling the runtime function
// |function_id| with the first argument in |first_arg| and |arg_count|
// arguments in subsequent registers.
void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandIntrinsicId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* arg_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  IntrinsicsHelper helper(assembler);
  Node* result =
      helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
  // Call the runtime function.
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result_pair =
      __ CallRuntimeN(function_id, context, first_arg, args_count, 2);

  // Store the results in <first_return> and <first_return + 1>.
  Node* first_return_reg = __ BytecodeOperandReg(3);
  Node* second_return_reg = __ NextRegister(first_return_reg);
  Node* result0 = __ Projection(0, result_pair);
  Node* result1 = __ Projection(1, result_pair);
  __ StoreRegister(result0, first_return_reg);
  __ StoreRegister(result1, second_return_reg);
  __ Dispatch();
}

// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
//
// Call the runtime function |function_id| which returns a pair, with the
// first argument in register |first_arg| and |arg_count| arguments in
// subsequent registers. Returns the result in <first_return> and
// <first_return + 1>.
void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
  DoCallRuntimeForPairCommon(assembler);
}

void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
  Node* context_index = __ BytecodeOperandIdx(0);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);

  // Get the function to call from the native context.
  Node* context = __ GetContext();
  Node* native_context =
      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
  Node* function = __ LoadContextSlot(native_context, context_index);

  // Call the function.
  Node* result = __ CallJS(function, context, first_arg, args_count,
                           TailCallMode::kDisallow);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallJSRuntime <context_index> <receiver> <arg_count>
//
// Call the JS runtime function that has the |context_index| with the receiver
// in register |receiver| and |arg_count| arguments in subsequent registers.
void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
  DoCallJSRuntimeCommon(assembler);
}

void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
  Node* new_target = __ GetAccumulator();
  Node* constructor_reg = __ BytecodeOperandReg(0);
  Node* constructor = __ LoadRegister(constructor_reg);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ CallConstruct(constructor, context, new_target, first_arg,
                                  args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}
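// Editorial sketch (not from the original source): for `new C(x)` the
// generator typically places C and the argument in consecutive registers and
// arranges for the implicit new.target to arrive in the accumulator before
// emitting the New bytecode below; DoCallConstruct therefore reads new.target
// from the accumulator and everything else from registers. The exact register
// choices are illustrative, not actual generator output.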
// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers. The new.target is in the accumulator.
void Interpreter::DoNew(InterpreterAssembler* assembler) {
  DoCallConstruct(assembler);
}

// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<EqualStub>(assembler);
}

// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<NotEqualStub>(assembler);
}

// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the
// accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
  DoBinaryOp<StrictEqualStub>(assembler);
}

// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
  DoBinaryOp<LessThanStub>(assembler);
}

// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
  DoBinaryOp<GreaterThanStub>(assembler);
}

// TestLessThanOrEqual <src>
//
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<LessThanOrEqualStub>(assembler);
}

// TestGreaterThanOrEqual <src>
//
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<GreaterThanOrEqualStub>(assembler);
}

// TestIn <src>
//
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
  DoBinaryOp<HasPropertyStub>(assembler);
}

// TestInstanceOf <src>
//
// Test if the object referenced by the <src> register is an instance of type
// referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
  DoBinaryOp<InstanceOfStub>(assembler);
}

// Jump <imm>
//
// Jump by number of bytes represented by the immediate operand |imm|.
void Interpreter::DoJump(InterpreterAssembler* assembler) {
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ Jump(relative_jump);
}

// JumpConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ Jump(relative_jump);
}

// JumpIfTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains true.
void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(accumulator, true_value, relative_jump);
}

// JumpIfTrueConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains true.
void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
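// Editorial note (hedged): every conditional jump above comes in two
// encodings because the relative offset is an immediate operand. Offsets that
// do not fit the (possibly scaled) immediate are stored as Smis in the
// constant pool, and the generator emits the *Constant variant instead, which
// loads and untags the offset (LoadConstantPoolEntry + SmiUntag) before
// jumping.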
// JumpIfFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains false.
void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
}

// JumpIfFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains false.
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
}

// JumpIfToBooleanTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}

// JumpIfToBooleanTrueConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is true when the object is
// cast to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
    InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}

// JumpIfToBooleanFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}

// JumpIfToBooleanFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is false when the object is
// cast to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
    InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}
// JumpIfNull <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}

// JumpIfNullConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}

// JumpIfUndefined <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfUndefinedConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfNotHole <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the hole.
void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value =
      __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

// JumpIfNotHoleConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is not the hole constant.
void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value =
      __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
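// Editorial note (hedged): the hole checks above are what back temporal dead
// zone semantics. A `let` binding holds the_hole until its initializer runs,
// so reading it early observes the hole; generated code can use JumpIfNotHole
// to skip the "use before initialization" error path once the slot is known
// to be initialized. This explanation is ours, not the file's.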
// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
  Callable callable = CodeFactory::FastCloneRegExp(isolate_);
  Node* target = __ HeapConstant(callable.code());
  Node* index = __ BytecodeOperandIdx(0);
  Node* pattern = __ LoadConstantPoolEntry(index);
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* flags_raw = __ BytecodeOperandFlag(2);
  Node* flags = __ SmiTag(flags_raw);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallStub(callable.descriptor(), target, context, closure,
                             literal_index, pattern, flags);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateArrayLiteral <element_idx> <literal_idx> <flags>
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant_elements = __ LoadConstantPoolEntry(index);
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* flags_raw = __ BytecodeOperandFlag(2);
  Node* flags = __ SmiTag(flags_raw);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
                                literal_index, constant_elements, flags);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* bytecode_flags = __ BytecodeOperandFlag(2);
  Node* closure = __ LoadRegister(Register::function_closure());

  // Check if we can do a fast clone or have to call the runtime.
  Label if_fast_clone(assembler),
      if_not_fast_clone(assembler, Label::kDeferred);
  Node* fast_clone_properties_count = __ BitFieldDecode<
      CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
  __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);

  __ Bind(&if_fast_clone);
  {
    // If we can do a fast clone do the fast-path in
    // FastCloneShallowObjectStub.
    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
        assembler, &if_not_fast_clone, closure, literal_index,
        fast_clone_properties_count);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_not_fast_clone);
  {
    // If we can't do a fast clone, call into the runtime.
    Node* index = __ BytecodeOperandIdx(0);
    Node* constant_elements = __ LoadConstantPoolEntry(index);
    Node* context = __ GetContext();

    STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
    Node* flags_raw = __ Word32And(
        bytecode_flags,
        __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
    Node* flags = __ SmiTag(flags_raw);

    Node* result =
        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
                       literal_index, constant_elements, flags);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// CreateClosure <index> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
  // calling into the runtime.
  Node* index = __ BytecodeOperandIdx(0);
  Node* shared = __ LoadConstantPoolEntry(index);
  Node* tenured_raw = __ BytecodeOperandFlag(1);
  Node* tenured = __ SmiTag(tenured_raw);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(Runtime::kInterpreterNewClosure, context,
                                shared, tenured);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateMappedArguments
//
// Creates a new mapped arguments object.
void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();

  Label if_duplicate_parameters(assembler, Label::kDeferred);
  Label if_not_duplicate_parameters(assembler);

  // Check if function has duplicate parameters.
  // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
  // duplicate parameters.
  Node* shared_info =
      __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
  Node* compiler_hints = __ LoadObjectField(
      shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
      MachineType::Uint8());
  Node* duplicate_parameters_bit = __ Int32Constant(
      1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
  Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
  __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);

  __ Bind(&if_not_duplicate_parameters);
  {
    // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan
    // stub.
    Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
    Node* target = __ HeapConstant(callable.code());
    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_duplicate_parameters);
  {
    Node* result =
        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// CreateUnmappedArguments
//
// Creates a new unmapped arguments object.
void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
  Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
  Node* target = __ HeapConstant(callable.code());
  Node* context = __ GetContext();
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateRestParameter
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
  Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
  Node* target = __ HeapConstant(callable.code());
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StackCheck
//
// Performs a stack guard check.
void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);

  Node* interrupt = __ StackCheckTriggeredInterrupt();
  __ BranchIf(interrupt, &stack_check_interrupt, &ok);

  __ Bind(&ok);
  __ Dispatch();

  __ Bind(&stack_check_interrupt);
  {
    Node* context = __ GetContext();
    __ CallRuntime(Runtime::kStackGuard, context);
    __ Dispatch();
  }
}
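// Editorial note (hedged): DoStackCheck is the interpreter's interrupt poll.
// The generator is expected to plant StackCheck at function entry and at loop
// back-edges, so stack overflows and pending interrupts (GC, debugger,
// termination) are serviced via Runtime::kStackGuard even in code that never
// calls out. Placement details vary by version; the handler itself only
// branches on StackCheckTriggeredInterrupt.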
// Throw
//
// Throws the exception in the accumulator.
void Interpreter::DoThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// ReThrow
//
// Re-throws the exception in the accumulator.
void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kReThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// Return
//
// Return the value in the accumulator.
void Interpreter::DoReturn(InterpreterAssembler* assembler) {
  __ UpdateInterruptBudgetOnReturn();
  Node* accumulator = __ GetAccumulator();
  __ Return(accumulator);
}

// Debugger
//
// Call runtime to handle debugger statement.
void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
  __ Dispatch();
}

// DebugBreak
//
// Call runtime to handle a debug break.
#define DEBUG_BREAK(Name, ...)                                                \
  void Interpreter::Do##Name(InterpreterAssembler* assembler) {               \
    Node* context = __ GetContext();                                          \
    Node* accumulator = __ GetAccumulator();                                  \
    Node* original_handler =                                                  \
        __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
    __ DispatchToBytecodeHandler(original_handler);                           \
  }
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK

// ForInPrepare <cache_info_triple>
//
// Returns state for for..in loop execution based on the object in the
// accumulator. The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type,
// cache_array, and cache_length respectively.
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
  Node* object = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);

  // Set output registers:
  //   0 == cache_type, 1 == cache_array, 2 == cache_length
  Node* output_register = __ BytecodeOperandReg(0);
  for (int i = 0; i < 3; i++) {
    Node* cache_info = __ Projection(i, result_triple);
    __ StoreRegister(cache_info, output_register);
    output_register = __ NextRegister(output_register);
  }
  __ Dispatch();
}

// ForInNext <receiver> <index> <cache_info_pair>
//
// Returns the next enumerable property in the accumulator.
void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
  Node* receiver_reg = __ BytecodeOperandReg(0);
  Node* receiver = __ LoadRegister(receiver_reg);
  Node* index_reg = __ BytecodeOperandReg(1);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_type_reg = __ BytecodeOperandReg(2);
  Node* cache_type = __ LoadRegister(cache_type_reg);
  Node* cache_array_reg = __ NextRegister(cache_type_reg);
  Node* cache_array = __ LoadRegister(cache_array_reg);

  // Load the next key from the enumeration array.
  Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
                                       CodeStubAssembler::SMI_PARAMETERS);

  // Check if we can use the for-in fast path potentially using the enum
  // cache.
  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
  Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
  Node* condition = __ WordEqual(receiver_map, cache_type);
  __ BranchIf(condition, &if_fast, &if_slow);

  __ Bind(&if_fast);
  {
    // Enum cache in use for {receiver}, the {key} is definitely valid.
    __ SetAccumulator(key);
    __ Dispatch();
  }

  __ Bind(&if_slow);
  {
    // Record the fact that we hit the for-in slow path.
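    // Editorial note (hedged): writing the megamorphic sentinel into the
    // feedback slot below records that this for-in saw a receiver whose map
    // did not match the cache_type, so an optimizing compiler reading the
    // same slot should not assume the enum-cache fast path; the actual key
    // filtering is then delegated to Runtime::kForInFilter.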
    Node* vector_index = __ BytecodeOperandIdx(3);
    Node* type_feedback_vector = __ LoadTypeFeedbackVector();
    Node* megamorphic_sentinel =
        __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
    __ StoreFixedArrayElement(type_feedback_vector, vector_index,
                              megamorphic_sentinel, SKIP_WRITE_BARRIER);

    // Need to filter the {key} for the {receiver}.
    Node* context = __ GetContext();
    Node* result =
        __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// ForInDone <index> <cache_length>
//
// Returns true if the end of the enumerable properties has been reached.
void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_length_reg = __ BytecodeOperandReg(1);
  Node* cache_length = __ LoadRegister(cache_length_reg);

  // Check if {index} is at {cache_length} already.
  Label if_true(assembler), if_false(assembler), end(assembler);
  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);

  __ Bind(&if_true);
  {
    __ SetAccumulator(__ BooleanConstant(true));
    __ Goto(&end);
  }

  __ Bind(&if_false);
  {
    __ SetAccumulator(__ BooleanConstant(false));
    __ Goto(&end);
  }

  __ Bind(&end);
  __ Dispatch();
}

// ForInStep <index>
//
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* one = __ SmiConstant(Smi::FromInt(1));
  Node* result = __ SmiAdd(index, one);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Wide
//
// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
void Interpreter::DoWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kDouble);
}

// ExtraWide
//
// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kQuadruple);
}

// Illegal
//
// An invalid bytecode aborting execution if dispatched.
void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
  __ Abort(kInvalidBytecode);
}

// Nop
//
// No operation.
void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }

// SuspendGenerator