// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_

namespace v8 {
namespace internal {

// Forward declarations
class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;

enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };


// -------------------------------------------------------------------------
// Reference support

// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame.  The reference may be consumed
// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
 public:
  // The values of the types are important; see size().
  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

  Reference(CodeGenerator* cgen,
            Expression* expression,
            bool persist_after_get = false);
  ~Reference();

  Expression* expression() const { return expression_; }
  Type type() const { return type_; }
  void set_type(Type value) {
    ASSERT_EQ(ILLEGAL, type_);
    type_ = value;
  }

  void set_unloaded() {
    ASSERT_NE(ILLEGAL, type_);
    ASSERT_NE(UNLOADED, type_);
    type_ = UNLOADED;
  }

  // The size the reference takes up on the stack.
  int size() const {
    return (type_ < SLOT) ? 0 : type_;
  }

  bool is_illegal() const { return type_ == ILLEGAL; }
  bool is_slot() const { return type_ == SLOT; }
  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
  bool is_unloaded() const { return type_ == UNLOADED; }

  // Return the name.  Only valid for named property references.
  Handle<String> GetName();

  // Generate code to push the value of the reference on top of the
  // expression stack.  The reference is expected to be already on top of
  // the expression stack, and it is consumed by the call unless the
  // reference is for a compound assignment.
  // If the reference is not consumed, it is left in place under its value.
  void GetValue();

  // Like GetValue except that the slot is expected to be written to before
  // being read from again.  The value of the reference may be invalidated,
  // causing subsequent attempts to read it to fail.
  void TakeValue();

  // Generate code to store the value on top of the expression stack in the
  // reference.  The reference is expected to be immediately below the value
  // on the expression stack.  The value is stored in the location specified
  // by the reference, and is left on top of the stack, after the reference
  // is popped from beneath it (unloaded).
  void SetValue(InitState init_state);

 private:
  CodeGenerator* cgen_;
  Expression* expression_;
  Type type_;
  bool persist_after_get_;
};
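
// Illustrative sketch only (the call sites below are assumptions; the
// real ones live in codegen-x64.cc): a Reference is used as a scoped
// object whose value must be consumed before its scope ends, e.g. when
// compiling an assignment:
//
//   {
//     Reference target(this, node->target());  // puts the reference on the frame
//     Load(node->value());                     // pushes the value on top of it
//     target.SetValue(NOT_CONST_INIT);         // consumes (unloads) the reference
//   }  // ~Reference() requires the UNLOADED state described above
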

// -------------------------------------------------------------------------
// Control destinations.

// A control destination encapsulates a pair of jump targets and a
// flag indicating which one is the preferred fall-through.  The
// preferred fall-through must be unbound, the other may be already
// bound (i.e., a backward target).
//
// The true and false targets may be jumped to unconditionally or
// control may split conditionally.  Unconditional jumping and
// splitting should be emitted in tail position (as the last thing
// when compiling an expression) because they can cause either label
// to be bound or the non-fall-through to be jumped to, leaving an
// invalid virtual frame.
//
// The labels in the control destination can be extracted and
// manipulated normally without affecting the state of the
// destination.
class ControlDestination BASE_EMBEDDED {
 public:
  ControlDestination(JumpTarget* true_target,
                     JumpTarget* false_target,
                     bool true_is_fall_through)
      : true_target_(true_target),
        false_target_(false_target),
        true_is_fall_through_(true_is_fall_through),
        is_used_(false) {
    ASSERT(true_is_fall_through ? !true_target->is_bound()
                                : !false_target->is_bound());
  }

  // Accessors for the jump targets.  Directly jumping or branching to
  // or binding the targets will not update the destination's state.
  JumpTarget* true_target() const { return true_target_; }
  JumpTarget* false_target() const { return false_target_; }

  // True if the destination has been jumped to unconditionally or
  // control has been split to both targets.  This predicate does not
  // test whether the targets have been extracted and manipulated as
  // raw jump targets.
  bool is_used() const { return is_used_; }

  // True if the destination is used and the true target (respectively
  // false target) was the fall-through.  If the target is backward,
  // "fall through" includes jumping unconditionally to it.
  bool true_was_fall_through() const {
    return is_used_ && true_is_fall_through_;
  }

  bool false_was_fall_through() const {
    return is_used_ && !true_is_fall_through_;
  }

  // Emit a branch to one of the true or false targets, and bind the
  // other target.  Because this binds the fall-through target, it
  // should be emitted in tail position (as the last thing when
  // compiling an expression).
  void Split(Condition cc) {
    ASSERT(!is_used_);
    if (true_is_fall_through_) {
      false_target_->Branch(NegateCondition(cc));
      true_target_->Bind();
    } else {
      true_target_->Branch(cc);
      false_target_->Bind();
    }
    is_used_ = true;
  }

  // Emit an unconditional jump in tail position, to the true target
  // (if the argument is true) or the false target.  The "jump" will
  // actually bind the jump target if it is forward, jump to it if it
  // is backward.
  void Goto(bool where) {
    ASSERT(!is_used_);
    JumpTarget* target = where ? true_target_ : false_target_;
    if (target->is_bound()) {
      target->Jump();
    } else {
      target->Bind();
    }
    is_used_ = true;
    true_is_fall_through_ = where;
  }

  // Mark this jump target as used as if Goto had been called, but
  // without generating a jump or binding a label (the control effect
  // should have already happened).  This is used when the left
  // subexpression of a short-circuit boolean operator is compiled.
  void Use(bool where) {
    ASSERT(!is_used_);
    ASSERT((where ? true_target_ : false_target_)->is_bound());
    is_used_ = true;
    true_is_fall_through_ = where;
  }

  // Swap the true and false targets but keep the same actual label as
  // the fall through.  This is used when compiling negated
  // expressions, where we want to swap the targets but preserve the
  // state.
  void Invert() {
    JumpTarget* temp_target = true_target_;
    true_target_ = false_target_;
    false_target_ = temp_target;
    true_is_fall_through_ = !true_is_fall_through_;
  }

 private:
  // True and false jump targets.
  JumpTarget* true_target_;
  JumpTarget* false_target_;

  // Before using the destination: true if the true target is the
  // preferred fall-through, false if the false target is.  After
  // using the destination: true if the true target was actually used
  // as the fall-through, false if the false target was.
  bool true_is_fall_through_;

  // True if the Split or Goto functions have been called.
  bool is_used_;
};
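
// Illustrative sketch only (EmitExampleCompare is a hypothetical helper):
// code that compiles a condition typically ends by splitting control on
// the processor flags, so exactly one target is bound as the fall-through:
//
//   void EmitExampleCompare(ControlDestination* dest) {
//     // ... emit a cmp instruction that sets the condition flags ...
//     dest->Split(equal);  // branch to one target, bind the other
//   }
//
// Afterwards dest->is_used() is true and exactly one of
// true_was_fall_through() / false_was_fall_through() holds.
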

// -------------------------------------------------------------------------
// Code generation state

// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the jump target pair).  It is threaded through
// the call stack.  Constructing a state implicitly pushes it on the owning
// code generator's stack of states, and destroying one implicitly pops it.
//
// The code generator state is only used for expressions, so statements have
// the initial state.
class CodeGenState BASE_EMBEDDED {
 public:
  // Create an initial code generator state.  Destroying the initial state
  // leaves the code generator with a NULL state.
  explicit CodeGenState(CodeGenerator* owner);

  // Create a code generator state based on a code generator's current
  // state.  The new state has its own control destination.
  CodeGenState(CodeGenerator* owner, ControlDestination* destination);

  // Destroy a code generator state and restore the owning code generator's
  // previous state.
  ~CodeGenState();

  // Accessors for the state.
  ControlDestination* destination() const { return destination_; }

 private:
  // The owning code generator.
  CodeGenerator* owner_;

  // A control destination in case the expression has a control-flow
  // effect.
  ControlDestination* destination_;

  // The previous state of the owning code generator, restored when
  // this state is destroyed.
  CodeGenState* previous_;
};
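
// Illustrative sketch of the implicit push/pop described above (the
// visitor shown is hypothetical; only the RAII scoping pattern matters):
//
//   void CodeGenerator::VisitExample(Expression* expr,
//                                    ControlDestination* dest) {
//     CodeGenState new_state(this, dest);  // pushed on construction
//     // ... generate code; state()->destination() == dest here ...
//   }  // popped on destruction, restoring the previous state
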

// -------------------------------------------------------------------------
// Arguments allocation mode

enum ArgumentsAllocationMode {
  NO_ARGUMENTS_ALLOCATION,
  EAGER_ARGUMENTS_ALLOCATION,
  LAZY_ARGUMENTS_ALLOCATION
};


// -------------------------------------------------------------------------
// CodeGenerator

class CodeGenerator: public AstVisitor {
 public:
  // Takes a function literal and generates code for it.  This function
  // should only be called by compiler.cc.
  static Handle<Code> MakeCode(CompilationInfo* info);

  // Printing of AST, etc. as requested by flags.
  static void MakeCodePrologue(CompilationInfo* info);

  // Allocate and install the code.
  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                       Code::Flags flags,
                                       CompilationInfo* info);

#ifdef ENABLE_LOGGING_AND_PROFILING
  static bool ShouldGenerateLog(Expression* type);
#endif

  static void RecordPositions(MacroAssembler* masm, int pos);
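
  // Illustrative sketch of how the three static entry points above fit
  // together (the inlined driver is an assumption; compiler.cc calls
  // MakeCode, which performs roughly these steps):
  //
  //   MakeCodePrologue(info);               // flag-driven AST printing etc.
  //   /* generate code for info's function into a MacroAssembler */
  //   MakeCodeEpilogue(masm, flags, info);  // allocate and install the Code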

  // Accessors
  MacroAssembler* masm() { return masm_; }
  VirtualFrame* frame() const { return frame_; }
  inline Handle<Script> script();

  bool has_valid_frame() const { return frame_ != NULL; }

  // Set the virtual frame to be new_frame, with non-frame register
  // reference counts given by non_frame_registers.  The non-frame
  // register reference counts of the old frame are returned in
  // non_frame_registers.
  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);

  void DeleteFrame();

  RegisterAllocator* allocator() const { return allocator_; }

  CodeGenState* state() { return state_; }
  void set_state(CodeGenState* state) { state_ = state; }

  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }

  bool in_spilled_code() const { return in_spilled_code_; }
  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }

 private:
  // Construction/Destruction
  explicit CodeGenerator(MacroAssembler* masm);

  // Accessors
  inline bool is_eval();
  Scope* scope();

  // Generating deferred code.
  void ProcessDeferred();

  // State
  ControlDestination* destination() const { return state_->destination(); }

  // Track loop nesting level.
  int loop_nesting() const { return loop_nesting_; }
  void IncrementLoopNesting() { loop_nesting_++; }
  void DecrementLoopNesting() { loop_nesting_--; }

  // Node visitors.
  void VisitStatements(ZoneList<Statement*>* statements);

#define DEF_VISIT(type) \
  void Visit##type(type* node);
  AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

  // Visit a statement and then spill the virtual frame if control flow can
  // reach the end of the statement (i.e., it does not exit via break,
  // continue, return, or throw).  This function is used temporarily while
  // the code generator is being transformed.
  void VisitAndSpill(Statement* statement);

  // Visit a list of statements and then spill the virtual frame if control
  // flow can reach the end of the list.
  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);

  // Main code generation function
  void Generate(CompilationInfo* info);

  // Generate the return sequence code.  Should be called no more than
  // once per compiled function, immediately after binding the return
  // target (which cannot be done more than once).
  void GenerateReturnSequence(Result* return_value);

  // Returns the arguments allocation mode.
  ArgumentsAllocationMode ArgumentsMode();

  // Store the arguments object and allocate it if necessary.
  Result StoreArgumentsObject(bool initial);

  // The following are used by class Reference.
  void LoadReference(Reference* ref);
  void UnloadReference(Reference* ref);

  static Operand ContextOperand(Register context, int index) {
    return Operand(context, Context::SlotOffset(index));
  }

  Operand SlotOperand(Slot* slot, Register tmp);

  Operand ContextSlotOperandCheckExtensions(Slot* slot,
                                            Result tmp,
                                            JumpTarget* slow);

  // Expressions
  static Operand GlobalObject() {
    return ContextOperand(rsi, Context::GLOBAL_INDEX);
  }

  void LoadCondition(Expression* x,
                     ControlDestination* destination,
                     bool force_control);
  void Load(Expression* expr);
  void LoadGlobal();
  void LoadGlobalReceiver();

  // Generate code to push the value of an expression on top of the frame
  // and then spill the frame fully to memory.  This function is used
  // temporarily while the code generator is being transformed.
  void LoadAndSpill(Expression* expression);

  // Read a value from a slot and leave it on top of the expression stack.
  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                           TypeofState typeof_state,
                                           JumpTarget* slow);

  // Store the value on top of the expression stack into a slot, leaving the
  // value in place.
  void StoreToSlot(Slot* slot, InitState init_state);

  // Load a property of an object, returning it in a Result.
  // The object and the property name are passed on the stack, and
  // not changed.
  Result EmitKeyedLoad(bool is_global);

  // Special code for typeof expressions: Unfortunately, we must
  // be careful when loading the expression in 'typeof'
  // expressions.  We are not allowed to throw reference errors for
  // non-existing properties of the global object, so we must make it
  // look like an explicit property access, instead of an access
  // through the context chain.
  void LoadTypeofExpression(Expression* x);

  // Translate the value on top of the frame into control flow to the
  // control destination.
  void ToBoolean(ControlDestination* destination);

  void GenericBinaryOperation(
      Token::Value op,
      StaticType* type,
      OverwriteMode overwrite_mode);

  // If possible, combine two constant smi values using op to produce
  // a smi result, and push it on the virtual frame, all at compile time.
  // Returns true if it succeeds.  Otherwise it has no effect.
  bool FoldConstantSmis(Token::Value op, int left, int right);

  // Emit code to perform a binary operation on a constant
  // smi and a likely smi.  Consumes the Result *operand.
  Result ConstantSmiBinaryOperation(Token::Value op,
                                    Result* operand,
                                    Handle<Object> constant_operand,
                                    StaticType* type,
                                    bool reversed,
                                    OverwriteMode overwrite_mode);

  // Emit code to perform a binary operation on two likely smis.
  // The code to handle smi arguments is produced inline.
  // Consumes the Results *left and *right.
  Result LikelySmiBinaryOperation(Token::Value op,
                                  Result* left,
                                  Result* right,
                                  OverwriteMode overwrite_mode);

  void Comparison(AstNode* node,
                  Condition cc,
                  bool strict,
                  ControlDestination* destination);

  // To prevent long attacker-controlled byte sequences, integer constants
  // from the JavaScript source are loaded in two parts if they are larger
  // than 16 bits.
  static const int kMaxSmiInlinedBits = 16;
  bool IsUnsafeSmi(Handle<Object> value);
  // Load an integer constant x into a register target using
  // at most 16 bits of user-controlled data per assembly operation.
  void LoadUnsafeSmi(Register target, Handle<Object> value);
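
  // Illustrative sketch only; the exact instruction sequence is an
  // assumption (LoadUnsafeSmi in codegen-x64.cc is authoritative).  A
  // 32-bit untrusted constant such as 0xAABBCCDD can be materialized
  // without any single instruction carrying more than 16 attacker-chosen
  // bits, e.g.:
  //
  //   mov  target, 0xAABB   ; high 16 bits
  //   shl  target, 16       ; shift into place
  //   or   target, 0xCCDD   ; low 16 bits
  //
  // This keeps long attacker-controlled byte sequences out of the
  // generated instruction stream.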

  void CallWithArguments(ZoneList<Expression*>* arguments,
                         CallFunctionFlags flags,
                         int position);

  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).  We call x the applicand and y the receiver.
  // The optimization avoids allocating an arguments object if possible.
  void CallApplyLazy(Expression* applicand,
                     Expression* receiver,
                     VariableProxy* arguments,
                     int position);

  void CheckStack();

  struct InlineRuntimeLUT {
    void (CodeGenerator::*method)(ZoneList<Expression*>*);
    const char* name;
  };
  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
  bool CheckForInlineRuntimeCall(CallRuntime* node);
  static bool PatchInlineRuntimeEntry(Handle<String> name,
                                      const InlineRuntimeLUT& new_entry,
                                      InlineRuntimeLUT* old_entry);
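
  // Illustrative sketch of how the lookup table is used (the entry shown
  // is an assumption; the real table is the kInlineRuntimeLUT array
  // defined in codegen-x64.cc):
  //
  //   static InlineRuntimeLUT kExampleLUT[] = {
  //     { &CodeGenerator::GenerateIsSmi, "_IsSmi" },
  //     // ...
  //   };
  //
  // CheckForInlineRuntimeCall looks the call's name up with
  // FindInlineRuntimeLUT and, on a hit, dispatches through the member
  // function pointer: (this->*(entry->method))(args).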

  void ProcessDeclarations(ZoneList<Declaration*>* declarations);

  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);

  // Declare global variables and functions in the given array of
  // name/value pairs.
  void DeclareGlobals(Handle<FixedArray> pairs);

  // Instantiate the function boilerplate.
  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);

  // Support for type checks.
  void GenerateIsSmi(ZoneList<Expression*>* args);
  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
  void GenerateIsArray(ZoneList<Expression*>* args);
  void GenerateIsRegExp(ZoneList<Expression*>* args);
  void GenerateIsObject(ZoneList<Expression*>* args);
  void GenerateIsFunction(ZoneList<Expression*>* args);
  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);

  // Support for construct call checks.
  void GenerateIsConstructCall(ZoneList<Expression*>* args);

  // Support for arguments.length and arguments[?].
  void GenerateArgumentsLength(ZoneList<Expression*>* args);
  void GenerateArgumentsAccess(ZoneList<Expression*>* args);

  // Support for accessing the class and value fields of an object.
  void GenerateClassOf(ZoneList<Expression*>* args);
  void GenerateValueOf(ZoneList<Expression*>* args);
  void GenerateSetValueOf(ZoneList<Expression*>* args);

  // Fast support for charCodeAt(n).
  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);

  // Fast support for object equality testing.
  void GenerateObjectEquals(ZoneList<Expression*>* args);

  void GenerateLog(ZoneList<Expression*>* args);

  void GenerateGetFramePointer(ZoneList<Expression*>* args);

  // Fast support for Math.random().
  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);

  // Fast support for StringAdd.
  void GenerateStringAdd(ZoneList<Expression*>* args);

  // Fast support for SubString.
  void GenerateSubString(ZoneList<Expression*>* args);

  // Fast support for StringCompare.
  void GenerateStringCompare(ZoneList<Expression*>* args);

  // Support for direct calls from JavaScript to native RegExp code.
  void GenerateRegExpExec(ZoneList<Expression*>* args);

  // Fast support for number to string.
  void GenerateNumberToString(ZoneList<Expression*>* args);

  // Fast call to math functions.
  void GenerateMathSin(ZoneList<Expression*>* args);
  void GenerateMathCos(ZoneList<Expression*>* args);

  // Simple condition analysis.
  enum ConditionAnalysis {
    ALWAYS_TRUE,
    ALWAYS_FALSE,
    DONT_KNOW
  };
  ConditionAnalysis AnalyzeCondition(Expression* cond);

  // Methods used to indicate the source position of the code being
  // generated.  Source positions are collected by the assembler and emitted
  // with the relocation information.
  void CodeForFunctionPosition(FunctionLiteral* fun);
  void CodeForReturnPosition(FunctionLiteral* fun);
  void CodeForStatementPosition(Statement* node);
  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
  void CodeForSourcePosition(int pos);

#ifdef DEBUG
  // True if the registers are valid for entry to a block.  There should
  // be no frame-external references to (non-reserved) registers.
  bool HasValidEntryRegisters();
#endif

  ZoneList<DeferredCode*> deferred_;

  // Assembler
  MacroAssembler* masm_;  // to generate code

  CompilationInfo* info_;

  // Code generation state
  VirtualFrame* frame_;
  RegisterAllocator* allocator_;
  CodeGenState* state_;
  int loop_nesting_;

  // Jump targets.
  // The target of the return from the function.
  BreakTarget function_return_;

  // True if the function return is shadowed (i.e., jumping to the target
  // function_return_ does not jump to the true function return, but rather
  // to some unlinking code).
  bool function_return_is_shadowed_;

  // True when we are in code that expects the virtual frame to be fully
  // spilled.  Some virtual frame functions are disabled in DEBUG builds
  // when called from spilled code, because they do not leave the virtual
  // frame in a spilled state.
  bool in_spilled_code_;

  static InlineRuntimeLUT kInlineRuntimeLUT[];

  friend class VirtualFrame;
  friend class JumpTarget;
  friend class Reference;
  friend class Result;
  friend class FastCodeGenerator;
  friend class FullCodeGenerator;
  friend class FullCodeGenSyntaxChecker;

  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};


// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
  NO_GENERIC_BINARY_FLAGS = 0,
  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
};


class GenericBinaryOpStub: public CodeStub {
 public:
  GenericBinaryOpStub(Token::Value op,
                      OverwriteMode mode,
                      GenericBinaryFlags flags,
                      NumberInfo::Type operands_type = NumberInfo::kUnknown)
      : op_(op),
        mode_(mode),
        flags_(flags),
        args_in_registers_(false),
        args_reversed_(false),
        name_(NULL),
        operands_type_(operands_type) {
    use_sse3_ = CpuFeatures::IsSupported(SSE3);
    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
  }

  // Generate code to call the stub with the supplied arguments.  This will
  // add code at the call site to prepare arguments either in registers or
  // on the stack together with the actual call.
  void GenerateCall(MacroAssembler* masm, Register left, Register right);
  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);

  Result GenerateCall(MacroAssembler* masm,
                      VirtualFrame* frame,
                      Result* left,
                      Result* right);

 private:
  Token::Value op_;
  OverwriteMode mode_;
  GenericBinaryFlags flags_;
  bool args_in_registers_;  // Arguments passed in registers, not on the stack.
  bool args_reversed_;  // Left and right arguments are swapped.
  bool use_sse3_;
  char* name_;
  NumberInfo::Type operands_type_;

  const char* GetName();

#ifdef DEBUG
  void Print() {
    PrintF("GenericBinaryOpStub %d (op %s), "
           "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
           MinorKey(),
           Token::String(op_),
           static_cast<int>(mode_),
           static_cast<int>(flags_),
           static_cast<int>(args_in_registers_),
           static_cast<int>(args_reversed_),
           NumberInfo::ToString(operands_type_));
  }
#endif

  // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 7> {};
  class SSE3Bits: public BitField<bool, 9, 1> {};
  class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
  class ArgsReversedBits: public BitField<bool, 11, 1> {};
  class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
  class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};

  Major MajorKey() { return GenericBinaryOp; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return OpBits::encode(op_)
           | ModeBits::encode(mode_)
           | FlagBits::encode(flags_)
           | SSE3Bits::encode(use_sse3_)
           | ArgsInRegistersBits::encode(args_in_registers_)
           | ArgsReversedBits::encode(args_reversed_)
           | NumberInfoBits::encode(operands_type_);
  }

  void Generate(MacroAssembler* masm);
  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
  void GenerateLoadArguments(MacroAssembler* masm);
  void GenerateReturn(MacroAssembler* masm);

  bool ArgsInRegistersSupported() {
    return (op_ == Token::ADD) || (op_ == Token::SUB)
        || (op_ == Token::MUL) || (op_ == Token::DIV);
  }
  bool IsOperationCommutative() {
    return (op_ == Token::ADD) || (op_ == Token::MUL);
  }

  void SetArgsInRegisters() { args_in_registers_ = true; }
  void SetArgsReversed() { args_reversed_ = true; }
  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
  bool HasArgsInRegisters() { return args_in_registers_; }
  bool HasArgsReversed() { return args_reversed_; }
};
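
// Worked layout of the 16-bit minor key computed by
// GenericBinaryOpStub::MinorKey() above (bit 0 is the least significant),
// derived directly from the BitField declarations:
//
//   bits  0- 1  MM       ModeBits             (OverwriteMode)
//   bits  2- 8  OOOOOOO  OpBits               (Token::Value)
//   bit      9  S        SSE3Bits
//   bit     10  A        ArgsInRegistersBits
//   bit     11  R        ArgsReversedBits
//   bit     12  F        FlagBits             (GenericBinaryFlags)
//   bits 13-15  NNN      NumberInfoBits       (NumberInfo::Type)
//
// Each distinct parameter combination therefore yields a distinct minor
// key, which is what lets the key identify a cached stub.
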

class StringStubBase: public CodeStub {
 public:
  // Generate code for copying characters using a simple loop.  This should
  // only be used in places where the number of characters is small and the
  // additional setup and checking in GenerateCopyCharactersREP adds too
  // much overhead.  Copying of overlapping regions is not supported.
  void GenerateCopyCharacters(MacroAssembler* masm,
                              Register dest,
                              Register src,
                              Register count,
                              bool ascii);

  // Generate code for copying characters using the rep movs instruction.
  // Copies rcx characters from rsi to rdi.  Copying of overlapping regions
  // is not supported.
  void GenerateCopyCharactersREP(MacroAssembler* masm,
                                 Register dest,   // Must be rdi.
                                 Register src,    // Must be rsi.
                                 Register count,  // Must be rcx.
                                 bool ascii);
};


// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
  NO_STRING_ADD_FLAGS = 0,
  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
};


class StringAddStub: public StringStubBase {
 public:
  explicit StringAddStub(StringAddFlags flags) {
    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
  }

 private:
  Major MajorKey() { return StringAdd; }
  int MinorKey() { return string_check_ ? 0 : 1; }

  void Generate(MacroAssembler* masm);

  // Should the stub check whether arguments are strings?
  bool string_check_;
};


class SubStringStub: public StringStubBase {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class StringCompareStub: public CodeStub {
 public:
  explicit StringCompareStub() {}

  // Compares two flat ASCII strings and returns the result in rax after
  // popping two arguments from the stack.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

 private:
  Major MajorKey() { return StringCompare; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


} }  // namespace v8::internal

#endif  // V8_X64_CODEGEN_X64_H_