// external/v8/src/code-stub-assembler.h (Android 10, tag 10.0.0_r6)
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODE_STUB_ASSEMBLER_H_
#define V8_CODE_STUB_ASSEMBLER_H_

#include <functional>

#include "src/base/macros.h"
#include "src/compiler/code-assembler.h"
#include "src/globals.h"
#include "src/objects.h"
#include "src/objects/bigint.h"
#include "src/roots.h"

namespace v8 {
namespace internal {

class CallInterfaceDescriptor;
class CodeStubArguments;
class CodeStubAssembler;
class StatsCounter;
class StubCache;

enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };

#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V)                              \
  V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
  V(EmptyPropertyDictionary, empty_property_dictionary,                    \
    EmptyPropertyDictionary)                                               \
  V(PromiseSpeciesProtector, promise_species_protector,                    \
    PromiseSpeciesProtector)                                               \
  V(TypedArraySpeciesProtector, typed_array_species_protector,             \
    TypedArraySpeciesProtector)                                            \
  V(StoreHandler0Map, store_handler0_map, StoreHandler0Map)

#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)                              \
  V(AccessorInfoMap, accessor_info_map, AccessorInfoMap)                     \
  V(AccessorPairMap, accessor_pair_map, AccessorPairMap)                     \
  V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap)   \
  V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map,  \
    AllocationSiteWithoutWeakNextMap)                                        \
  V(BooleanMap, boolean_map, BooleanMap)                                     \
  V(CodeMap, code_map, CodeMap)                                              \
  V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray)                     \
  V(EmptySlowElementDictionary, empty_slow_element_dictionary,               \
    EmptySlowElementDictionary)                                              \
  V(empty_string, empty_string, EmptyString)                                 \
  V(FalseValue, false_value, False)                                          \
  V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap)               \
  V(FixedArrayMap, fixed_array_map, FixedArrayMap)                           \
  V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap)                 \
  V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap)        \
  V(FunctionTemplateInfoMap, function_template_info_map,                     \
    FunctionTemplateInfoMap)                                                 \
  V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap)        \
  V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol)             \
  V(HeapNumberMap, heap_number_map, HeapNumberMap)                           \
  V(iterator_symbol, iterator_symbol, IteratorSymbol)                        \
  V(length_string, length_string, LengthString)                             \
  V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap)        \
  V(MetaMap, meta_map, MetaMap)                                              \
  V(MinusZeroValue, minus_zero_value, MinusZero)                             \
  V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap)     \
  V(NanValue, nan_value, Nan)                                                \
  V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap)              \
  V(NullValue, null_value, Null)                                             \
  V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap)              \
  V(PreParsedScopeDataMap, pre_parsed_scope_data_map, PreParsedScopeDataMap) \
  V(prototype_string, prototype_string, PrototypeString)                     \
  V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap)  \
  V(SymbolMap, symbol_map, SymbolMap)                                        \
  V(TheHoleValue, the_hole_value, TheHole)                                   \
  V(TransitionArrayMap, transition_array_map, TransitionArrayMap)            \
  V(TrueValue, true_value, True)                                             \
  V(Tuple2Map, tuple2_map, Tuple2Map)                                        \
  V(Tuple3Map, tuple3_map, Tuple3Map)                                        \
  V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map,       \
    ArrayBoilerplateDescriptionMap)                                          \
  V(UncompiledDataWithoutPreParsedScopeMap,                                  \
    uncompiled_data_without_pre_parsed_scope_map,                            \
    UncompiledDataWithoutPreParsedScopeMap)                                  \
  V(UncompiledDataWithPreParsedScopeMap,                                     \
    uncompiled_data_with_pre_parsed_scope_map,                               \
    UncompiledDataWithPreParsedScopeMap)                                     \
  V(UndefinedValue, undefined_value, Undefined)                              \
  V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap)

#define HEAP_IMMOVABLE_OBJECT_LIST(V)   \
  HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
  HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)

// Returned from IteratorBuiltinsAssembler::GetIterator(). Struct is declared
// here to simplify use in other generated builtins.
struct IteratorRecord {
 public:
  // iteratorRecord.[[Iterator]]
  compiler::TNode<JSReceiver> object;

  // iteratorRecord.[[NextMethod]]
  compiler::TNode<Object> next;
};

#ifdef DEBUG
#define CSA_CHECK(csa, x)                                        \
  (csa)->Check(                                                  \
      [&]() -> compiler::Node* {                                 \
        return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \
      },                                                         \
      #x, __FILE__, __LINE__)
#else
#define CSA_CHECK(csa, x) (csa)->FastCheck(x)
#endif

#ifdef DEBUG
// Add stringified versions to the given values, except the first. That is,
// transform
//   x, a, b, c, d, e, f
// to
//   a, "a", b, "b", c, "c", d, "d", e, "e", f, "f"
//
// __VA_ARGS__ is ignored to allow the caller to pass through too many
// parameters, and the first element is ignored to support having no extra
// values without empty __VA_ARGS__ (which cause all sorts of problems with
// extra commas).
#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \
  v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5

// Stringify the given variable number of arguments. The arguments are trimmed
// to 5 if there are too many, and padded with nullptr if there are not enough.
#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...)                                \
  CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
                                      nullptr, nullptr)

#define CSA_ASSERT_GET_FIRST(x, ...) (x)
#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x

// CSA_ASSERT(csa, <condition>, <extra values to print...>)

// We have to jump through some hoops to allow <extra values to print...> to be
// empty.
#define CSA_ASSERT(csa, ...)                                             \
  (csa)->Assert(                                                         \
      [&]() -> compiler::Node* {                                         \
        return implicit_cast<compiler::SloppyTNode<Word32T>>(            \
            EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)));                  \
      },                                                                 \
      EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
      CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))

// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
//     <extra values to print...>)
#define CSA_ASSERT_BRANCH(csa, ...)                                      \
  (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)),               \
                EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
                __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))

#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected)                       \
  (csa)->Assert(                                                           \
      [&]() -> compiler::Node* {                                           \
        compiler::Node* const argc =                                       \
            (csa)->Parameter(Descriptor::kJSActualArgumentsCount);         \
        return (csa)->Op(argc, (csa)->Int32Constant(expected));            \
      },                                                                   \
      "argc " #op " " #expected, __FILE__, __LINE__,                       \
      SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
      "argc")

#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
  CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)

#define CSA_DEBUG_INFO(name) \
  { #name, __FILE__, __LINE__ }
#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
#define VARIABLE(name, ...) \
  Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
#define VARIABLE_CONSTRUCTOR(name, ...) \
  name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
#define TYPED_VARIABLE_DEF(type, name, ...) \
  TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#else  // DEBUG
#define CSA_ASSERT(csa, ...) ((void)0)
#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label)
#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
#define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
#endif  // DEBUG

#define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))

#ifdef ENABLE_SLOW_DCHECKS
#define CSA_SLOW_ASSERT(csa, ...) \
  if (FLAG_enable_slow_asserts) { \
    CSA_ASSERT(csa, __VA_ARGS__); \
  }
#else
#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
#endif
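// Editor's note (illustration, not part of the original header): a minimal
// sketch of how the macros above are typically used inside a builtin, where
// |this| is a CodeStubAssembler and |array| is a hypothetical Node* already
// in scope:
//
//   CSA_ASSERT(this, IsJSArray(array), array);   // prints |array| on failure
//   TVARIABLE(Smi, var_index, SmiConstant(0));   // debug info in DEBUG builds
//   Label loop(this);
//   BIND(&loop);                                 // records name/file/line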
class int31_t {
 public:
  int31_t() : value_(0) {}
  int31_t(int value) : value_(value) {  // NOLINT(runtime/explicit)
    DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
  }
  int31_t& operator=(int value) {
    DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
    value_ = value;
    return *this;
  }
  int32_t value() const { return value_; }
  operator int32_t() const { return value_; }

 private:
  int32_t value_;
};

// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
// it's possible to add JavaScript-specific useful CodeAssembler "macros"
// without modifying files in the compiler directory (and requiring a review
// from a compiler directory OWNER).
class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
 public:
  using Node = compiler::Node;
  template <class T>
  using TNode = compiler::TNode<T>;
  template <class T>
  using SloppyTNode = compiler::SloppyTNode<T>;
  template <class T>
  using LazyNode = std::function<TNode<T>()>;

  CodeStubAssembler(compiler::CodeAssemblerState* state);

  enum AllocationFlag : uint8_t {
    kNone = 0,
    kDoubleAlignment = 1,
    kPretenured = 1 << 1,
    kAllowLargeObjectAllocation = 1 << 2,
  };

  enum SlackTrackingMode { kWithSlackTracking, kNoSlackTracking };

  typedef base::Flags<AllocationFlag> AllocationFlags;

  enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };

  // On 32-bit platforms, there is a slight performance advantage to doing all
  // of the array offset/index arithmetic with SMIs, since it's possible
  // to save a few tag/untag operations without paying an extra expense when
  // calculating array offset (the smi math can be folded away) and there are
  // fewer live ranges. Thus only convert indices to untagged value on 64-bit
  // platforms.
  ParameterMode OptimalParameterMode() const {
    return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
  }

  MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
    return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation()
                                     : MachineRepresentation::kTaggedSigned;
  }

  MachineRepresentation OptimalParameterRepresentation() const {
    return ParameterRepresentation(OptimalParameterMode());
  }

  TNode<IntPtrT> ParameterToIntPtr(Node* value, ParameterMode mode) {
    if (mode == SMI_PARAMETERS) value = SmiUntag(value);
    return UncheckedCast<IntPtrT>(value);
  }

  Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
    if (mode == SMI_PARAMETERS) return SmiTag(value);
    return value;
  }

  Node* Int32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
    return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode);
  }

  TNode<Smi> ParameterToTagged(Node* value, ParameterMode mode) {
    if (mode != SMI_PARAMETERS) return SmiTag(value);
    return UncheckedCast<Smi>(value);
  }

  Node* TaggedToParameter(SloppyTNode<Smi> value, ParameterMode mode) {
    if (mode != SMI_PARAMETERS) return SmiUntag(value);
    return value;
  }
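  // Editor's note (illustration, not part of the original header): index
  // arithmetic written against ParameterMode stays Smi-based on 32-bit
  // targets and untagged on 64-bit targets without duplicating the code:
  //
  //   ParameterMode mode = OptimalParameterMode();
  //   Node* index = IntPtrOrSmiConstant(0, mode);
  //   index = IntPtrOrSmiAdd(index, IntPtrOrSmiConstant(1, mode), mode);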
  TNode<Smi> TaggedToSmi(TNode<Object> value, Label* fail) {
    GotoIf(TaggedIsNotSmi(value), fail);
    return UncheckedCast<Smi>(value);
  }

  TNode<Number> TaggedToNumber(TNode<Object> value, Label* fail) {
    GotoIfNot(IsNumber(value), fail);
    return UncheckedCast<Number>(value);
  }

  TNode<HeapObject> TaggedToHeapObject(TNode<Object> value, Label* fail) {
    GotoIf(TaggedIsSmi(value), fail);
    return UncheckedCast<HeapObject>(value);
  }

  TNode<JSArray> HeapObjectToJSArray(TNode<HeapObject> heap_object,
                                     Label* fail) {
    GotoIfNot(IsJSArray(heap_object), fail);
    return UncheckedCast<JSArray>(heap_object);
  }

  TNode<JSArray> TaggedToFastJSArray(TNode<Context> context,
                                     TNode<Object> value, Label* fail) {
    GotoIf(TaggedIsSmi(value), fail);
    TNode<HeapObject> heap_object = CAST(value);
    GotoIfNot(IsFastJSArray(heap_object, context), fail);
    return UncheckedCast<JSArray>(heap_object);
  }

  TNode<JSDataView> HeapObjectToJSDataView(TNode<HeapObject> heap_object,
                                           Label* fail) {
    GotoIfNot(IsJSDataView(heap_object), fail);
    return CAST(heap_object);
  }

  TNode<JSReceiver> HeapObjectToCallable(TNode<HeapObject> heap_object,
                                         Label* fail) {
    GotoIfNot(IsCallable(heap_object), fail);
    return CAST(heap_object);
  }
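  // Editor's note (illustration, not part of the original header): the
  // TaggedTo*/HeapObjectTo* helpers implement a "cast or bail out" pattern;
  // the caller passes a (typically deferred) label that receives control
  // when the dynamic check fails:
  //
  //   Label if_not_smi(this, Label::kDeferred);
  //   TNode<Smi> smi_value = TaggedToSmi(value, &if_not_smi);
  //   // ... fast path using |smi_value| ...
  //   BIND(&if_not_smi);
  //   // ... slow path ...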
  TNode<HeapNumber> UnsafeCastNumberToHeapNumber(TNode<Number> p_n) {
    return CAST(p_n);
  }

  TNode<FixedArrayBase> UnsafeCastObjectToFixedArrayBase(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<FixedArray> UnsafeCastObjectToFixedArray(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<FixedDoubleArray> UnsafeCastObjectToFixedDoubleArray(
      TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<HeapNumber> UnsafeCastObjectToHeapNumber(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<JSReceiver> UnsafeCastObjectToCallable(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<Smi> UnsafeCastObjectToSmi(TNode<Object> p_o) { return CAST(p_o); }

  TNode<Number> UnsafeCastObjectToNumber(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<HeapObject> UnsafeCastObjectToHeapObject(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<JSArray> UnsafeCastObjectToJSArray(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<FixedTypedArrayBase> UnsafeCastObjectToFixedTypedArrayBase(
      TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<Object> UnsafeCastObjectToCompareBuiltinFn(TNode<Object> p_o) {
    return p_o;
  }

  TNode<Object> UnsafeCastObjectToLoadFn(TNode<Object> p_o) { return p_o; }

  TNode<Object> UnsafeCastObjectToStoreFn(TNode<Object> p_o) { return p_o; }

  TNode<Object> UnsafeCastObjectToCanUseSameAccessorFn(TNode<Object> p_o) {
    return p_o;
  }

  TNode<NumberDictionary> UnsafeCastObjectToNumberDictionary(
      TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<JSReceiver> UnsafeCastObjectToJSReceiver(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<JSObject> UnsafeCastObjectToJSObject(TNode<Object> p_o) {
    return CAST(p_o);
  }

  TNode<Map> UnsafeCastObjectToMap(TNode<Object> p_o) { return CAST(p_o); }

  Node* MatchesParameterMode(Node* value, ParameterMode mode);

#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
  Node* OpName(Node* a, Node* b, ParameterMode mode) {   \
    if (mode == SMI_PARAMETERS) {                        \
      return SmiOpName(CAST(a), CAST(b));                \
    } else {                                             \
      DCHECK_EQ(INTPTR_PARAMETERS, mode);                \
      return IntPtrOpName(a, b);                         \
    }                                                    \
  }
  PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin)
  PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd)
  PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub)
  PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
  PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual,
                  SmiLessThanOrEqual)
  PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan)
  PARAMETER_BINOP(IntPtrOrSmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
                  SmiGreaterThanOrEqual)
  PARAMETER_BINOP(UintPtrOrSmiLessThan, UintPtrLessThan, SmiBelow)
  PARAMETER_BINOP(UintPtrOrSmiGreaterThanOrEqual, UintPtrGreaterThanOrEqual,
                  SmiAboveOrEqual)
#undef PARAMETER_BINOP
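  // Editor's note (illustration, not part of the original header): e.g.
  // PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd) expands to
  //
  //   Node* IntPtrOrSmiAdd(Node* a, Node* b, ParameterMode mode) { ... }
  //
  // and the |mode| test runs while the builtin is being generated, so the
  // emitted code contains only the chosen SmiAdd or IntPtrAdd operation.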
  TNode<Context> NoContextConstant();

#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
  compiler::TNode<std::remove_reference<decltype(                     \
      *std::declval<ReadOnlyRoots>().rootAccessorName())>::type>      \
      name##Constant();
  HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR

#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
  compiler::TNode<std::remove_reference<decltype(                     \
      *std::declval<Heap>().rootAccessorName())>::type>               \
      name##Constant();
  HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR

#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
  TNode<BoolT> Is##name(SloppyTNode<Object> value);               \
  TNode<BoolT> IsNot##name(SloppyTNode<Object> value);
  HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
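  // Editor's note (illustration, not part of the original header): for the
  // list entry V(UndefinedValue, undefined_value, Undefined), the macros
  // above declare
  //
  //   compiler::TNode<Oddball> UndefinedConstant();
  //   TNode<BoolT> IsUndefined(SloppyTNode<Object> value);
  //   TNode<BoolT> IsNotUndefined(SloppyTNode<Object> value);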
  TNode<Int64T> HashSeed();
  TNode<Int32T> HashSeedHigh();
  TNode<Int32T> HashSeedLow();

  Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
  TNode<Smi> LanguageModeConstant(LanguageMode mode) {
    return SmiConstant(static_cast<int>(mode));
  }

  bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
  bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
                                      ParameterMode mode);

  // Round the 32-bit payload of the provided word up to the next power of two.
  TNode<IntPtrT> IntPtrRoundUpToPowerOfTwo32(TNode<IntPtrT> value);
  // Select the maximum of the two provided IntPtr values.
  TNode<IntPtrT> IntPtrMax(SloppyTNode<IntPtrT> left,
                           SloppyTNode<IntPtrT> right);
  // Select the minimum of the two provided IntPtr values.
  TNode<IntPtrT> IntPtrMin(SloppyTNode<IntPtrT> left,
                           SloppyTNode<IntPtrT> right);

  // Float64 operations.
  TNode<Float64T> Float64Ceil(SloppyTNode<Float64T> x);
  TNode<Float64T> Float64Floor(SloppyTNode<Float64T> x);
  TNode<Float64T> Float64Round(SloppyTNode<Float64T> x);
  TNode<Float64T> Float64RoundToEven(SloppyTNode<Float64T> x);
  TNode<Float64T> Float64Trunc(SloppyTNode<Float64T> x);

  // Select the maximum of the two provided Number values.
  TNode<Number> NumberMax(SloppyTNode<Number> left, SloppyTNode<Number> right);
  // Select the minimum of the two provided Number values.
  TNode<Number> NumberMin(SloppyTNode<Number> left, SloppyTNode<Number> right);

  // After converting an index to an integer, calculate a relative index:
  // if index < 0, max(length + index, 0); else min(index, length)
  TNode<Smi> ConvertToRelativeIndex(TNode<Context> context,
                                    TNode<Object> index, TNode<Smi> length);
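  // Editor's note (illustration, not part of the original header): this is
  // the index clamping used by builtins such as Array.prototype.slice. With
  // length == 5:
  //   index  2  ->  min(2, 5)        == 2
  //   index -2  ->  max(5 + (-2), 0) == 3
  //   index -9  ->  max(5 + (-9), 0) == 0
  //   index  9  ->  min(9, 5)        == 5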
  // Returns true iff the given value fits into smi range and is >= 0.
  TNode<BoolT> IsValidPositiveSmi(TNode<IntPtrT> value);

  // Tag an IntPtr as a Smi value.
  TNode<Smi> SmiTag(SloppyTNode<IntPtrT> value);
  // Untag a Smi value as an IntPtr.
  TNode<IntPtrT> SmiUntag(SloppyTNode<Smi> value);

  // Smi conversions.
  TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value);
  TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
  TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
  TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); }
  TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);

  // Smi operations.
#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName)       \
  TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) {                     \
    if (SmiValuesAre32Bits()) {                                          \
      return BitcastWordToTaggedSigned(                                  \
          IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
    } else {                                                             \
      DCHECK(SmiValuesAre31Bits());                                      \
      if (kPointerSize == kInt64Size) {                                  \
        CSA_ASSERT(this, IsValidSmi(a));                                 \
        CSA_ASSERT(this, IsValidSmi(b));                                 \
      }                                                                  \
      return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(              \
          Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)),     \
                      TruncateIntPtrToInt32(BitcastTaggedToWord(b)))));  \
    }                                                                    \
  }
  SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add)
  SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
  SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And)
  SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or)
#undef SMI_ARITHMETIC_BINOP
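  // Editor's note (illustration, not part of the original header): the
  // bitcast trick above works because a Smi stores the integer n as
  // n << s with a zero tag bit (s is 1 for 31-bit payloads, 32 for 32-bit
  // payloads), and addition/subtraction distribute over the shift:
  //
  //   tag(a) + tag(b) == (a << s) + (b << s) == (a + b) << s == tag(a + b)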
  TNode<Smi> SmiInc(TNode<Smi> value) { return SmiAdd(value, SmiConstant(1)); }

  TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
  TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);

  TNode<Smi> SmiShl(TNode<Smi> a, int shift) {
    return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift));
  }

  TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
    return BitcastWordToTaggedSigned(
        WordAnd(WordShr(BitcastTaggedToWord(a), shift),
                BitcastTaggedToWord(SmiConstant(-1))));
  }

  Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
    if (mode == SMI_PARAMETERS) {
      return SmiShl(CAST(a), shift);
    } else {
      DCHECK_EQ(INTPTR_PARAMETERS, mode);
      return WordShl(a, shift);
    }
  }

  Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) {
    if (mode == SMI_PARAMETERS) {
      return SmiShr(CAST(a), shift);
    } else {
      DCHECK_EQ(INTPTR_PARAMETERS, mode);
      return WordShr(a, shift);
    }
  }

#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName)            \
  TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) {                     \
    if (SmiValuesAre32Bits()) {                                            \
      return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
    } else {                                                               \
      DCHECK(SmiValuesAre31Bits());                                        \
      if (kPointerSize == kInt64Size) {                                    \
        CSA_ASSERT(this, IsValidSmi(a));                                   \
        CSA_ASSERT(this, IsValidSmi(b));                                   \
      }                                                                    \
      return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)),    \
                         TruncateIntPtrToInt32(BitcastTaggedToWord(b)));   \
    }                                                                      \
  }
  SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal)
  SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual)
  SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan, Uint32GreaterThan)
  SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual,
                    Uint32GreaterThanOrEqual)
  SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan, Uint32LessThan)
  SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan, Int32LessThan)
  SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual,
                    Int32LessThanOrEqual)
  SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan, Int32GreaterThan)
  SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
                    Int32GreaterThanOrEqual)
#undef SMI_COMPARISON_OP

  TNode<Smi> SmiMax(TNode<Smi> a, TNode<Smi> b);
  TNode<Smi> SmiMin(TNode<Smi> a, TNode<Smi> b);
  // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
  TNode<Number> SmiMod(TNode<Smi> a, TNode<Smi> b);
  // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
  TNode<Number> SmiMul(TNode<Smi> a, TNode<Smi> b);

  // Tries to compute dividend / divisor for Smi inputs; branching to bailout
  // if the division needs to be performed as a floating point operation.
  TNode<Smi> TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor,
                       Label* bailout);
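  // Editor's note (illustration, not part of the original header): the Try*
  // helpers pair with a deferred label, in the same style as the TaggedTo*
  // casts above:
  //
  //   Label if_overflow(this, Label::kDeferred);
  //   TNode<Smi> sum = TrySmiAdd(a, b, &if_overflow);
  //   // ... fast path: |sum| is a valid Smi ...
  //   BIND(&if_overflow);
  //   // ... fall back to HeapNumber arithmetic ...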
  // Smi | HeapNumber operations.
  TNode<Number> NumberInc(SloppyTNode<Number> value);
  TNode<Number> NumberDec(SloppyTNode<Number> value);
  TNode<Number> NumberAdd(SloppyTNode<Number> a, SloppyTNode<Number> b);
  TNode<Number> NumberSub(SloppyTNode<Number> a, SloppyTNode<Number> b);
  void GotoIfNotNumber(Node* value, Label* is_not_number);
  void GotoIfNumber(Node* value, Label* is_number);
  TNode<Number> SmiToNumber(TNode<Smi> v) { return v; }

  TNode<Number> BitwiseOp(Node* left32, Node* right32, Operation bitwise_op);

  // Allocate an object of the given size.
  Node* AllocateInNewSpace(Node* size, AllocationFlags flags = kNone);
  Node* AllocateInNewSpace(int size, AllocationFlags flags = kNone);
  Node* Allocate(Node* size, AllocationFlags flags = kNone);
  Node* Allocate(int size, AllocationFlags flags = kNone);
  Node* InnerAllocate(Node* previous, int offset);
  Node* InnerAllocate(Node* previous, Node* offset);
  Node* IsRegularHeapObjectSize(Node* size);

  typedef std::function<void(Label*, Label*)> BranchGenerator;
  typedef std::function<Node*()> NodeGenerator;

  void Assert(const BranchGenerator& branch, const char* message = nullptr,
              const char* file = nullptr, int line = 0,
              Node* extra_node1 = nullptr, const char* extra_node1_name = "",
              Node* extra_node2 = nullptr, const char* extra_node2_name = "",
              Node* extra_node3 = nullptr, const char* extra_node3_name = "",
              Node* extra_node4 = nullptr, const char* extra_node4_name = "",
              Node* extra_node5 = nullptr, const char* extra_node5_name = "");
  void Assert(const NodeGenerator& condition_body,
              const char* message = nullptr, const char* file = nullptr,
              int line = 0, Node* extra_node1 = nullptr,
              const char* extra_node1_name = "", Node* extra_node2 = nullptr,
              const char* extra_node2_name = "", Node* extra_node3 = nullptr,
              const char* extra_node3_name = "", Node* extra_node4 = nullptr,
              const char* extra_node4_name = "", Node* extra_node5 = nullptr,
              const char* extra_node5_name = "");
  void Check(const BranchGenerator& branch, const char* message = nullptr,
             const char* file = nullptr, int line = 0,
             Node* extra_node1 = nullptr, const char* extra_node1_name = "",
             Node* extra_node2 = nullptr, const char* extra_node2_name = "",
             Node* extra_node3 = nullptr, const char* extra_node3_name = "",
             Node* extra_node4 = nullptr, const char* extra_node4_name = "",
             Node* extra_node5 = nullptr, const char* extra_node5_name = "");
  void Check(const NodeGenerator& condition_body,
             const char* message = nullptr, const char* file = nullptr,
             int line = 0, Node* extra_node1 = nullptr,
             const char* extra_node1_name = "", Node* extra_node2 = nullptr,
             const char* extra_node2_name = "", Node* extra_node3 = nullptr,
             const char* extra_node3_name = "", Node* extra_node4 = nullptr,
             const char* extra_node4_name = "", Node* extra_node5 = nullptr,
             const char* extra_node5_name = "");
  void FastCheck(TNode<BoolT> condition);

  // The following Call wrappers call an object according to the semantics
  // that one finds in the EcmaScript spec, operating on a Callable (e.g. a
  // JSFunction or proxy) rather than a Code object.
  template <class... TArgs>
  TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
                     TNode<JSReceiver> receiver, TArgs... args) {
    return UncheckedCast<Object>(CallJS(
        CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
        context, callable, receiver, args...));
  }
  template <class... TArgs>
  TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
                     TNode<Object> receiver, TArgs... args) {
    if (IsUndefinedConstant(receiver) || IsNullConstant(receiver)) {
      return UncheckedCast<Object>(CallJS(
          CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
          context, callable, receiver, args...));
    }
    return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
                                        callable, receiver, args...));
  }
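  // Editor's note (illustration, not part of the original header): invoking a
  // JS callback the way the spec's Call(F, V, ...args) does, where |context|,
  // |callback| and |receiver| are hypothetical nodes already in scope:
  //
  //   TNode<Object> result = Call(context, callback, receiver, element);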
  template <class A, class F, class G>
  TNode<A> Select(SloppyTNode<BoolT> condition, const F& true_body,
                  const G& false_body) {
    return UncheckedCast<A>(SelectImpl(
        condition,
        [&]() -> Node* { return implicit_cast<TNode<A>>(true_body()); },
        [&]() -> Node* { return implicit_cast<TNode<A>>(false_body()); },
        MachineRepresentationOf<A>::value));
  }

  template <class A>
  TNode<A> SelectConstant(TNode<BoolT> condition, TNode<A> true_value,
                          TNode<A> false_value) {
    return Select<A>(condition, [=] { return true_value; },
                     [=] { return false_value; });
  }
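  // Editor's note (illustration, not part of the original header): Select is
  // the CSA analogue of the C++ ternary operator; each lambda only emits code
  // on its own branch. For example, clamping an index:
  //
  //   TNode<Smi> clamped =
  //       SelectConstant<Smi>(SmiLessThan(index, length), index, length);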
  TNode<Int32T> SelectInt32Constant(SloppyTNode<BoolT> condition,
                                    int true_value, int false_value);
  TNode<IntPtrT> SelectIntPtrConstant(SloppyTNode<BoolT> condition,
                                      int true_value, int false_value);
  TNode<Oddball> SelectBooleanConstant(SloppyTNode<BoolT> condition);
  TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi* true_value,
                               Smi* false_value);
  TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
                               Smi* false_value) {
    return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
  }
  TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi* true_value,
                               int false_value) {
    return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
  }
  TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
                               int false_value) {
    return SelectSmiConstant(condition, Smi::FromInt(true_value),
                             Smi::FromInt(false_value));
  }

  TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);

  // Check a value for smi-ness
  TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a);
  TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a);
  TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a);
  // Check that the value is a non-negative smi.
  TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
  // Check that a word has a word-aligned address.
  TNode<BoolT> WordIsWordAligned(SloppyTNode<WordT> word);
  TNode<BoolT> WordIsPowerOfTwo(SloppyTNode<IntPtrT> value);

#if DEBUG
  void Bind(Label* label, AssemblerDebugInfo debug_info);
#else
  void Bind(Label* label);
#endif  // DEBUG

  void BranchIfSmiEqual(TNode<Smi> a, TNode<Smi> b, Label* if_true,
                        Label* if_false) {
    Branch(SmiEqual(a, b), if_true, if_false);
  }

  void BranchIfSmiLessThan(TNode<Smi> a, TNode<Smi> b, Label* if_true,
                           Label* if_false) {
    Branch(SmiLessThan(a, b), if_true, if_false);
  }

  void BranchIfSmiLessThanOrEqual(TNode<Smi> a, TNode<Smi> b, Label* if_true,
                                  Label* if_false) {
    Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
  }

  void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
    Branch(Float64Equal(value, value), if_false, if_true);
  }

  // Branches to {if_true} if ToBoolean applied to {value} yields true,
  // otherwise goes to {if_false}.
  void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);

  void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);

  void BranchIfFastJSArray(Node* object, Node* context, Label* if_true,
                           Label* if_false, bool iteration_only = false);
  void BranchIfNotFastJSArray(Node* object, Node* context, Label* if_true,
                              Label* if_false) {
    BranchIfFastJSArray(object, context, if_false, if_true);
  }
  void BranchIfFastJSArrayForCopy(Node* object, Node* context, Label* if_true,
                                  Label* if_false);

  // Branches to {if_true} when --force-slow-path flag has been passed.
  // It's used for testing to ensure that slow-path implementations behave
  // equivalently to the corresponding fast paths (where applicable).
  //
  // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
  void GotoIfForceSlowPath(Label* if_true);

  // Load value from current frame by given offset in bytes.
  Node* LoadFromFrame(int offset, MachineType rep = MachineType::AnyTagged());
  // Load value from current parent frame by given offset in bytes.
  Node* LoadFromParentFrame(int offset,
                            MachineType rep = MachineType::AnyTagged());

  // Load target function from the current JS frame.
  // This is an alternative way of getting the target function in addition to
  // Parameter(Descriptor::kJSTarget). The latter should be used near the
  // beginning of builtin code while the target value is still in the register
  // and the former should be used in slow paths in order to reduce register
  // pressure on the fast path.
  TNode<JSFunction> LoadTargetFromFrame();

  // Load an object pointer from a buffer that isn't in the heap.
  Node* LoadBufferObject(Node* buffer, int offset,
                         MachineType rep = MachineType::AnyTagged());
  // Load a field from an object on the heap.
  Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
                        MachineType rep);
  template <class T, typename std::enable_if<
                         std::is_convertible<TNode<T>, TNode<Object>>::value,
                         int>::type = 0>
  TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
    return CAST(LoadObjectField(object, offset, MachineTypeOf<T>::value));
  }
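  // Editor's note (illustration, not part of the original header): the
  // template overload gives a typed field load, e.g. reading an object's map
  // (HeapObject::kMapOffset is the map field's byte offset):
  //
  //   TNode<Map> map = LoadObjectField<Map>(object, HeapObject::kMapOffset);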