Android 10 (10.0.0_r6)
external/v8/src/code-stub-assembler.cc
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/code-stub-assembler.h"

#include "src/code-factory.h"
#include "src/frames-inl.h"
#include "src/frames.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/wasm/wasm-objects.h"

namespace v8 {
namespace internal {

using compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
template <class T>
using SloppyTNode = compiler::SloppyTNode<T>;

CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
    : compiler::CodeAssembler(state) {
  if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) {
    HandleBreakOnNode();
  }
}

void CodeStubAssembler::HandleBreakOnNode() {
  // FLAG_csa_trap_on_node should be in a form "STUB,NODE" where STUB is a
  // string specifying the name of a stub and NODE is number specifying node
  // id.
  const char* name = state()->name();
  size_t name_length = strlen(name);
  if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) {
    // Different name.
    return;
  }
  size_t option_length = strlen(FLAG_csa_trap_on_node);
  if (option_length < name_length + 2 ||
      FLAG_csa_trap_on_node[name_length] != ',') {
    // Option is too short.
    return;
  }
  const char* start = &FLAG_csa_trap_on_node[name_length + 1];
  char* end;
  int node_id = static_cast<int>(strtol(start, &end, 10));
  if (start == end) {
    // Bad node id.
    return;
  }
  BreakOnNode(node_id);
}

void CodeStubAssembler::Assert(const BranchGenerator& branch,
                               const char* message, const char* file, int line,
                               Node* extra_node1, const char* extra_node1_name,
                               Node* extra_node2, const char* extra_node2_name,
                               Node* extra_node3, const char* extra_node3_name,
                               Node* extra_node4, const char* extra_node4_name,
                               Node* extra_node5,
                               const char* extra_node5_name) {
#if defined(DEBUG)
  if (FLAG_debug_code) {
    Check(branch, message, file, line, extra_node1, extra_node1_name,
          extra_node2, extra_node2_name, extra_node3, extra_node3_name,
          extra_node4, extra_node4_name, extra_node5, extra_node5_name);
  }
#endif
}

void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
                               const char* message, const char* file, int line,
                               Node* extra_node1, const char* extra_node1_name,
                               Node* extra_node2, const char* extra_node2_name,
                               Node* extra_node3, const char* extra_node3_name,
                               Node* extra_node4, const char* extra_node4_name,
                               Node* extra_node5,
                               const char* extra_node5_name) {
#if defined(DEBUG)
  if (FLAG_debug_code) {
    Check(condition_body, message, file, line, extra_node1, extra_node1_name,
          extra_node2, extra_node2_name, extra_node3, extra_node3_name,
          extra_node4, extra_node4_name, extra_node5, extra_node5_name);
  }
#endif
}

#ifdef DEBUG
namespace {
void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node,
                            const char* node_name) {
  if (node != nullptr) {
    csa->CallRuntime(Runtime::kPrintWithNameForAssert, csa->SmiConstant(0),
                     csa->StringConstant(node_name), node);
  }
}
}  // namespace
#endif

void CodeStubAssembler::Check(const BranchGenerator& branch,
                              const char* message, const char* file, int line,
                              Node* extra_node1, const char* extra_node1_name,
                              Node* extra_node2, const char* extra_node2_name,
                              Node* extra_node3, const char* extra_node3_name,
                              Node* extra_node4, const char* extra_node4_name,
                              Node* extra_node5,
                              const char* extra_node5_name) {
  Label ok(this);
  Label not_ok(this, Label::kDeferred);
  if (message != nullptr && FLAG_code_comments) {
    Comment("[ Assert: %s", message);
  } else {
    Comment("[ Assert");
  }
  branch(&ok, &not_ok);

  BIND(&not_ok);
  DCHECK_NOT_NULL(message);
  char chars[1024];
  Vector<char> buffer(chars);
  if (file != nullptr) {
    SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
  } else {
    SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
  }
  Node* message_node = StringConstant(&(buffer[0]));

#ifdef DEBUG
  // Only print the extra nodes in debug builds.
  MaybePrintNodeWithName(this, extra_node1, extra_node1_name);
  MaybePrintNodeWithName(this, extra_node2, extra_node2_name);
  MaybePrintNodeWithName(this, extra_node3, extra_node3_name);
  MaybePrintNodeWithName(this, extra_node4, extra_node4_name);
  MaybePrintNodeWithName(this, extra_node5, extra_node5_name);
#endif

  DebugAbort(message_node);
  Unreachable();

  BIND(&ok);
  Comment("] Assert");
}

void CodeStubAssembler::Check(const NodeGenerator& condition_body,
                              const char* message, const char* file, int line,
                              Node* extra_node1, const char* extra_node1_name,
                              Node* extra_node2, const char* extra_node2_name,
                              Node* extra_node3, const char* extra_node3_name,
                              Node* extra_node4, const char* extra_node4_name,
                              Node* extra_node5,
                              const char* extra_node5_name) {
  BranchGenerator branch = [=](Label* ok, Label* not_ok) {
    Node* condition = condition_body();
    DCHECK_NOT_NULL(condition);
    Branch(condition, ok, not_ok);
  };

  Check(branch, message, file, line, extra_node1, extra_node1_name,
        extra_node2, extra_node2_name, extra_node3, extra_node3_name,
        extra_node4, extra_node4_name, extra_node5, extra_node5_name);
}

void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
  Label ok(this);
  GotoIf(condition, &ok);
  DebugBreak();
  Goto(&ok);
  BIND(&ok);
}

Node* CodeStubAssembler::SelectImpl(TNode<BoolT> condition,
                                    const NodeGenerator& true_body,
                                    const NodeGenerator& false_body,
                                    MachineRepresentation rep) {
  VARIABLE(value, rep);
  Label vtrue(this), vfalse(this), end(this);
  Branch(condition, &vtrue, &vfalse);

  BIND(&vtrue);
  {
    value.Bind(true_body());
    Goto(&end);
  }
  BIND(&vfalse);
  {
    value.Bind(false_body());
    Goto(&end);
  }

  BIND(&end);
  return value.value();
}

TNode<Int32T>
CodeStubAssembler::SelectInt32Constant(
    SloppyTNode<BoolT> condition, int true_value, int false_value) {
  return SelectConstant<Int32T>(condition, Int32Constant(true_value),
                                Int32Constant(false_value));
}

TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(
    SloppyTNode<BoolT> condition, int true_value, int false_value) {
  return SelectConstant<IntPtrT>(condition, IntPtrConstant(true_value),
                                 IntPtrConstant(false_value));
}

TNode<Oddball> CodeStubAssembler::SelectBooleanConstant(
    SloppyTNode<BoolT> condition) {
  return SelectConstant<Oddball>(condition, TrueConstant(), FalseConstant());
}

TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition,
                                                Smi* true_value,
                                                Smi* false_value) {
  return SelectConstant<Smi>(condition, SmiConstant(true_value),
                             SmiConstant(false_value));
}

TNode<Object> CodeStubAssembler::NoContextConstant() {
  return SmiConstant(Context::kNoContext);
}

#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name)  \
  compiler::TNode<std::remove_reference<decltype(                      \
      *std::declval<Heap>().rootAccessorName())>::type>                \
      CodeStubAssembler::name##Constant() {                            \
    return UncheckedCast<std::remove_reference<decltype(               \
        *std::declval<Heap>().rootAccessorName())>::type>(             \
        LoadRoot(Heap::k##rootIndexName##RootIndex));                  \
  }
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR

#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name)  \
  compiler::TNode<std::remove_reference<decltype(                      \
      *std::declval<Heap>().rootAccessorName())>::type>                \
      CodeStubAssembler::name##Constant() {                            \
    return UncheckedCast<std::remove_reference<decltype(               \
        *std::declval<Heap>().rootAccessorName())>::type>(             \
        LoadRoot(Heap::k##rootIndexName##RootIndex));                  \
  }
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR

#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
  compiler::TNode<BoolT> CodeStubAssembler::Is##name(             \
      SloppyTNode<Object> value) {                                \
    return WordEqual(value, name##Constant());                    \
  }                                                               \
  compiler::TNode<BoolT> CodeStubAssembler::IsNot##name(          \
      SloppyTNode<Object> value) {                                \
    return WordNotEqual(value, name##Constant());                 \
  }
HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST);
#undef HEAP_CONSTANT_TEST

TNode<Int64T>
CodeStubAssembler::HashSeed() {
  DCHECK(Is64());
  TNode<HeapObject> hash_seed_root =
      TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
  return TNode<Int64T>::UncheckedCast(LoadObjectField(
      hash_seed_root, ByteArray::kHeaderSize, MachineType::Int64()));
}

TNode<Int32T> CodeStubAssembler::HashSeedHigh() {
  DCHECK(!Is64());
#ifdef V8_TARGET_BIG_ENDIAN
  static int kOffset = 0;
#else
  static int kOffset = kInt32Size;
#endif
  TNode<HeapObject> hash_seed_root =
      TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
  return TNode<Int32T>::UncheckedCast(LoadObjectField(
      hash_seed_root, ByteArray::kHeaderSize + kOffset, MachineType::Int32()));
}

TNode<Int32T> CodeStubAssembler::HashSeedLow() {
  DCHECK(!Is64());
#ifdef V8_TARGET_BIG_ENDIAN
  static int kOffset = kInt32Size;
#else
  static int kOffset = 0;
#endif
  TNode<HeapObject> hash_seed_root =
      TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
  return TNode<Int32T>::UncheckedCast(LoadObjectField(
      hash_seed_root, ByteArray::kHeaderSize + kOffset, MachineType::Int32()));
}

Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
  if (mode == SMI_PARAMETERS) {
    return SmiConstant(value);
  } else {
    DCHECK_EQ(INTPTR_PARAMETERS, mode);
    return IntPtrConstant(value);
  }
}

bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
                                                  ParameterMode mode) {
  int32_t constant_test;
  Smi* smi_test;
  if (mode == INTPTR_PARAMETERS) {
    if (ToInt32Constant(test, constant_test) && constant_test == 0) {
      return true;
    }
  } else {
    DCHECK_EQ(mode, SMI_PARAMETERS);
    if (ToSmiConstant(test, smi_test) && smi_test->value() == 0) {
      return true;
    }
  }
  return false;
}

bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
                                                       int* value,
                                                       ParameterMode mode) {
  int32_t int32_constant;
  if (mode == INTPTR_PARAMETERS) {
    if (ToInt32Constant(maybe_constant, int32_constant)) {
      *value = int32_constant;
      return true;
    }
  } else {
    DCHECK_EQ(mode, SMI_PARAMETERS);
    Smi* smi_constant;
    if (ToSmiConstant(maybe_constant, smi_constant)) {
      *value = Smi::ToInt(smi_constant);
      return true;
    }
  }
  return false;
}

TNode<IntPtrT>
CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(
    TNode<IntPtrT> value) {
  Comment("IntPtrRoundUpToPowerOfTwo32");
  CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
  value = Signed(IntPtrSub(value, IntPtrConstant(1)));
  for (int i = 1; i <= 16; i *= 2) {
    value = Signed(WordOr(value, WordShr(value, IntPtrConstant(i))));
  }
  return Signed(IntPtrAdd(value, IntPtrConstant(1)));
}

Node* CodeStubAssembler::MatchesParameterMode(Node* value,
                                              ParameterMode mode) {
  if (mode == SMI_PARAMETERS) {
    return TaggedIsSmi(value);
  } else {
    return Int32Constant(1);
  }
}

TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
  // value && !(value & (value - 1))
  return WordEqual(
      Select<IntPtrT>(
          WordEqual(value, IntPtrConstant(0)),
          [=] { return IntPtrConstant(1); },
          [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); }),
      IntPtrConstant(0));
}

TNode<Float64T> CodeStubAssembler::Float64Round(SloppyTNode<Float64T> x) {
  Node* one = Float64Constant(1.0);
  Node* one_half = Float64Constant(0.5);

  Label return_x(this);

  // Round up {x} towards Infinity.
  VARIABLE(var_x, MachineRepresentation::kFloat64, Float64Ceil(x));

  GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
         &return_x);
  var_x.Bind(Float64Sub(var_x.value(), one));
  Goto(&return_x);

  BIND(&return_x);
  return TNode<Float64T>::UncheckedCast(var_x.value());
}

TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) {
  if (IsFloat64RoundUpSupported()) {
    return Float64RoundUp(x);
  }

  Node* one = Float64Constant(1.0);
  Node* zero = Float64Constant(0.0);
  Node* two_52 = Float64Constant(4503599627370496.0E0);
  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);

  VARIABLE(var_x, MachineRepresentation::kFloat64, x);
  Label return_x(this), return_minus_x(this);

  // Check if {x} is greater than zero.
  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
         &if_xnotgreaterthanzero);

  BIND(&if_xgreaterthanzero);
  {
    // Just return {x} unless it's in the range ]0,2^52[.
    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);

    // Round positive {x} towards Infinity.
    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
    GotoIfNot(Float64LessThan(var_x.value(), x), &return_x);
    var_x.Bind(Float64Add(var_x.value(), one));
    Goto(&return_x);
  }

  BIND(&if_xnotgreaterthanzero);
  {
    // Just return {x} unless it's in the range ]-2^52,0[
    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
    GotoIfNot(Float64LessThan(x, zero), &return_x);

    // Round negated {x} towards Infinity and return the result negated.
    Node* minus_x = Float64Neg(x);
    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
    GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
    var_x.Bind(Float64Sub(var_x.value(), one));
    Goto(&return_minus_x);
  }

  BIND(&return_minus_x);
  var_x.Bind(Float64Neg(var_x.value()));
  Goto(&return_x);

  BIND(&return_x);
  return TNode<Float64T>::UncheckedCast(var_x.value());
}

TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) {
  if (IsFloat64RoundDownSupported()) {
    return Float64RoundDown(x);
  }

  Node* one = Float64Constant(1.0);
  Node* zero = Float64Constant(0.0);
  Node* two_52 = Float64Constant(4503599627370496.0E0);
  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);

  VARIABLE(var_x, MachineRepresentation::kFloat64, x);
  Label return_x(this), return_minus_x(this);

  // Check if {x} is greater than zero.
  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
         &if_xnotgreaterthanzero);

  BIND(&if_xgreaterthanzero);
  {
    // Just return {x} unless it's in the range ]0,2^52[.
    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);

    // Round positive {x} towards -Infinity.
    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
    GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
    var_x.Bind(Float64Sub(var_x.value(), one));
    Goto(&return_x);
  }

  BIND(&if_xnotgreaterthanzero);
  {
    // Just return {x} unless it's in the range ]-2^52,0[
    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
    GotoIfNot(Float64LessThan(x, zero), &return_x);

    // Round negated {x} towards -Infinity and return the result negated.
    Node* minus_x = Float64Neg(x);
    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
    GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
    var_x.Bind(Float64Add(var_x.value(), one));
    Goto(&return_minus_x);
  }

  BIND(&return_minus_x);
  var_x.Bind(Float64Neg(var_x.value()));
  Goto(&return_x);

  BIND(&return_x);
  return TNode<Float64T>::UncheckedCast(var_x.value());
}

TNode<Float64T> CodeStubAssembler::Float64RoundToEven(
    SloppyTNode<Float64T> x) {
  if (IsFloat64RoundTiesEvenSupported()) {
    return Float64RoundTiesEven(x);
  }
  // See ES#sec-touint8clamp for details.
  Node* f = Float64Floor(x);
  Node* f_and_half = Float64Add(f, Float64Constant(0.5));

  VARIABLE(var_result, MachineRepresentation::kFloat64);
  Label return_f(this), return_f_plus_one(this), done(this);

  GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
  GotoIf(Float64LessThan(x, f_and_half), &return_f);
  {
    Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0));
    Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
           &return_f_plus_one);
  }

  BIND(&return_f);
  var_result.Bind(f);
  Goto(&done);

  BIND(&return_f_plus_one);
  var_result.Bind(Float64Add(f, Float64Constant(1.0)));
  Goto(&done);

  BIND(&done);
  return TNode<Float64T>::UncheckedCast(var_result.value());
}

TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
  if (IsFloat64RoundTruncateSupported()) {
    return Float64RoundTruncate(x);
  }

  Node* one = Float64Constant(1.0);
  Node* zero = Float64Constant(0.0);
  Node* two_52 = Float64Constant(4503599627370496.0E0);
  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);

  VARIABLE(var_x, MachineRepresentation::kFloat64, x);
  Label return_x(this), return_minus_x(this);

  // Check if {x} is greater than 0.
  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
         &if_xnotgreaterthanzero);

  BIND(&if_xgreaterthanzero);
  {
    if (IsFloat64RoundDownSupported()) {
      var_x.Bind(Float64RoundDown(x));
    } else {
      // Just return {x} unless it's in the range ]0,2^52[.
      GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);

      // Round positive {x} towards -Infinity.
      var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
      GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
      var_x.Bind(Float64Sub(var_x.value(), one));
    }
    Goto(&return_x);
  }

  BIND(&if_xnotgreaterthanzero);
  {
    if (IsFloat64RoundUpSupported()) {
      var_x.Bind(Float64RoundUp(x));
      Goto(&return_x);
    } else {
      // Just return {x} unless its in the range ]-2^52,0[.
      GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
      GotoIfNot(Float64LessThan(x, zero), &return_x);

      // Round negated {x} towards -Infinity and return result negated.
      Node* minus_x = Float64Neg(x);
      var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
      GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
      var_x.Bind(Float64Sub(var_x.value(), one));
      Goto(&return_minus_x);
    }
  }

  BIND(&return_minus_x);
  var_x.Bind(Float64Neg(var_x.value()));
  Goto(&return_x);

  BIND(&return_x);
  return TNode<Float64T>::UncheckedCast(var_x.value());
}

TNode<BoolT>
CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
  if (SmiValuesAre31Bits() && kPointerSize == kInt64Size) {
    // Check that the Smi value is properly sign-extended.
    TNode<IntPtrT> value = Signed(BitcastTaggedToWord(smi));
    return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value)));
  }
  return Int32TrueConstant();
}

Node* CodeStubAssembler::SmiShiftBitsConstant() {
  return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}

TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
  TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
  TNode<Smi> smi =
      BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant()));
#if V8_COMPRESS_POINTERS
  CSA_ASSERT(this, IsValidSmi(smi));
#endif
  return smi;
}

TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
  intptr_t constant_value;
  if (ToIntPtrConstant(value, constant_value)) {
    return (static_cast<uintptr_t>(constant_value) <=
            static_cast<uintptr_t>(Smi::kMaxValue))
               ? Int32TrueConstant()
               : Int32FalseConstant();
  }

  return UintPtrLessThanOrEqual(value, IntPtrConstant(Smi::kMaxValue));
}

TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
  int32_t constant_value;
  if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
    return SmiConstant(constant_value);
  }
  TNode<Smi> smi =
      BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
#if V8_COMPRESS_POINTERS
  CSA_ASSERT(this, IsValidSmi(smi));
#endif
  return smi;
}

TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
#if V8_COMPRESS_POINTERS
  CSA_ASSERT(this, IsValidSmi(value));
#endif
  intptr_t constant_value;
  if (ToIntPtrConstant(value, constant_value)) {
    return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
  }
  return Signed(WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
}

TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
  TNode<IntPtrT> result = SmiUntag(value);
  return TruncateIntPtrToInt32(result);
}

TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) {
  return ChangeInt32ToFloat64(SmiToInt32(value));
}

TNode<Smi> CodeStubAssembler::SmiMax(TNode<Smi> a, TNode<Smi> b) {
  return SelectConstant<Smi>(SmiLessThan(a, b), b, a);
}

TNode<Smi> CodeStubAssembler::SmiMin(TNode<Smi> a, TNode<Smi> b) {
  return SelectConstant<Smi>(SmiLessThan(a, b), a, b);
}

TNode<Smi>
CodeStubAssembler::TrySmiAdd(TNode
lhs, TNode
rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { TNode
> pair = IntPtrAddWithOverflow( BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs)); TNode
overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode
result = Projection<0>(pair); return BitcastWordToTaggedSigned(result); } else { DCHECK(SmiValuesAre31Bits()); TNode
> pair = Int32AddWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)), TruncateIntPtrToInt32(BitcastTaggedToWord(rhs))); TNode
overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode
result = Projection<0>(pair); return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); } } TNode
CodeStubAssembler::TrySmiSub(TNode
lhs, TNode
rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { TNode
> pair = IntPtrSubWithOverflow( BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs)); TNode
overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode
result = Projection<0>(pair); return BitcastWordToTaggedSigned(result); } else { DCHECK(SmiValuesAre31Bits()); TNode
> pair = Int32SubWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)), TruncateIntPtrToInt32(BitcastTaggedToWord(rhs))); TNode
overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode
result = Projection<0>(pair); return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); } } TNode
CodeStubAssembler::NumberMax(SloppyTNode
a, SloppyTNode
b) { // TODO(danno): This could be optimized by specifically handling smi cases. VARIABLE(result, MachineRepresentation::kTagged); Label done(this), greater_than_equal_a(this), greater_than_equal_b(this); GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a); GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b); result.Bind(NanConstant()); Goto(&done); BIND(&greater_than_equal_a); result.Bind(a); Goto(&done); BIND(&greater_than_equal_b); result.Bind(b); Goto(&done); BIND(&done); return TNode
::UncheckedCast(result.value()); } TNode
CodeStubAssembler::NumberMin(SloppyTNode
a, SloppyTNode
b) { // TODO(danno): This could be optimized by specifically handling smi cases. VARIABLE(result, MachineRepresentation::kTagged); Label done(this), greater_than_equal_a(this), greater_than_equal_b(this); GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a); GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b); result.Bind(NanConstant()); Goto(&done); BIND(&greater_than_equal_a); result.Bind(b); Goto(&done); BIND(&greater_than_equal_b); result.Bind(a); Goto(&done); BIND(&done); return TNode
::UncheckedCast(result.value()); } TNode
CodeStubAssembler::ConvertToRelativeIndex( TNode
context, TNode
index, TNode
length) { TVARIABLE(IntPtrT, result); TNode
const index_int = ToInteger_Inline(context, index, CodeStubAssembler::kTruncateMinusZero); TNode
zero = IntPtrConstant(0); Label done(this); Label if_issmi(this), if_isheapnumber(this, Label::kDeferred); Branch(TaggedIsSmi(index_int), &if_issmi, &if_isheapnumber); BIND(&if_issmi); { TNode
const index_smi = CAST(index_int); result = Select
( IntPtrLessThan(SmiUntag(index_smi), zero), [=] { return IntPtrMax(IntPtrAdd(length, SmiUntag(index_smi)), zero); }, [=] { return IntPtrMin(SmiUntag(index_smi), length); }); Goto(&done); } BIND(&if_isheapnumber); { // If {index} is a heap number, it is definitely out of bounds. If it is // negative, {index} = max({length} + {index}),0) = 0'. If it is positive, // set {index} to {length}. TNode
const index_hn = CAST(index_int); TNode
const float_zero = Float64Constant(0.); TNode
const index_float = LoadHeapNumberValue(index_hn); result = SelectConstant
(Float64LessThan(index_float, float_zero), zero, length); Goto(&done); } BIND(&done); return result.value(); } TNode
CodeStubAssembler::SmiMod(TNode
a, TNode
b) { TVARIABLE(Number, var_result); Label return_result(this, &var_result), return_minuszero(this, Label::kDeferred), return_nan(this, Label::kDeferred); // Untag {a} and {b}. TNode
int_a = SmiToInt32(a); TNode
int_b = SmiToInt32(b); // Return NaN if {b} is zero. GotoIf(Word32Equal(int_b, Int32Constant(0)), &return_nan); // Check if {a} is non-negative. Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred); Branch(Int32LessThanOrEqual(Int32Constant(0), int_a), &if_aisnotnegative, &if_aisnegative); BIND(&if_aisnotnegative); { // Fast case, don't need to check any other edge cases. TNode
r = Int32Mod(int_a, int_b); var_result = SmiFromInt32(r); Goto(&return_result); } BIND(&if_aisnegative); { if (SmiValuesAre32Bits()) { // Check if {a} is kMinInt and {b} is -1 (only relevant if the // kMinInt is actually representable as a Smi). Label join(this); GotoIfNot(Word32Equal(int_a, Int32Constant(kMinInt)), &join); GotoIf(Word32Equal(int_b, Int32Constant(-1)), &return_minuszero); Goto(&join); BIND(&join); } // Perform the integer modulus operation. TNode
r = Int32Mod(int_a, int_b); // Check if {r} is zero, and if so return -0, because we have to // take the sign of the left hand side {a}, which is negative. GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero); // The remainder {r} can be outside the valid Smi range on 32bit // architectures, so we cannot just say SmiFromInt32(r) here. var_result = ChangeInt32ToTagged(r); Goto(&return_result); } BIND(&return_minuszero); var_result = MinusZeroConstant(); Goto(&return_result); BIND(&return_nan); var_result = NanConstant(); Goto(&return_result); BIND(&return_result); return var_result.value(); } TNode
CodeStubAssembler::SmiMul(TNode
a, TNode
b) { TVARIABLE(Number, var_result); VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64); VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64); Label return_result(this, &var_result); // Both {a} and {b} are Smis. Convert them to integers and multiply. Node* lhs32 = SmiToInt32(a); Node* rhs32 = SmiToInt32(b); Node* pair = Int32MulWithOverflow(lhs32, rhs32); Node* overflow = Projection(1, pair); // Check if the multiplication overflowed. Label if_overflow(this, Label::kDeferred), if_notoverflow(this); Branch(overflow, &if_overflow, &if_notoverflow); BIND(&if_notoverflow); { // If the answer is zero, we may need to return -0.0, depending on the // input. Label answer_zero(this), answer_not_zero(this); Node* answer = Projection(0, pair); Node* zero = Int32Constant(0); Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero); BIND(&answer_not_zero); { var_result = ChangeInt32ToTagged(answer); Goto(&return_result); } BIND(&answer_zero); { Node* or_result = Word32Or(lhs32, rhs32); Label if_should_be_negative_zero(this), if_should_be_zero(this); Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero, &if_should_be_zero); BIND(&if_should_be_negative_zero); { var_result = MinusZeroConstant(); Goto(&return_result); } BIND(&if_should_be_zero); { var_result = SmiConstant(0); Goto(&return_result); } } } BIND(&if_overflow); { var_lhs_float64.Bind(SmiToFloat64(a)); var_rhs_float64.Bind(SmiToFloat64(b)); Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value()); var_result = AllocateHeapNumberWithValue(value); Goto(&return_result); } BIND(&return_result); return var_result.value(); } TNode
CodeStubAssembler::TrySmiDiv(TNode
dividend, TNode
divisor, Label* bailout) { // Both {a} and {b} are Smis. Bailout to floating point division if {divisor} // is zero. GotoIf(WordEqual(divisor, SmiConstant(0)), bailout); // Do floating point division if {dividend} is zero and {divisor} is // negative. Label dividend_is_zero(this), dividend_is_not_zero(this); Branch(WordEqual(dividend, SmiConstant(0)), ÷nd_is_zero, ÷nd_is_not_zero); BIND(÷nd_is_zero); { GotoIf(SmiLessThan(divisor, SmiConstant(0)), bailout); Goto(÷nd_is_not_zero); } BIND(÷nd_is_not_zero); TNode
untagged_divisor = SmiToInt32(divisor); TNode
untagged_dividend = SmiToInt32(dividend); // Do floating point division if {dividend} is kMinInt (or kMinInt - 1 // if the Smi size is 31) and {divisor} is -1. Label divisor_is_minus_one(this), divisor_is_not_minus_one(this); Branch(Word32Equal(untagged_divisor, Int32Constant(-1)), &divisor_is_minus_one, &divisor_is_not_minus_one); BIND(&divisor_is_minus_one); { GotoIf(Word32Equal( untagged_dividend, Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))), bailout); Goto(&divisor_is_not_minus_one); } BIND(&divisor_is_not_minus_one); TNode
untagged_result = Int32Div(untagged_dividend, untagged_divisor); TNode
truncated = Signed(Int32Mul(untagged_result, untagged_divisor)); // Do floating point division if the remainder is not 0. GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout); return SmiFromInt32(untagged_result); } TNode
CodeStubAssembler::TruncateIntPtrToInt32( SloppyTNode
value) { if (Is64()) { return TruncateInt64ToInt32(ReinterpretCast
(value)); } return ReinterpretCast
(value); } TNode
CodeStubAssembler::TaggedIsSmi(SloppyTNode
a) { return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)), IntPtrConstant(0)); } TNode
CodeStubAssembler::TaggedIsSmi(TNode
a) { return WordEqual( WordAnd(BitcastMaybeObjectToWord(a), IntPtrConstant(kSmiTagMask)), IntPtrConstant(0)); } TNode
CodeStubAssembler::TaggedIsNotSmi(SloppyTNode
a) { return WordNotEqual( WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)), IntPtrConstant(0)); } TNode
CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode
a) { return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask | kSmiSignMask)), IntPtrConstant(0)); } TNode
CodeStubAssembler::WordIsWordAligned(SloppyTNode
word) { return WordEqual(IntPtrConstant(0), WordAnd(word, IntPtrConstant(kPointerSize - 1))); } #if DEBUG void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) { CodeAssembler::Bind(label, debug_info); } #else void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); } #endif // DEBUG TNode
CodeStubAssembler::LoadDoubleWithHoleCheck( TNode
array, TNode
index, Label* if_hole) { return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0, SMI_PARAMETERS, if_hole); } void CodeStubAssembler::BranchIfPrototypesHaveNoElements( Node* receiver_map, Label* definitely_no_elements, Label* possibly_elements) { CSA_SLOW_ASSERT(this, IsMap(receiver_map)); VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map); Label loop_body(this, &var_map); Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex); Node* empty_slow_element_dictionary = LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex); Goto(&loop_body); BIND(&loop_body); { Node* map = var_map.value(); Node* prototype = LoadMapPrototype(map); GotoIf(IsNull(prototype), definitely_no_elements); Node* prototype_map = LoadMap(prototype); TNode
prototype_instance_type = LoadMapInstanceType(prototype_map); // Pessimistically assume elements if a Proxy, Special API Object, // or JSValue wrapper is found on the prototype chain. After this // instance type check, it's not necessary to check for interceptors or // access checks. Label if_custom(this, Label::kDeferred), if_notcustom(this); Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type), &if_custom, &if_notcustom); BIND(&if_custom); { // For string JSValue wrappers we still support the checks as long // as they wrap the empty string. GotoIfNot(InstanceTypeEqual(prototype_instance_type, JS_VALUE_TYPE), possibly_elements); Node* prototype_value = LoadJSValueValue(prototype); Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements); } BIND(&if_notcustom); { Node* prototype_elements = LoadElements(prototype); var_map.Bind(prototype_map); GotoIf(WordEqual(prototype_elements, empty_fixed_array), &loop_body); Branch(WordEqual(prototype_elements, empty_slow_element_dictionary), &loop_body, possibly_elements); } } } void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false) { GotoIf(TaggedIsSmi(object), if_false); STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); Branch(IsJSReceiver(object), if_true, if_false); } TNode
CodeStubAssembler::IsFastJSArray(SloppyTNode
object, SloppyTNode
context) { Label if_true(this), if_false(this, Label::kDeferred), exit(this); BranchIfFastJSArray(object, context, &if_true, &if_false); TVARIABLE(BoolT, var_result); BIND(&if_true); { var_result = Int32TrueConstant(); Goto(&exit); } BIND(&if_false); { var_result = Int32FalseConstant(); Goto(&exit); } BIND(&exit); return var_result.value(); } TNode
CodeStubAssembler::IsFastJSArrayWithNoCustomIteration( TNode
object, TNode
context) { Label if_false(this, Label::kDeferred), if_fast(this), exit(this); TVARIABLE(BoolT, var_result); BranchIfFastJSArray(object, context, &if_fast, &if_false, true); BIND(&if_fast); { // Check that the Array.prototype hasn't been modified in a way that would // affect iteration. Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex); DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell()); var_result = WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), SmiConstant(Isolate::kProtectorValid)); Goto(&exit); } BIND(&if_false); { var_result = Int32FalseConstant(); Goto(&exit); } BIND(&exit); return var_result.value(); } void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context, Label* if_true, Label* if_false, bool iteration_only) { GotoIfForceSlowPath(if_false); // Bailout if receiver is a Smi. GotoIf(TaggedIsSmi(object), if_false); Node* map = LoadMap(object); GotoIfNot(IsJSArrayMap(map), if_false); // Bailout if receiver has slow elements. Node* elements_kind = LoadMapElementsKind(map); GotoIfNot(IsFastElementsKind(elements_kind), if_false); // Verify that our prototype is the initial array prototype. GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), if_false); if (iteration_only) { // If we are only iterating over the array, there is no need to check // the NoElements protector if the array is not holey. GotoIfNot(IsHoleyFastElementsKind(elements_kind), if_true); } Branch(IsNoElementsProtectorCellInvalid(), if_false, if_true); } void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context, Label* if_true, Label* if_false) { GotoIf(IsArraySpeciesProtectorCellInvalid(), if_false); BranchIfFastJSArray(object, context, if_true, if_false); } void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) { #ifdef V8_ENABLE_FORCE_SLOW_PATH Node* const force_slow_path_addr = ExternalConstant(ExternalReference::force_slow_path(isolate())); Node* const force_slow = Load(MachineType::Uint8(), force_slow_path_addr); GotoIf(force_slow, if_true); #endif } Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags, Node* top_address, Node* limit_address) { // TODO(jgruber, chromium:848672): TNodeify AllocateRaw. // TODO(jgruber, chromium:848672): Call FatalProcessOutOfMemory if this fails. { intptr_t constant_value; if (ToIntPtrConstant(size_in_bytes, constant_value)) { CHECK(Internals::IsValidSmi(constant_value)); CHECK_GT(constant_value, 0); } else { CSA_CHECK(this, IsValidPositiveSmi(UncheckedCast
(size_in_bytes))); } } Node* top = Load(MachineType::Pointer(), top_address); Node* limit = Load(MachineType::Pointer(), limit_address); // If there's not enough space, call the runtime. VARIABLE(result, MachineRepresentation::kTagged); Label runtime_call(this, Label::kDeferred), no_runtime_call(this); Label merge_runtime(this, &result); bool needs_double_alignment = flags & kDoubleAlignment; if (flags & kAllowLargeObjectAllocation) { Label next(this); GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next); Node* runtime_flags = SmiConstant( Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) | AllocateTargetSpace::encode(AllocationSpace::LO_SPACE))); Node* const runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(), SmiTag(size_in_bytes), runtime_flags); result.Bind(runtime_result); Goto(&merge_runtime); BIND(&next); } VARIABLE(adjusted_size, MachineType::PointerRepresentation(), size_in_bytes); if (needs_double_alignment) { Label not_aligned(this), done_alignment(this, &adjusted_size); Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), ¬_aligned, &done_alignment); BIND(¬_aligned); Node* not_aligned_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4)); adjusted_size.Bind(not_aligned_size); Goto(&done_alignment); BIND(&done_alignment); } Node* new_top = IntPtrAdd(top, adjusted_size.value()); Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call, &no_runtime_call); BIND(&runtime_call); Node* runtime_result; if (flags & kPretenured) { Node* runtime_flags = SmiConstant( Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) | AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE))); runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(), SmiTag(size_in_bytes), runtime_flags); } else { runtime_result = CallRuntime(Runtime::kAllocateInNewSpace, NoContextConstant(), SmiTag(size_in_bytes)); } result.Bind(runtime_result); Goto(&merge_runtime); // When there is enough space, return `top' and bump it up. BIND(&no_runtime_call); Node* no_runtime_result = top; StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address, new_top); VARIABLE(address, MachineType::PointerRepresentation(), no_runtime_result); if (needs_double_alignment) { Label needs_filler(this), done_filling(this, &address); Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling, &needs_filler); BIND(&needs_filler); // Store a filler and increase the address by kPointerSize. 
StoreNoWriteBarrier(MachineRepresentation::kTagged, top, LoadRoot(Heap::kOnePointerFillerMapRootIndex)); address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4))); Goto(&done_filling); BIND(&done_filling); } no_runtime_result = BitcastWordToTagged( IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag))); result.Bind(no_runtime_result); Goto(&merge_runtime); BIND(&merge_runtime); return result.value(); } Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags, Node* top_address, Node* limit_address) { DCHECK_EQ(flags & kDoubleAlignment, 0); return AllocateRaw(size_in_bytes, flags, top_address, limit_address); } Node* CodeStubAssembler::AllocateRawDoubleAligned(Node* size_in_bytes, AllocationFlags flags, Node* top_address, Node* limit_address) { #if defined(V8_HOST_ARCH_32_BIT) return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address, limit_address); #elif defined(V8_HOST_ARCH_64_BIT) // Allocation on 64 bit machine is naturally double aligned return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address, limit_address); #else #error Architecture not supported #endif } Node* CodeStubAssembler::AllocateInNewSpace(Node* size_in_bytes, AllocationFlags flags) { DCHECK(flags == kNone || flags == kDoubleAlignment); CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes)); return Allocate(size_in_bytes, flags); } Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) { Comment("Allocate"); bool const new_space = !(flags & kPretenured); Node* top_address = ExternalConstant( new_space ? ExternalReference::new_space_allocation_top_address(isolate()) : ExternalReference::old_space_allocation_top_address(isolate())); DCHECK_EQ(kPointerSize, ExternalReference::new_space_allocation_limit_address(isolate()) .address() - ExternalReference::new_space_allocation_top_address(isolate()) .address()); DCHECK_EQ(kPointerSize, ExternalReference::old_space_allocation_limit_address(isolate()) .address() - ExternalReference::old_space_allocation_top_address(isolate()) .address()); Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize)); if (flags & kDoubleAlignment) { return AllocateRawDoubleAligned(size_in_bytes, flags, top_address, limit_address); } else { return AllocateRawUnaligned(size_in_bytes, flags, top_address, limit_address); } } Node* CodeStubAssembler::AllocateInNewSpace(int size_in_bytes, AllocationFlags flags) { CHECK(flags == kNone || flags == kDoubleAlignment); DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize); return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); } Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) { return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); } Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) { return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset)); } Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) { return InnerAllocate(previous, IntPtrConstant(offset)); } Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) { return UintPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)); } void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false) { Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred), if_bigint(this, Label::kDeferred); // Rule out false {value}. GotoIf(WordEqual(value, FalseConstant()), if_false); // Check if {value} is a Smi or a HeapObject. 
Branch(TaggedIsSmi(value), &if_smi, &if_notsmi); BIND(&if_smi); { // The {value} is a Smi, only need to check against zero. BranchIfSmiEqual(CAST(value), SmiConstant(0), if_false, if_true); } BIND(&if_notsmi); { // Check if {value} is the empty string. GotoIf(IsEmptyString(value), if_false); // The {value} is a HeapObject, load its map. Node* value_map = LoadMap(value); // Only null, undefined and document.all have the undetectable bit set, // so we can return false immediately when that bit is set. GotoIf(IsUndetectableMap(value_map), if_false); // We still need to handle numbers specially, but all other {value}s // that make it here yield true. GotoIf(IsHeapNumberMap(value_map), &if_heapnumber); Branch(IsBigInt(value), &if_bigint, if_true); BIND(&if_heapnumber); { // Load the floating point value of {value}. Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset, MachineType::Float64()); // Check if the floating point {value} is neither 0.0, -0.0 nor NaN. Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)), if_true, if_false); } BIND(&if_bigint); { Node* result = CallRuntime(Runtime::kBigIntToBoolean, NoContextConstant(), value); CSA_ASSERT(this, IsBoolean(result)); Branch(WordEqual(result, TrueConstant()), if_true, if_false); } } } Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) { Node* frame_pointer = LoadFramePointer(); return Load(rep, frame_pointer, IntPtrConstant(offset)); } Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) { Node* frame_pointer = LoadParentFramePointer(); return Load(rep, frame_pointer, IntPtrConstant(offset)); } TNode
CodeStubAssembler::LoadTargetFromFrame() { DCHECK(IsJSFunctionCall()); return CAST(LoadFromFrame(StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer())); } Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset, MachineType rep) { return Load(rep, buffer, IntPtrConstant(offset)); } Node* CodeStubAssembler::LoadObjectField(SloppyTNode
object, int offset, MachineType rep) { CSA_ASSERT(this, IsStrongHeapObject(object)); return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag)); } Node* CodeStubAssembler::LoadObjectField(SloppyTNode
object, SloppyTNode
offset, MachineType rep) { CSA_ASSERT(this, IsStrongHeapObject(object)); return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))); } TNode
CodeStubAssembler::LoadAndUntagObjectField( SloppyTNode
object, int offset) { if (SmiValuesAre32Bits()) { #if V8_TARGET_LITTLE_ENDIAN offset += kPointerSize / 2; #endif return ChangeInt32ToIntPtr( LoadObjectField(object, offset, MachineType::Int32())); } else { return SmiToIntPtr( LoadObjectField(object, offset, MachineType::AnyTagged())); } } TNode
CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object, int offset) { if (SmiValuesAre32Bits()) { #if V8_TARGET_LITTLE_ENDIAN offset += kPointerSize / 2; #endif return UncheckedCast
( LoadObjectField(object, offset, MachineType::Int32())); } else { return SmiToInt32( LoadObjectField(object, offset, MachineType::AnyTagged())); } } TNode
CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) { if (SmiValuesAre32Bits()) { #if V8_TARGET_LITTLE_ENDIAN index += kPointerSize / 2; #endif return ChangeInt32ToIntPtr( Load(MachineType::Int32(), base, IntPtrConstant(index))); } else { return SmiToIntPtr( Load(MachineType::AnyTagged(), base, IntPtrConstant(index))); } } TNode
CodeStubAssembler::LoadAndUntagToWord32Root( Heap::RootListIndex root_index) { Node* roots_array_start = ExternalConstant(ExternalReference::roots_array_start(isolate())); int index = root_index * kPointerSize; if (SmiValuesAre32Bits()) { #if V8_TARGET_LITTLE_ENDIAN index += kPointerSize / 2; #endif return UncheckedCast
( Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index))); } else { return SmiToInt32(Load(MachineType::AnyTagged(), roots_array_start, IntPtrConstant(index))); } } Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) { if (SmiValuesAre32Bits()) { int zero_offset = offset + kPointerSize / 2; int payload_offset = offset; #if V8_TARGET_LITTLE_ENDIAN std::swap(zero_offset, payload_offset); #endif StoreNoWriteBarrier(MachineRepresentation::kWord32, base, IntPtrConstant(zero_offset), Int32Constant(0)); return StoreNoWriteBarrier(MachineRepresentation::kWord32, base, IntPtrConstant(payload_offset), TruncateInt64ToInt32(value)); } else { return StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base, IntPtrConstant(offset), SmiTag(value)); } } TNode
CodeStubAssembler::LoadHeapNumberValue( SloppyTNode
object) { return TNode
::UncheckedCast(LoadObjectField( object, HeapNumber::kValueOffset, MachineType::Float64())); } TNode
CodeStubAssembler::LoadMap(SloppyTNode
object) { return UncheckedCast
(LoadObjectField(object, HeapObject::kMapOffset)); } TNode
CodeStubAssembler::LoadInstanceType( SloppyTNode
object) { return LoadMapInstanceType(LoadMap(object)); } TNode
CodeStubAssembler::HasInstanceType(SloppyTNode
object, InstanceType instance_type) { return InstanceTypeEqual(LoadInstanceType(object), instance_type); } TNode
CodeStubAssembler::DoesntHaveInstanceType( SloppyTNode
object, InstanceType instance_type) { return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type)); } TNode
CodeStubAssembler::TaggedDoesntHaveInstanceType( SloppyTNode
any_tagged, InstanceType type) { /* return Phi
*/ TNode
tagged_is_smi = TaggedIsSmi(any_tagged); return Select
( tagged_is_smi, [=]() { return tagged_is_smi; }, [=]() { return DoesntHaveInstanceType(any_tagged, type); }); } TNode
CodeStubAssembler::LoadFastProperties( SloppyTNode
object) { CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object)))); TNode
properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset); return Select
(TaggedIsSmi(properties), [=] { return EmptyFixedArrayConstant(); }, [=] { return CAST(properties); }); } TNode
CodeStubAssembler::LoadSlowProperties( SloppyTNode
object) { CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object))); TNode
properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset); return Select
(TaggedIsSmi(properties), [=] { return EmptyPropertyDictionaryConstant(); }, [=] { return CAST(properties); }); } TNode
CodeStubAssembler::LoadElements( SloppyTNode
object) { return CAST(LoadObjectField(object, JSObject::kElementsOffset)); } TNode
CodeStubAssembler::LoadJSArrayLength(SloppyTNode
array) { CSA_ASSERT(this, IsJSArray(array)); return CAST(LoadObjectField(array, JSArray::kLengthOffset)); } TNode
CodeStubAssembler::LoadFastJSArrayLength( SloppyTNode
array) { TNode
length = LoadJSArrayLength(array); CSA_ASSERT(this, IsFastElementsKind(LoadElementsKind(array))); // JSArray length is always a positive Smi for fast arrays. CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); return UncheckedCast
(length); } TNode
CodeStubAssembler::LoadFixedArrayBaseLength( SloppyTNode
array) { CSA_SLOW_ASSERT(this, IsNotWeakFixedArraySubclass(array)); return CAST(LoadObjectField(array, FixedArrayBase::kLengthOffset)); } TNode
CodeStubAssembler::LoadAndUntagFixedArrayBaseLength( SloppyTNode
array) { return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset); } TNode
CodeStubAssembler::LoadFeedbackVectorLength( TNode
vector) { return ChangeInt32ToIntPtr( LoadObjectField
(vector, FeedbackVector::kLengthOffset)); } TNode
CodeStubAssembler::LoadWeakFixedArrayLength( TNode
array) { return CAST(LoadObjectField(array, WeakFixedArray::kLengthOffset)); } TNode
CodeStubAssembler::LoadAndUntagWeakFixedArrayLength( SloppyTNode
array) { return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset); } TNode
CodeStubAssembler::LoadTypedArrayLength( TNode
typed_array) { return CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset)); } TNode
CodeStubAssembler::LoadMapBitField(SloppyTNode
map) { CSA_SLOW_ASSERT(this, IsMap(map)); return UncheckedCast
( LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8())); } TNode
CodeStubAssembler::LoadMapBitField2(SloppyTNode
map) { CSA_SLOW_ASSERT(this, IsMap(map)); return UncheckedCast
( LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8())); } TNode
CodeStubAssembler::LoadMapBitField3(SloppyTNode
map) { CSA_SLOW_ASSERT(this, IsMap(map)); return UncheckedCast
( LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32())); } TNode
CodeStubAssembler::LoadMapInstanceType(SloppyTNode
map) { return UncheckedCast
( LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16())); } TNode
CodeStubAssembler::LoadMapElementsKind(SloppyTNode
map) { CSA_SLOW_ASSERT(this, IsMap(map)); Node* bit_field2 = LoadMapBitField2(map); return Signed(DecodeWord32
(bit_field2)); } TNode
CodeStubAssembler::LoadElementsKind( SloppyTNode
object) { return LoadMapElementsKind(LoadMap(object)); } TNode
CodeStubAssembler::LoadMapDescriptors( SloppyTNode
map) { CSA_SLOW_ASSERT(this, IsMap(map)); return CAST(LoadObjectField(map, Map::kDescriptorsOffset)); } TNode
CodeStubAssembler::LoadMapPrototype(SloppyTNode
map) { CSA_SLOW_ASSERT(this, IsMap(map)); return CAST(LoadObjectField(map, Map::kPrototypeOffset)); } TNode
CodeStubAssembler::LoadMapPrototypeInfo( SloppyTNode
map, Label* if_no_proto_info) { Label if_strong_heap_object(this); CSA_ASSERT(this, IsMap(map)); TNode
maybe_prototype_info = LoadMaybeWeakObjectField(map, Map::kTransitionsOrPrototypeInfoOffset); TVARIABLE(Object, prototype_info); DispatchMaybeObject(maybe_prototype_info, if_no_proto_info, if_no_proto_info, if_no_proto_info, &if_strong_heap_object, &prototype_info); BIND(&if_strong_heap_object); GotoIfNot(WordEqual(LoadMap(CAST(prototype_info.value())), LoadRoot(Heap::kPrototypeInfoMapRootIndex)), if_no_proto_info); return CAST(prototype_info.value()); } TNode
CodeStubAssembler::LoadMapInstanceSizeInWords( SloppyTNode
map) { CSA_SLOW_ASSERT(this, IsMap(map)); return ChangeInt32ToIntPtr(LoadObjectField( map, Map::kInstanceSizeInWordsOffset, MachineType::Uint8())); } TNode