// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Review notes:
//
// - The use of macros in these inline functions may seem superfluous
// but it is absolutely needed to make sure gcc generates optimal
// code. gcc is not happy when attempting to inline too deep.
//

#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/builtins/builtins.h"
#include "src/contexts-inl.h"
#include "src/conversions-inl.h"
#include "src/factory.h"
#include "src/field-index-inl.h"
#include "src/field-type.h"
#include "src/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/isolate.h"
#include "src/keys.h"
#include "src/layout-descriptor-inl.h"
#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
#include "src/property.h"
#include "src/prototype.h"
#include "src/transitions-inl.h"
#include "src/type-feedback-vector-inl.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {

PropertyDetails::PropertyDetails(Smi* smi) { value_ = smi->value(); }

Smi* PropertyDetails::AsSmi() const {
  // Ensure the upper 2 bits have the same value by sign extending it. This is
  // necessary to be able to use the 31st bit of the property details.
  int value = value_ << 1;
  return Smi::FromInt(value >> 1);
}

int PropertyDetails::field_width_in_words() const {
  DCHECK(location() == kField);
  if (!FLAG_unbox_double_fields) return 1;
  if (kDoubleSize == kPointerSize) return 1;
  return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
}

#define TYPE_CHECKER(type, instancetype)           \
  bool HeapObject::Is##type() const {              \
    return map()->instance_type() == instancetype; \
  }

#define CAST_ACCESSOR(type)                       \
  type* type::cast(Object* object) {              \
    SLOW_DCHECK(object->Is##type());              \
    return reinterpret_cast<type*>(object);       \
  }                                               \
  const type* type::cast(const Object* object) {  \
    SLOW_DCHECK(object->Is##type());              \
    return reinterpret_cast<const type*>(object); \
  }

#define INT_ACCESSORS(holder, name, offset)                         \
  int holder::name() const { return READ_INT_FIELD(this, offset); } \
  void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }

#define ACCESSORS_CHECKED(holder, name, type, offset, condition)     \
  type* holder::name() const {                                       \
    DCHECK(condition);                                               \
    return type::cast(READ_FIELD(this, offset));                     \
  }                                                                  \
  void holder::set_##name(type* value, WriteBarrierMode mode) {      \
    DCHECK(condition);                                               \
    WRITE_FIELD(this, offset, value);                                \
    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
  }

#define ACCESSORS(holder, name, type, offset) \
  ACCESSORS_CHECKED(holder, name, type, offset, true)

// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
  int holder::name() const {                                   \
    DCHECK(condition);                                         \
    Object* value = READ_FIELD(this, offset);                  \
    return Smi::cast(value)->value();                          \
  }                                                            \
  void holder::set_##name(int value) {                         \
    DCHECK(condition);                                         \
    WRITE_FIELD(this, offset, Smi::FromInt(value));            \
  }

#define SMI_ACCESSORS(holder, name, offset) \
  SMI_ACCESSORS_CHECKED(holder, name, offset, true)

#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset)    \
  int holder::synchronized_##name() const {                 \
    Object* value = ACQUIRE_READ_FIELD(this, offset);       \
    return Smi::cast(value)->value();                       \
  }                                                         \
  void holder::synchronized_set_##name(int value) {         \
    RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }

#define NOBARRIER_SMI_ACCESSORS(holder, name, offset)         \
  int holder::nobarrier_##name() const {                      \
    Object* value = NOBARRIER_READ_FIELD(this, offset);       \
    return Smi::cast(value)->value();                         \
  }                                                           \
  void holder::nobarrier_set_##name(int value) {              \
    NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }

#define BOOL_GETTER(holder, field, name, offset) \
  bool holder::name() const {                    \
    return BooleanBit::get(field(), offset);     \
  }

#define BOOL_ACCESSORS(holder, field, name, offset)       \
  bool holder::name() const {                             \
    return BooleanBit::get(field(), offset);              \
  }                                                       \
  void holder::set_##name(bool value) {                   \
    set_##field(BooleanBit::set(field(), offset, value)); \
  }

bool HeapObject::IsFixedArrayBase() const {
  return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}

bool HeapObject::IsFixedArray() const {
  InstanceType instance_type = map()->instance_type();
  return instance_type == FIXED_ARRAY_TYPE ||
         instance_type == TRANSITION_ARRAY_TYPE;
}
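// NOTE (editor's addition, illustrative only; not part of the upstream file):
// as a rough sketch, TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE) expands to
//
//   bool HeapObject::IsHeapNumber() const {
//     return map()->instance_type() == HEAP_NUMBER_TYPE;
//   }
//
// and CAST_ACCESSOR(HeapNumber) generates the matching checked
// HeapNumber::cast() overloads on top of reinterpret_cast.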
// External objects are not extensible, so the map check is enough.
bool HeapObject::IsExternal() const {
  return map() == GetHeap()->external_map();
}

TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)

#define SIMD128_TYPE_CHECKER(TYPE, Type, type, lane_count, lane_type) \
  bool HeapObject::Is##Type() const { return map() == GetHeap()->type##_map(); }
SIMD128_TYPES(SIMD128_TYPE_CHECKER)
#undef SIMD128_TYPE_CHECKER

#define IS_TYPE_FUNCTION_DEF(type_)                               \
  bool Object::Is##type_() const {                                \
    return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
  }
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF

#define IS_TYPE_FUNCTION_DEF(Type, Value)             \
  bool Object::Is##Type(Isolate* isolate) const {     \
    return this == isolate->heap()->Value();          \
  }                                                   \
  bool HeapObject::Is##Type(Isolate* isolate) const { \
    return this == isolate->heap()->Value();          \
  }
ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF

bool HeapObject::IsString() const {
  return map()->instance_type() < FIRST_NONSTRING_TYPE;
}

bool HeapObject::IsName() const {
  return map()->instance_type() <= LAST_NAME_TYPE;
}

bool HeapObject::IsUniqueName() const {
  return IsInternalizedString() || IsSymbol();
}

bool Name::IsUniqueName() const {
  uint32_t type = map()->instance_type();
  return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
         (kStringTag | kNotInternalizedTag);
}

bool HeapObject::IsFunction() const {
  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
  return map()->instance_type() >= FIRST_FUNCTION_TYPE;
}

bool HeapObject::IsCallable() const { return map()->is_callable(); }

bool HeapObject::IsConstructor() const { return map()->is_constructor(); }

bool HeapObject::IsTemplateInfo() const {
  return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}

bool HeapObject::IsInternalizedString() const {
  uint32_t type = map()->instance_type();
  STATIC_ASSERT(kNotInternalizedTag != 0);
  return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
         (kStringTag | kInternalizedTag);
}

bool HeapObject::IsConsString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsCons();
}

bool HeapObject::IsSlicedString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsSliced();
}

bool HeapObject::IsSeqString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsSequential();
}

bool HeapObject::IsSeqOneByteString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsSequential() &&
         String::cast(this)->IsOneByteRepresentation();
}

bool HeapObject::IsSeqTwoByteString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsSequential() &&
         String::cast(this)->IsTwoByteRepresentation();
}

bool HeapObject::IsExternalString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsExternal();
}

bool HeapObject::IsExternalOneByteString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsExternal() &&
         String::cast(this)->IsOneByteRepresentation();
}

bool HeapObject::IsExternalTwoByteString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsExternal() &&
         String::cast(this)->IsTwoByteRepresentation();
}
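// NOTE (editor's addition, illustrative only; not part of the upstream file):
// the string predicates above are pure bit tests on the instance type. For
// example, an internalized string satisfies
//
//   (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
//       (kStringTag | kInternalizedTag)
//
// while any symbol or non-internalized string fails the equality.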
bool Object::HasValidElements() {
  // Dictionary is covered under FixedArray.
  return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}

bool Object::KeyEquals(Object* second) {
  Object* first = this;
  if (second->IsNumber()) {
    if (first->IsNumber()) return first->Number() == second->Number();
    Object* temp = first;
    first = second;
    second = temp;
  }
  if (first->IsNumber()) {
    DCHECK_LE(0, first->Number());
    uint32_t expected = static_cast<uint32_t>(first->Number());
    uint32_t index;
    return Name::cast(second)->AsArrayIndex(&index) && index == expected;
  }
  return Name::cast(first)->Equals(Name::cast(second));
}

bool Object::FilterKey(PropertyFilter filter) {
  if (IsSymbol()) {
    if (filter & SKIP_SYMBOLS) return true;
    if (Symbol::cast(this)->is_private()) return true;
  } else {
    if (filter & SKIP_STRINGS) return true;
  }
  return false;
}

Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
                                     Representation representation) {
  if (representation.IsSmi() && object->IsUninitialized(isolate)) {
    return handle(Smi::kZero, isolate);
  }
  if (!representation.IsDouble()) return object;
  double value;
  if (object->IsUninitialized(isolate)) {
    value = 0;
  } else if (object->IsMutableHeapNumber()) {
    value = HeapNumber::cast(*object)->value();
  } else {
    value = object->Number();
  }
  return isolate->factory()->NewHeapNumber(value, MUTABLE);
}

Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
                                   Representation representation) {
  DCHECK(!object->IsUninitialized(isolate));
  if (!representation.IsDouble()) {
    DCHECK(object->FitsRepresentation(representation));
    return object;
  }
  return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
}

StringShape::StringShape(const String* str)
    : type_(str->map()->instance_type()) {
  set_valid();
  DCHECK((type_ & kIsNotStringMask) == kStringTag);
}

StringShape::StringShape(Map* map) : type_(map->instance_type()) {
  set_valid();
  DCHECK((type_ & kIsNotStringMask) == kStringTag);
}

StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) {
  set_valid();
  DCHECK((type_ & kIsNotStringMask) == kStringTag);
}

bool StringShape::IsInternalized() {
  DCHECK(valid());
  STATIC_ASSERT(kNotInternalizedTag != 0);
  return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
         (kStringTag | kInternalizedTag);
}

bool String::IsOneByteRepresentation() const {
  uint32_t type = map()->instance_type();
  return (type & kStringEncodingMask) == kOneByteStringTag;
}

bool String::IsTwoByteRepresentation() const {
  uint32_t type = map()->instance_type();
  return (type & kStringEncodingMask) == kTwoByteStringTag;
}

bool String::IsOneByteRepresentationUnderneath() {
  uint32_t type = map()->instance_type();
  STATIC_ASSERT(kIsIndirectStringTag != 0);
  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
  DCHECK(IsFlat());
  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
    case kOneByteStringTag:
      return true;
    case kTwoByteStringTag:
      return false;
    default:  // Cons or sliced string.  Need to go deeper.
      return GetUnderlying()->IsOneByteRepresentation();
  }
}

bool String::IsTwoByteRepresentationUnderneath() {
  uint32_t type = map()->instance_type();
  STATIC_ASSERT(kIsIndirectStringTag != 0);
  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
  DCHECK(IsFlat());
  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
    case kOneByteStringTag:
      return false;
    case kTwoByteStringTag:
      return true;
    default:  // Cons or sliced string.  Need to go deeper.
      return GetUnderlying()->IsTwoByteRepresentation();
  }
}

bool String::HasOnlyOneByteChars() {
  uint32_t type = map()->instance_type();
  return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
         IsOneByteRepresentation();
}

bool StringShape::IsCons() {
  return (type_ & kStringRepresentationMask) == kConsStringTag;
}

bool StringShape::IsSliced() {
  return (type_ & kStringRepresentationMask) == kSlicedStringTag;
}

bool StringShape::IsIndirect() {
  return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
}

bool StringShape::IsExternal() {
  return (type_ & kStringRepresentationMask) == kExternalStringTag;
}

bool StringShape::IsSequential() {
  return (type_ & kStringRepresentationMask) == kSeqStringTag;
}

StringRepresentationTag StringShape::representation_tag() {
  uint32_t tag = (type_ & kStringRepresentationMask);
  return static_cast<StringRepresentationTag>(tag);
}

uint32_t StringShape::encoding_tag() { return type_ & kStringEncodingMask; }

uint32_t StringShape::full_representation_tag() {
  return (type_ & (kStringRepresentationMask | kStringEncodingMask));
}

STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
              Internals::kFullStringRepresentationMask);
STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
              Internals::kStringEncodingMask);

bool StringShape::IsSequentialOneByte() {
  return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}

bool StringShape::IsSequentialTwoByte() {
  return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
}

bool StringShape::IsExternalOneByte() {
  return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}

STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
              Internals::kExternalOneByteRepresentationTag);
STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);

bool StringShape::IsExternalTwoByte() {
  return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
}

STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
              Internals::kExternalTwoByteRepresentationTag);
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);

uc32 FlatStringReader::Get(int index) {
  if (is_one_byte_) {
    return Get<uint8_t>(index);
  } else {
    return Get<uc16>(index);
  }
}

template <typename Char>
Char FlatStringReader::Get(int index) {
  DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
  DCHECK(0 <= index && index <= length_);
  if (sizeof(Char) == 1) {
    return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
  } else {
    return static_cast<Char>(static_cast<const uc16*>(start_)[index]);
  }
}

Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
  return key->AsHandle(isolate);
}

Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
                                               HashTableKey* key) {
  return key->AsHandle(isolate);
}

Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
                                                 HashTableKey* key) {
  return key->AsHandle(isolate);
}
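// NOTE (editor's addition, illustrative only; not part of the upstream file):
// the *StringKey classes below implement the HashTableKey protocol used for
// string-table lookups: Hash() must agree with String::Hash() so that a key
// built from raw characters finds the already-internalized string. A
// hypothetical lookup sketch:
//
//   OneByteStringKey key(OneByteVector("foo", 3), heap->HashSeed());
//   uint32_t hash = key.Hash();  // equals String::Hash() of "foo"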
template <typename Char>
class SequentialStringKey : public HashTableKey {
 public:
  explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
      : string_(string), hash_field_(0), seed_(seed) {}

  uint32_t Hash() override {
    hash_field_ = StringHasher::HashSequentialString<Char>(
        string_.start(), string_.length(), seed_);

    uint32_t result = hash_field_ >> String::kHashShift;
    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
    return result;
  }

  uint32_t HashForObject(Object* other) override {
    return String::cast(other)->Hash();
  }

  Vector<const Char> string_;
  uint32_t hash_field_;
  uint32_t seed_;
};

class OneByteStringKey : public SequentialStringKey<uint8_t> {
 public:
  OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
      : SequentialStringKey<uint8_t>(str, seed) {}

  bool IsMatch(Object* string) override {
    return String::cast(string)->IsOneByteEqualTo(string_);
  }

  Handle<Object> AsHandle(Isolate* isolate) override;
};

class SeqOneByteSubStringKey : public HashTableKey {
 public:
  SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
      : string_(string), from_(from), length_(length) {
    DCHECK(string_->IsSeqOneByteString());
  }

  uint32_t Hash() override {
    DCHECK(length_ >= 0);
    DCHECK(from_ + length_ <= string_->length());
    const uint8_t* chars = string_->GetChars() + from_;
    hash_field_ = StringHasher::HashSequentialString(
        chars, length_, string_->GetHeap()->HashSeed());
    uint32_t result = hash_field_ >> String::kHashShift;
    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
    return result;
  }

  uint32_t HashForObject(Object* other) override {
    return String::cast(other)->Hash();
  }

  bool IsMatch(Object* string) override;
  Handle<Object> AsHandle(Isolate* isolate) override;

 private:
  Handle<SeqOneByteString> string_;
  int from_;
  int length_;
  uint32_t hash_field_;
};

class TwoByteStringKey : public SequentialStringKey<uc16> {
 public:
  explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
      : SequentialStringKey<uc16>(str, seed) {}

  bool IsMatch(Object* string) override {
    return String::cast(string)->IsTwoByteEqualTo(string_);
  }

  Handle<Object> AsHandle(Isolate* isolate) override;
};

// Utf8StringKey carries a vector of chars as key.
class Utf8StringKey : public HashTableKey {
 public:
  explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
      : string_(string), hash_field_(0), seed_(seed) {}

  bool IsMatch(Object* string) override {
    return String::cast(string)->IsUtf8EqualTo(string_);
  }

  uint32_t Hash() override {
    if (hash_field_ != 0) return hash_field_ >> String::kHashShift;

    hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
    uint32_t result = hash_field_ >> String::kHashShift;
    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
    return result;
  }

  uint32_t HashForObject(Object* other) override {
    return String::cast(other)->Hash();
  }

  Handle<Object> AsHandle(Isolate* isolate) override {
    if (hash_field_ == 0) Hash();
    return isolate->factory()->NewInternalizedStringFromUtf8(string_, chars_,
                                                             hash_field_);
  }

  Vector<const char> string_;
  uint32_t hash_field_;
  int chars_;  // Caches the number of characters when computing the hash code.
  uint32_t seed_;
};

bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }

TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)

bool HeapObject::IsFiller() const {
  InstanceType instance_type = map()->instance_type();
  return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}

#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
  TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
#undef TYPED_ARRAY_TYPE_CHECKER

bool HeapObject::IsFixedTypedArrayBase() const {
  InstanceType instance_type = map()->instance_type();
  return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
          instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
}

bool HeapObject::IsJSReceiver() const {
  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  return map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}

bool HeapObject::IsJSObject() const {
  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
  return map()->IsJSObjectMap();
}

bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }

bool HeapObject::IsJSArrayIterator() const {
  InstanceType instance_type = map()->instance_type();
  return (instance_type >= FIRST_ARRAY_ITERATOR_TYPE &&
          instance_type <= LAST_ARRAY_ITERATOR_TYPE);
}

TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
TYPE_CHECKER(JSFixedArrayIterator, JS_FIXED_ARRAY_ITERATOR_TYPE)

bool HeapObject::IsJSWeakCollection() const {
  return IsJSWeakMap() || IsJSWeakSet();
}

bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }

bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }

bool HeapObject::IsFrameArray() const { return IsFixedArray(); }

bool HeapObject::IsArrayList() const { return IsFixedArray(); }

bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArray(); }

bool Object::IsLayoutDescriptor() const {
  return IsSmi() || IsFixedTypedArrayBase();
}

bool HeapObject::IsTypeFeedbackVector() const { return IsFixedArray(); }

bool HeapObject::IsTypeFeedbackMetadata() const { return IsFixedArray(); }

bool HeapObject::IsLiteralsArray() const { return IsFixedArray(); }
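// NOTE (editor's addition, illustrative only; not part of the upstream file):
// the deoptimization-data check below is only a plausibility test: a valid
// array has length == kFirstDeoptEntryIndex + n * kDeoptEntrySize for some
// n >= 0, and nothing else distinguishes such a FixedArray from ordinary
// data.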
bool HeapObject::IsDeoptimizationInputData() const {
  // Must be a fixed array.
  if (!IsFixedArray()) return false;

  // There's no sure way to detect the difference between a fixed array and
  // a deoptimization data array.  Since this is used for asserts we can
  // check that the length is zero or else the fixed size plus a multiple of
  // the entry size.
  int length = FixedArray::cast(this)->length();
  if (length == 0) return true;

  length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
  return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
}

bool HeapObject::IsDeoptimizationOutputData() const {
  if (!IsFixedArray()) return false;
  // There's actually no way to see the difference between a fixed array and
  // a deoptimization data array.  Since this is used for asserts we can check
  // that the length is plausible though.
  if (FixedArray::cast(this)->length() % 2 != 0) return false;
  return true;
}

bool HeapObject::IsHandlerTable() const {
  if (!IsFixedArray()) return false;
  // There's actually no way to see the difference between a fixed array and
  // a handler table array.
  return true;
}

bool HeapObject::IsTemplateList() const {
  if (!IsFixedArray()) return false;
  // There's actually no way to see the difference between a fixed array and
  // a template list.
  if (FixedArray::cast(this)->length() < 1) return false;
  return true;
}

bool HeapObject::IsDependentCode() const {
  if (!IsFixedArray()) return false;
  // There's actually no way to see the difference between a fixed array and
  // a dependent codes array.
  return true;
}

bool HeapObject::IsContext() const {
  Map* map = this->map();
  Heap* heap = GetHeap();
  return (
      map == heap->function_context_map() ||
      map == heap->catch_context_map() || map == heap->with_context_map() ||
      map == heap->native_context_map() || map == heap->block_context_map() ||
      map == heap->module_context_map() ||
      map == heap->script_context_map() ||
      map == heap->debug_evaluate_context_map());
}

bool HeapObject::IsNativeContext() const {
  return map() == GetHeap()->native_context_map();
}

bool HeapObject::IsScriptContextTable() const {
  return map() == GetHeap()->script_context_table_map();
}

bool HeapObject::IsScopeInfo() const {
  return map() == GetHeap()->scope_info_map();
}

bool HeapObject::IsModuleInfo() const {
  return map() == GetHeap()->module_info_map();
}

TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)

template <>
inline bool Is<JSFunction>(Object* obj) {
  return obj->IsJSFunction();
}

TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSError, JS_ERROR_TYPE)
TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)

bool HeapObject::IsAbstractCode() const {
  return IsBytecodeArray() || IsCode();
}

bool HeapObject::IsStringWrapper() const {
  return IsJSValue() && JSValue::cast(this)->value()->IsString();
}

TYPE_CHECKER(Foreign, FOREIGN_TYPE)

bool HeapObject::IsBoolean() const {
  return IsOddball() &&
         ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
}

TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)

bool HeapObject::IsJSArrayBufferView() const {
  return IsJSDataView() || IsJSTypedArray();
}

TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)

template <>
inline bool Is<JSArray>(Object* obj) {
  return obj->IsJSArray();
}

bool HeapObject::IsHashTable() const {
  return map() == GetHeap()->hash_table_map();
}

bool HeapObject::IsWeakHashTable() const { return IsHashTable(); }

bool HeapObject::IsDictionary() const {
  return IsHashTable() && this != GetHeap()->string_table();
}

bool Object::IsNameDictionary() const { return IsDictionary(); }

bool Object::IsGlobalDictionary() const { return IsDictionary(); }

bool Object::IsSeededNumberDictionary() const { return IsDictionary(); }

bool HeapObject::IsUnseededNumberDictionary() const {
  return map() == GetHeap()->unseeded_number_dictionary_map();
}

bool HeapObject::IsStringTable() const { return IsHashTable(); }

bool HeapObject::IsStringSet() const { return IsHashTable(); }

bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }

bool HeapObject::IsNormalizedMapCache() const {
  return NormalizedMapCache::IsNormalizedMapCache(this);
}

int NormalizedMapCache::GetIndex(Handle<Map> map) {
  return map->Hash() % NormalizedMapCache::kEntries;
}

bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
  if (!obj->IsFixedArray()) return false;
  if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
    return false;
  }
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
        ->NormalizedMapCacheVerify();
  }
#endif
  return true;
}

bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }

bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }

bool HeapObject::IsMapCache() const { return IsHashTable(); }

bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }

bool HeapObject::IsOrderedHashTable() const {
  return map() == GetHeap()->ordered_hash_table_map();
}

bool Object::IsOrderedHashSet() const { return IsOrderedHashTable(); }

bool Object::IsOrderedHashMap() const { return IsOrderedHashTable(); }

bool Object::IsPrimitive() const {
  return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
}

bool HeapObject::IsJSGlobalProxy() const {
  bool result = map()->instance_type() == JS_GLOBAL_PROXY_TYPE;
  DCHECK(!result || map()->is_access_check_needed());
  return result;
}

TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)

bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }

bool HeapObject::IsAccessCheckNeeded() const {
  if (IsJSGlobalProxy()) {
    const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
    JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
    return proxy->IsDetachedFrom(global);
  }
  return map()->is_access_check_needed();
}

bool HeapObject::IsStruct() const {
  switch (map()->instance_type()) {
#define MAKE_STRUCT_CASE(NAME, Name, name) \
  case NAME##_TYPE:                        \
    return true;
    STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
    default:
      return false;
  }
}

#define MAKE_STRUCT_PREDICATE(NAME, Name, name)                  \
  bool Object::Is##Name() const {                                \
    return IsHeapObject() && HeapObject::cast(this)->Is##Name(); \
  }                                                              \
  bool HeapObject::Is##Name() const {                            \
    return map()->instance_type() == NAME##_TYPE;                \
  }
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE

double Object::Number() const {
  DCHECK(IsNumber());
  return IsSmi()
             ? static_cast<double>(reinterpret_cast<const Smi*>(this)->value())
             : reinterpret_cast<const HeapNumber*>(this)->value();
}

bool Object::IsNaN() const {
  return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
}

bool Object::IsMinusZero() const {
  return this->IsHeapNumber() &&
         i::IsMinusZero(HeapNumber::cast(this)->value());
}

Representation Object::OptimalRepresentation() {
  if (!FLAG_track_fields) return Representation::Tagged();
  if (IsSmi()) {
    return Representation::Smi();
  } else if (FLAG_track_double_fields && IsHeapNumber()) {
    return Representation::Double();
  } else if (FLAG_track_computed_fields &&
             IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
    return Representation::None();
  } else if (FLAG_track_heap_object_fields) {
    DCHECK(IsHeapObject());
    return Representation::HeapObject();
  } else {
    return Representation::Tagged();
  }
}

ElementsKind Object::OptimalElementsKind() {
  if (IsSmi()) return FAST_SMI_ELEMENTS;
  if (IsNumber()) return FAST_DOUBLE_ELEMENTS;
  return FAST_ELEMENTS;
}

bool Object::FitsRepresentation(Representation representation) {
  if (FLAG_track_fields && representation.IsSmi()) {
    return IsSmi();
  } else if (FLAG_track_double_fields && representation.IsDouble()) {
    return IsMutableHeapNumber() || IsNumber();
  } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    return IsHeapObject();
  } else if (FLAG_track_fields && representation.IsNone()) {
    return false;
  }
  return true;
}

bool Object::ToUint32(uint32_t* value) {
  if (IsSmi()) {
    int num = Smi::cast(this)->value();
    if (num < 0) return false;
    *value = static_cast<uint32_t>(num);
    return true;
  }
  if (IsHeapNumber()) {
    double num = HeapNumber::cast(this)->value();
    if (num < 0) return false;
    uint32_t uint_value = FastD2UI(num);
    if (FastUI2D(uint_value) == num) {
      *value = uint_value;
      return true;
    }
  }
  return false;
}

// static
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
                                         Handle<Object> object) {
  if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
  return ToObject(isolate, object, isolate->native_context());
}

// static
MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
  if (input->IsName()) return Handle<Name>::cast(input);
  return ConvertToName(isolate, input);
}

// static
MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
                                        ToPrimitiveHint hint) {
  if (input->IsPrimitive()) return input;
  return JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input), hint);
}

bool Object::HasSpecificClassOf(String* name) {
  return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
}
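// NOTE (editor's addition, illustrative only; not part of the upstream file):
// ToUint32 above succeeds only for exact unsigned 32-bit values: a Smi 7
// yields 7u, while -1 or a HeapNumber 3.5 fails because the
// FastD2UI/FastUI2D round trip does not reproduce the input.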
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
                                        Handle<Name> name) {
  LookupIterator it(object, name);
  if (!it.IsFound()) return it.factory()->undefined_value();
  return GetProperty(&it);
}

MaybeHandle<Object> JSReceiver::GetProperty(Handle<JSReceiver> receiver,
                                            Handle<Name> name) {
  LookupIterator it(receiver, name, receiver);
  if (!it.IsFound()) return it.factory()->undefined_value();
  return Object::GetProperty(&it);
}

MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
                                       uint32_t index) {
  LookupIterator it(isolate, object, index);
  if (!it.IsFound()) return it.factory()->undefined_value();
  return GetProperty(&it);
}

MaybeHandle<Object> JSReceiver::GetElement(Isolate* isolate,
                                           Handle<JSReceiver> receiver,
                                           uint32_t index) {
  LookupIterator it(isolate, receiver, index, receiver);
  if (!it.IsFound()) return it.factory()->undefined_value();
  return Object::GetProperty(&it);
}

Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
                                           Handle<Name> name) {
  LookupIterator it(object, name, object,
                    LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
  if (!it.IsFound()) return it.factory()->undefined_value();
  return GetDataProperty(&it);
}

MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
                                       uint32_t index, Handle<Object> value,
                                       LanguageMode language_mode) {
  LookupIterator it(isolate, object, index);
  MAYBE_RETURN_NULL(
      SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED));
  return value;
}

MaybeHandle<Object> JSReceiver::GetPrototype(Isolate* isolate,
                                             Handle<JSReceiver> receiver) {
  // We don't expect access checks to be needed on JSProxy objects.
  DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
  PrototypeIterator iter(isolate, receiver, kStartAtReceiver,
                         PrototypeIterator::END_AT_NON_HIDDEN);
  do {
    if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
  } while (!iter.IsAtEnd());
  return PrototypeIterator::GetCurrent(iter);
}

MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
                                            Handle<JSReceiver> receiver,
                                            const char* name) {
  Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
  return GetProperty(receiver, str);
}
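// NOTE (editor's addition, illustrative only; not part of the upstream file):
// the field macros defined below operate on tagged pointers: a HeapObject*
// is the object's address plus kHeapObjectTag, so FIELD_ADDR(p, offset)
// yields the untagged address of the field, and e.g. READ_FIELD(this,
// kMapOffset) reads the object's first word.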
// static
MUST_USE_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
    Handle<JSReceiver> object) {
  return KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
                                 ALL_PROPERTIES,
                                 GetKeysConversion::kConvertToString);
}

bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
  DisallowHeapAllocation no_gc;
  HeapObject* prototype = HeapObject::cast(object->map()->prototype());
  HeapObject* null = isolate->heap()->null_value();
  HeapObject* empty = isolate->heap()->empty_fixed_array();
  while (prototype != null) {
    Map* map = prototype->map();
    if (map->instance_type() <= LAST_CUSTOM_ELEMENTS_RECEIVER) return false;
    if (JSObject::cast(prototype)->elements() != empty) return false;
    prototype = HeapObject::cast(map->prototype());
  }
  return true;
}

#define FIELD_ADDR(p, offset) \
  (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)

#define FIELD_ADDR_CONST(p, offset) \
  (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag)

#define READ_FIELD(p, offset) \
  (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset)))

#define ACQUIRE_READ_FIELD(p, offset)           \
  reinterpret_cast<Object*>(base::Acquire_Load( \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))

#define NOBARRIER_READ_FIELD(p, offset)           \
  reinterpret_cast<Object*>(base::NoBarrier_Load( \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))

#define WRITE_FIELD(p, offset, value) \
  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)

#define RELEASE_WRITE_FIELD(p, offset, value)                     \
  base::Release_Store(                                            \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value));

#define NOBARRIER_WRITE_FIELD(p, offset, value)                   \
  base::NoBarrier_Store(                                          \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value));

#define WRITE_BARRIER(heap, object, offset, value)          \
  heap->incremental_marking()->RecordWrite(                 \
      object, HeapObject::RawField(object, offset), value); \
  heap->RecordWrite(object, offset, value);

#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
  do {                                                                 \
    heap->RecordFixedArrayElements(array, start, length);              \
    heap->incremental_marking()->IterateBlackObject(array);            \
  } while (false)

#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
  if (mode != SKIP_WRITE_BARRIER) {                                  \
    if (mode == UPDATE_WRITE_BARRIER) {                              \
      heap->incremental_marking()->RecordWrite(                      \
          object, HeapObject::RawField(object, offset), value);      \
    }                                                                \
    heap->RecordWrite(object, offset, value);                        \
  }

#define READ_DOUBLE_FIELD(p, offset) \
  ReadDoubleValue(FIELD_ADDR_CONST(p, offset))

#define WRITE_DOUBLE_FIELD(p, offset, value) \
  WriteDoubleValue(FIELD_ADDR(p, offset), value)

#define READ_INT_FIELD(p, offset) \
  (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_INT_FIELD(p, offset, value) \
  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)

#define READ_INTPTR_FIELD(p, offset) \
  (*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_INTPTR_FIELD(p, offset, value) \
  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT8_FIELD(p, offset) \
  (*reinterpret_cast<const uint8_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_UINT8_FIELD(p, offset, value) \
  (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT8_FIELD(p, offset) \
  (*reinterpret_cast<const int8_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_INT8_FIELD(p, offset, value) \
  (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT16_FIELD(p, offset) \
  (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_UINT16_FIELD(p, offset, value) \
  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT16_FIELD(p, offset) \
  (*reinterpret_cast<const int16_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_INT16_FIELD(p, offset, value) \
  (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT32_FIELD(p, offset) \
  (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_UINT32_FIELD(p, offset, value) \
  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT32_FIELD(p, offset) \
  (*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_INT32_FIELD(p, offset, value) \
  (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_FLOAT_FIELD(p, offset) \
  (*reinterpret_cast<const float*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_FLOAT_FIELD(p, offset, value) \
  (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT64_FIELD(p, offset) \
  (*reinterpret_cast<const uint64_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_UINT64_FIELD(p, offset, value) \
  (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT64_FIELD(p, offset) \
  (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset)))

#define WRITE_INT64_FIELD(p, offset, value) \
  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_BYTE_FIELD(p, offset) \
  (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))

#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
  static_cast<byte>(base::NoBarrier_Load(    \
      reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))

#define WRITE_BYTE_FIELD(p, offset, value) \
  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)

#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value)           \
  base::NoBarrier_Store(                                       \
      reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
      static_cast<base::Atomic8>(value));
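// NOTE (editor's addition, illustrative only; not part of the upstream file):
// a MapWord below holds either a real Map* or, during GC, a forwarding
// address. The forwarding form subtracts kHeapObjectTag, which makes the raw
// word look like a Smi, so IsForwardingAddress() is just HAS_SMI_TAG on the
// value.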
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
  return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset));
}

MapWord MapWord::FromMap(const Map* map) {
  return MapWord(reinterpret_cast<uintptr_t>(map));
}

Map* MapWord::ToMap() { return reinterpret_cast<Map*>(value_); }

bool MapWord::IsForwardingAddress() const {
  return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
}

MapWord MapWord::FromForwardingAddress(HeapObject* object) {
  Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
  return MapWord(reinterpret_cast<uintptr_t>(raw));
}

HeapObject* MapWord::ToForwardingAddress() {
  DCHECK(IsForwardingAddress());
  return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
}

#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(int offset) {
  VerifyPointer(READ_FIELD(this, offset));
}

void HeapObject::VerifySmiField(int offset) {
  CHECK(READ_FIELD(this, offset)->IsSmi());
}
#endif

Heap* HeapObject::GetHeap() const {
  Heap* heap = MemoryChunk::FromAddress(
                   reinterpret_cast<Address>(const_cast<HeapObject*>(this)))
                   ->heap();
  SLOW_DCHECK(heap != NULL);
  return heap;
}

Isolate* HeapObject::GetIsolate() const { return GetHeap()->isolate(); }

Map* HeapObject::map() const {
#ifdef DEBUG
  // Clear mark potentially added by PathTracer.
  uintptr_t raw_value =
      map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag);
  return MapWord::FromRawValue(raw_value).ToMap();
#else
  return map_word().ToMap();
#endif
}

void HeapObject::set_map(Map* value) {
  set_map_word(MapWord::FromMap(value));
  if (value != NULL) {
    // TODO(1600) We are passing NULL as a slot because maps can never be on
    // evacuation candidate.
    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
  }
}

Map* HeapObject::synchronized_map() {
  return synchronized_map_word().ToMap();
}

void HeapObject::synchronized_set_map(Map* value) {
  synchronized_set_map_word(MapWord::FromMap(value));
  if (value != NULL) {
    // TODO(1600) We are passing NULL as a slot because maps can never be on
    // evacuation candidate.
    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
  }
}

void HeapObject::synchronized_set_map_no_write_barrier(Map* value) {
  synchronized_set_map_word(MapWord::FromMap(value));
}

// Unsafe accessor omitting write barrier.
void HeapObject::set_map_no_write_barrier(Map* value) {
  set_map_word(MapWord::FromMap(value));
}

MapWord HeapObject::map_word() const {
  return MapWord(
      reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
}

void HeapObject::set_map_word(MapWord map_word) {
  NOBARRIER_WRITE_FIELD(this, kMapOffset,
                        reinterpret_cast<Object*>(map_word.value_));
}

MapWord HeapObject::synchronized_map_word() const {
  return MapWord(
      reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
}

void HeapObject::synchronized_set_map_word(MapWord map_word) {
  RELEASE_WRITE_FIELD(this, kMapOffset,
                      reinterpret_cast<Object*>(map_word.value_));
}

int HeapObject::Size() { return SizeFromMap(map()); }

double HeapNumber::value() const {
  return READ_DOUBLE_FIELD(this, kValueOffset);
}

void HeapNumber::set_value(double value) {
  WRITE_DOUBLE_FIELD(this, kValueOffset, value);
}

int HeapNumber::get_exponent() {
  return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
          kExponentShift) -
         kExponentBias;
}

int HeapNumber::get_sign() {
  return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}

bool Simd128Value::Equals(Simd128Value* that) {
  // TODO(bmeurer): This doesn't match the SIMD.js specification, but it seems
  // to be consistent with what the CompareICStub does, and what is tested in
  // the current SIMD.js testsuite.
  if (this == that) return true;
#define SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
  if (this->Is##Type()) {                                      \
    if (!that->Is##Type()) return false;                       \
    return Type::cast(this)->Equals(Type::cast(that));         \
  }
  SIMD128_TYPES(SIMD128_VALUE)
#undef SIMD128_VALUE
  return false;
}

// static
bool Simd128Value::Equals(Handle<Simd128Value> one, Handle<Simd128Value> two) {
  return one->Equals(*two);
}

#define SIMD128_VALUE_EQUALS(TYPE, Type, type, lane_count, lane_type) \
  bool Type::Equals(Type* that) {                                     \
    for (int lane = 0; lane < lane_count; ++lane) {                   \
      if (this->get_lane(lane) != that->get_lane(lane)) return false; \
    }                                                                 \
    return true;                                                      \
  }
SIMD128_TYPES(SIMD128_VALUE_EQUALS)
#undef SIMD128_VALUE_EQUALS

#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
  lane_type value =                                                      \
      READ_##field_type##_FIELD(this, kValueOffset + lane * field_size);
#elif defined(V8_TARGET_BIG_ENDIAN)
#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
  lane_type value = READ_##field_type##_FIELD(                           \
      this, kValueOffset + (lane_count - lane - 1) * field_size);
#else
#error Unknown byte ordering
#endif

#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
  WRITE_##field_type##_FIELD(this, kValueOffset + lane * field_size, value);
#elif defined(V8_TARGET_BIG_ENDIAN)
#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
  WRITE_##field_type##_FIELD(                                         \
      this, kValueOffset + (lane_count - lane - 1) * field_size, value);
#else
#error Unknown byte ordering
#endif

#define SIMD128_NUMERIC_LANE_FNS(type, lane_type, lane_count, field_type, \
                                 field_size)                              \
  lane_type type::get_lane(int lane) const {                              \
    DCHECK(lane < lane_count && lane >= 0);                               \
    SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size)      \
    return value;                                                         \
  }                                                                       \
                                                                          \
  void type::set_lane(int lane, lane_type value) {                        \
    DCHECK(lane < lane_count && lane >= 0);                               \
    SIMD128_WRITE_LANE(lane_count, field_type, field_size, value)         \
  }

SIMD128_NUMERIC_LANE_FNS(Float32x4, float, 4, FLOAT, kFloatSize)
SIMD128_NUMERIC_LANE_FNS(Int32x4, int32_t, 4, INT32, kInt32Size)
SIMD128_NUMERIC_LANE_FNS(Uint32x4, uint32_t, 4, UINT32, kInt32Size)
SIMD128_NUMERIC_LANE_FNS(Int16x8, int16_t, 8, INT16, kShortSize)
SIMD128_NUMERIC_LANE_FNS(Uint16x8, uint16_t, 8, UINT16, kShortSize)
SIMD128_NUMERIC_LANE_FNS(Int8x16, int8_t, 16, INT8, kCharSize)
SIMD128_NUMERIC_LANE_FNS(Uint8x16, uint8_t, 16, UINT8, kCharSize)
#undef SIMD128_NUMERIC_LANE_FNS

#define SIMD128_BOOLEAN_LANE_FNS(type, lane_type, lane_count, field_type, \
                                 field_size)                              \
  bool type::get_lane(int lane) const {                                   \
    DCHECK(lane < lane_count && lane >= 0);                               \
    SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size)      \
    DCHECK(value == 0 || value == -1);                                    \
    return value != 0;                                                    \
  }                                                                       \
                                                                          \
  void type::set_lane(int lane, bool value) {                             \
    DCHECK(lane < lane_count && lane >= 0);                               \
    int32_t int_val = value ? -1 : 0;                                     \
    SIMD128_WRITE_LANE(lane_count, field_type, field_size, int_val)       \
  }

SIMD128_BOOLEAN_LANE_FNS(Bool32x4, int32_t, 4, INT32, kInt32Size)
SIMD128_BOOLEAN_LANE_FNS(Bool16x8, int16_t, 8, INT16, kShortSize)
SIMD128_BOOLEAN_LANE_FNS(Bool8x16, int8_t, 16, INT8, kCharSize)
#undef SIMD128_BOOLEAN_LANE_FNS

#undef SIMD128_READ_LANE
#undef SIMD128_WRITE_LANE

ACCESSORS(JSReceiver, properties, FixedArray, kPropertiesOffset)

Object** FixedArray::GetFirstElementAddress() {
  return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
}

bool FixedArray::ContainsOnlySmisOrHoles() {
  Object* the_hole = GetHeap()->the_hole_value();
  Object** current = GetFirstElementAddress();
  for (int i = 0; i < length(); ++i) {
    Object* candidate = *current++;
    if (!candidate->IsSmi() && candidate != the_hole) return false;
  }
  return true;
}

FixedArrayBase* JSObject::elements() const {
  Object* array = READ_FIELD(this, kElementsOffset);
  return static_cast<FixedArrayBase*>(array);
}

void AllocationSite::Initialize() {
  set_transition_info(Smi::kZero);
  SetElementsKind(GetInitialFastElementsKind());
  set_nested_site(Smi::kZero);
  set_pretenure_data(0);
  set_pretenure_create_count(0);
  set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
                     SKIP_WRITE_BARRIER);
}

bool AllocationSite::IsZombie() { return pretenure_decision() == kZombie; }

bool AllocationSite::IsMaybeTenure() {
  return pretenure_decision() == kMaybeTenure;
}

bool AllocationSite::PretenuringDecisionMade() {
  return pretenure_decision() != kUndecided;
}

void AllocationSite::MarkZombie() {
  DCHECK(!IsZombie());
  Initialize();
  set_pretenure_decision(kZombie);
}

ElementsKind AllocationSite::GetElementsKind() {
  DCHECK(!SitePointsToLiteral());
  int value = Smi::cast(transition_info())->value();
  return ElementsKindBits::decode(value);
}

void AllocationSite::SetElementsKind(ElementsKind kind) {
  int value = Smi::cast(transition_info())->value();
  set_transition_info(Smi::FromInt(ElementsKindBits::update(value, kind)),
                      SKIP_WRITE_BARRIER);
}

bool AllocationSite::CanInlineCall() {
  int value = Smi::cast(transition_info())->value();
  return DoNotInlineBit::decode(value) == 0;
}

void AllocationSite::SetDoNotInlineCall() {
  int value = Smi::cast(transition_info())->value();
  set_transition_info(Smi::FromInt(DoNotInlineBit::update(value, true)),
                      SKIP_WRITE_BARRIER);
}

bool AllocationSite::SitePointsToLiteral() {
  // If transition_info is a smi, then it represents an ElementsKind
  // for a constructed array. Otherwise, it must be a boilerplate
  // for an object or array literal.
  return transition_info()->IsJSArray() || transition_info()->IsJSObject();
}

// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
    ElementsKind boilerplate_elements_kind) {
  if (IsFastSmiElementsKind(boilerplate_elements_kind)) {
    return TRACK_ALLOCATION_SITE;
  }
  return DONT_TRACK_ALLOCATION_SITE;
}

inline bool AllocationSite::CanTrack(InstanceType type) {
  if (FLAG_allocation_site_pretenuring) {
    return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE ||
           type < FIRST_NONSTRING_TYPE;
  }
  return type == JS_ARRAY_TYPE;
}

AllocationSite::PretenureDecision AllocationSite::pretenure_decision() {
  int value = pretenure_data();
  return PretenureDecisionBits::decode(value);
}

void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
  int value = pretenure_data();
  set_pretenure_data(PretenureDecisionBits::update(value, decision));
}

bool AllocationSite::deopt_dependent_code() {
  int value = pretenure_data();
  return DeoptDependentCodeBit::decode(value);
}

void AllocationSite::set_deopt_dependent_code(bool deopt) {
  int value = pretenure_data();
  set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
}

int AllocationSite::memento_found_count() {
  int value = pretenure_data();
  return MementoFoundCountBits::decode(value);
}
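// NOTE (editor's addition, illustrative only; not part of the upstream file):
// the pretenuring feedback below compares found_count / create_count against
// kPretenureRatio; e.g. if 90 of 100 created mementos are found during a
// maximum-size scavenge, MakePretenureDecision() switches the site to
// kTenure and requests deoptimization of dependent code.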
inline void AllocationSite::set_memento_found_count(int count) {
  int value = pretenure_data();
  // Verify that we can count more mementos than we can possibly find in one
  // new space collection.
  DCHECK((GetHeap()->MaxSemiSpaceSize() /
          (Heap::kMinObjectSizeInWords * kPointerSize +
           AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
  DCHECK(count < MementoFoundCountBits::kMax);
  set_pretenure_data(MementoFoundCountBits::update(value, count));
}

int AllocationSite::memento_create_count() { return pretenure_create_count(); }

void AllocationSite::set_memento_create_count(int count) {
  set_pretenure_create_count(count);
}

bool AllocationSite::IncrementMementoFoundCount(int increment) {
  if (IsZombie()) return false;

  int value = memento_found_count();
  set_memento_found_count(value + increment);
  return memento_found_count() >= kPretenureMinimumCreated;
}

inline void AllocationSite::IncrementMementoCreateCount() {
  DCHECK(FLAG_allocation_site_pretenuring);
  int value = memento_create_count();
  set_memento_create_count(value + 1);
}

inline bool AllocationSite::MakePretenureDecision(
    PretenureDecision current_decision, double ratio,
    bool maximum_size_scavenge) {
  // Here we just allow state transitions from undecided or maybe tenure
  // to don't tenure, maybe tenure, or tenure.
  if ((current_decision == kUndecided || current_decision == kMaybeTenure)) {
    if (ratio >= kPretenureRatio) {
      // We just transition into tenure state when the semi-space was at
      // maximum capacity.
      if (maximum_size_scavenge) {
        set_deopt_dependent_code(true);
        set_pretenure_decision(kTenure);
        // Currently we just need to deopt when we make a state transition to
        // tenure.
        return true;
      }
      set_pretenure_decision(kMaybeTenure);
    } else {
      set_pretenure_decision(kDontTenure);
    }
  }
  return false;
}

inline bool AllocationSite::DigestPretenuringFeedback(
    bool maximum_size_scavenge) {
  bool deopt = false;
  int create_count = memento_create_count();
  int found_count = memento_found_count();
  bool minimum_mementos_created = create_count >= kPretenureMinimumCreated;
  double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
                     ? static_cast<double>(found_count) / create_count
                     : 0.0;
  PretenureDecision current_decision = pretenure_decision();

  if (minimum_mementos_created) {
    deopt = MakePretenureDecision(current_decision, ratio,
                                  maximum_size_scavenge);
  }

  if (FLAG_trace_pretenuring_statistics) {
    PrintIsolate(GetIsolate(),
                 "pretenuring: AllocationSite(%p): (created, found, ratio) "
                 "(%d, %d, %f) %s => %s\n",
                 static_cast<void*>(this), create_count, found_count, ratio,
                 PretenureDecisionName(current_decision),
                 PretenureDecisionName(pretenure_decision()));
  }

  // Clear feedback calculation fields until the next gc.
  set_memento_found_count(0);
  set_memento_create_count(0);
  return deopt;
}

bool AllocationMemento::IsValid() {
  return allocation_site()->IsAllocationSite() &&
         !AllocationSite::cast(allocation_site())->IsZombie();
}

AllocationSite* AllocationMemento::GetAllocationSite() {
  DCHECK(IsValid());
  return AllocationSite::cast(allocation_site());
}

Address AllocationMemento::GetAllocationSiteUnchecked() {
  return reinterpret_cast<Address>(allocation_site());
}

void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
  JSObject::ValidateElements(object);
  ElementsKind elements_kind = object->map()->elements_kind();
  if (!IsFastObjectElementsKind(elements_kind)) {
    if (IsFastHoleyElementsKind(elements_kind)) {
      TransitionElementsKind(object, FAST_HOLEY_ELEMENTS);
    } else {
      TransitionElementsKind(object, FAST_ELEMENTS);
    }
  }
}

void JSObject::EnsureCanContainElements(Handle<JSObject> object,
                                        Object** objects, uint32_t count,
                                        EnsureElementsMode mode) {
  ElementsKind current_kind = object->GetElementsKind();
  ElementsKind target_kind = current_kind;
  {
    DisallowHeapAllocation no_allocation;
    DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
    bool is_holey = IsFastHoleyElementsKind(current_kind);
    if (current_kind == FAST_HOLEY_ELEMENTS) return;
    Object* the_hole = object->GetHeap()->the_hole_value();
    for (uint32_t i = 0; i < count; ++i) {
      Object* current = *objects++;
      if (current == the_hole) {
        is_holey = true;
        target_kind = GetHoleyElementsKind(target_kind);
      } else if (!current->IsSmi()) {
        if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
          if (IsFastSmiElementsKind(target_kind)) {
            if (is_holey) {
              target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
            } else {
              target_kind = FAST_DOUBLE_ELEMENTS;
            }
          }
        } else if (is_holey) {
          target_kind = FAST_HOLEY_ELEMENTS;
          break;
        } else {
          target_kind = FAST_ELEMENTS;
        }
      }
    }
  }
  if (target_kind != current_kind) {
    TransitionElementsKind(object, target_kind);
  }
}

void JSObject::EnsureCanContainElements(Handle<JSObject> object,
                                        Handle<FixedArrayBase> elements,
                                        uint32_t length,
                                        EnsureElementsMode mode) {
  Heap* heap = object->GetHeap();
  if (elements->map() != heap->fixed_double_array_map()) {
    DCHECK(elements->map() == heap->fixed_array_map() ||
           elements->map() == heap->fixed_cow_array_map());
    if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
      mode = DONT_ALLOW_DOUBLE_ELEMENTS;
    }
    Object** objects =
        Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
    EnsureCanContainElements(object, objects, length, mode);
    return;
  }

  DCHECK(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
  if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
    TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
  } else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
    Handle<FixedDoubleArray> double_array =
        Handle<FixedDoubleArray>::cast(elements);
    for (uint32_t i = 0; i < length; ++i) {
      if (double_array->is_the_hole(i)) {
        TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
        return;
      }
    }
    TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
  }
}

void JSObject::SetMapAndElements(Handle<JSObject> object, Handle<Map> new_map,
                                 Handle<FixedArrayBase> value) {
  JSObject::MigrateToMap(object, new_map);
  DCHECK((object->map()->has_fast_smi_or_object_elements() ||
          (*value == object->GetHeap()->empty_fixed_array()) ||
          object->map()->has_fast_string_wrapper_elements()) ==
         (value->map() == object->GetHeap()->fixed_array_map() ||
          value->map() == object->GetHeap()->fixed_cow_array_map()));
  DCHECK((*value == object->GetHeap()->empty_fixed_array()) ||
         (object->map()->has_fast_double_elements() ==
          value->IsFixedDoubleArray()));
  object->set_elements(*value);
}

void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
  WRITE_FIELD(this, kElementsOffset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
}

void JSObject::initialize_elements() {
  FixedArrayBase* elements = map()->GetInitialElements();
  WRITE_FIELD(this, kElementsOffset, elements);
}

InterceptorInfo* JSObject::GetIndexedInterceptor() {
  return map()->GetIndexedInterceptor();
}

InterceptorInfo* JSObject::GetNamedInterceptor() {
  return map()->GetNamedInterceptor();
}

InterceptorInfo* Map::GetNamedInterceptor() {
  DCHECK(has_named_interceptor());
  JSFunction* constructor = JSFunction::cast(GetConstructor());
  DCHECK(constructor->shared()->IsApiFunction());
  return InterceptorInfo::cast(
      constructor->shared()->get_api_func_data()->named_property_handler());
}

InterceptorInfo* Map::GetIndexedInterceptor() {
  DCHECK(has_indexed_interceptor());
  JSFunction* constructor = JSFunction::cast(GetConstructor());
  DCHECK(constructor->shared()->IsApiFunction());
  return InterceptorInfo::cast(
      constructor->shared()->get_api_func_data()->indexed_property_handler());
}

double Oddball::to_number_raw() const {
  return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
}

void Oddball::set_to_number_raw(double value) {
  WRITE_DOUBLE_FIELD(this, kToNumberRawOffset, value);
}

ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
ACCESSORS(Oddball, type_of, String, kTypeOfOffset)

byte Oddball::kind() const {
  return Smi::cast(READ_FIELD(this, kKindOffset))->value();
}

void Oddball::set_kind(byte value) {
  WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
}

// static
Handle<Object> Oddball::ToNumber(Handle<Oddball> input) {
  return handle(input->to_number(), input->GetIsolate());
}

ACCESSORS(Cell, value, Object, kValueOffset)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
ACCESSORS(PropertyCell, value, Object, kValueOffset)

PropertyDetails PropertyCell::property_details() {
  return PropertyDetails(Smi::cast(property_details_raw()));
}

void PropertyCell::set_property_details(PropertyDetails details) {
  set_property_details_raw(details.AsSmi());
}

Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }

void WeakCell::clear() {
  // Either the garbage collector is clearing the cell or we are simply
  // initializing the root empty weak cell.
  DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
         this == GetHeap()->empty_weak_cell());
  WRITE_FIELD(this, kValueOffset, Smi::kZero);
}

void WeakCell::initialize(HeapObject* val) {
  WRITE_FIELD(this, kValueOffset, val);
  // We just have to execute the generational barrier here because we never
  // mark through a weak cell and collect evacuation candidates when we process
  // all weak cells.
  WriteBarrierMode mode = Marking::IsBlack(ObjectMarking::MarkBitFrom(this))
                              ? UPDATE_WRITE_BARRIER
                              : UPDATE_WEAK_WRITE_BARRIER;
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
}

bool WeakCell::cleared() const { return value() == Smi::kZero; }

Object* WeakCell::next() const { return READ_FIELD(this, kNextOffset); }

void WeakCell::set_next(Object* val, WriteBarrierMode mode) {
  WRITE_FIELD(this, kNextOffset, val);
  if (mode == UPDATE_WRITE_BARRIER) {
    WRITE_BARRIER(GetHeap(), this, kNextOffset, val);
  }
}

void WeakCell::clear_next(Object* the_hole_value) {
  DCHECK_EQ(GetHeap()->the_hole_value(), the_hole_value);
  set_next(the_hole_value, SKIP_WRITE_BARRIER);
}

bool WeakCell::next_cleared() { return next()->IsTheHole(GetIsolate()); }
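// NOTE (editor's addition, illustrative only; not part of the upstream file):
// a WeakCell is cleared by the mark-compact collector simply by storing
// Smi::kZero into its value slot, so cleared() above reduces to a single
// word comparison and no separate "cleared" bit is needed.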
int JSObject::GetHeaderSize() { return GetHeaderSize(map()->instance_type()); }

int JSObject::GetHeaderSize(InstanceType type) {
  // Check for the most common kind of JavaScript object before
  // falling into the generic switch. This speeds up the internal
  // field operations considerably on average.
  if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
  switch (type) {
    case JS_API_OBJECT_TYPE:
    case JS_SPECIAL_API_OBJECT_TYPE:
      return JSObject::kHeaderSize;
    case JS_GENERATOR_OBJECT_TYPE:
      return JSGeneratorObject::kSize;
    case JS_GLOBAL_PROXY_TYPE:
      return JSGlobalProxy::kSize;
    case JS_GLOBAL_OBJECT_TYPE:
      return JSGlobalObject::kSize;
    case JS_BOUND_FUNCTION_TYPE:
      return JSBoundFunction::kSize;
    case JS_FUNCTION_TYPE:
      return JSFunction::kSize;
    case JS_VALUE_TYPE:
      return JSValue::kSize;
    case JS_DATE_TYPE:
      return JSDate::kSize;
    case JS_ARRAY_TYPE:
      return JSArray::kSize;
    case JS_ARRAY_BUFFER_TYPE:
      return JSArrayBuffer::kSize;
    case JS_TYPED_ARRAY_TYPE:
      return JSTypedArray::kSize;
    case JS_DATA_VIEW_TYPE:
      return JSDataView::kSize;
    case JS_SET_TYPE:
      return JSSet::kSize;
    case JS_MAP_TYPE:
      return JSMap::kSize;
    case JS_SET_ITERATOR_TYPE:
      return JSSetIterator::kSize;
    case JS_MAP_ITERATOR_TYPE:
      return JSMapIterator::kSize;
    case JS_WEAK_MAP_TYPE:
      return JSWeakMap::kSize;
    case JS_WEAK_SET_TYPE:
      return JSWeakSet::kSize;
    case JS_PROMISE_TYPE:
      return JSObject::kHeaderSize;
    case JS_REGEXP_TYPE:
      return JSRegExp::kSize;
    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
      return JSObject::kHeaderSize;
    case JS_MESSAGE_OBJECT_TYPE:
      return JSMessageObject::kSize;
    case JS_ARGUMENTS_TYPE:
      return JSArgumentsObject::kHeaderSize;
    case JS_ERROR_TYPE:
      return JSObject::kHeaderSize;
    case JS_STRING_ITERATOR_TYPE:
      return JSStringIterator::kSize;
    case JS_FIXED_ARRAY_ITERATOR_TYPE:
      return JSFixedArrayIterator::kHeaderSize;
    default:
      UNREACHABLE();
      return 0;
  }
}

int JSObject::GetInternalFieldCount(Map* map) {
  int instance_size = map->instance_size();
  if (instance_size == kVariableSizeSentinel) return 0;
  InstanceType instance_type = map->instance_type();
  return ((instance_size - GetHeaderSize(instance_type)) >> kPointerSizeLog2) -
         map->GetInObjectProperties();
}

int JSObject::GetInternalFieldCount() { return GetInternalFieldCount(map()); }

int JSObject::GetInternalFieldOffset(int index) {
  DCHECK(index < GetInternalFieldCount() && index >= 0);
  return GetHeaderSize() + (kPointerSize * index);
}

Object* JSObject::GetInternalField(int index) {
  DCHECK(index < GetInternalFieldCount() && index >= 0);
  // Internal objects do follow immediately after the header, whereas in-object
  // properties are at the end of the object. Therefore there is no need
  // to adjust the index here.
  return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
}

void JSObject::SetInternalField(int index, Object* value) {
  DCHECK(index < GetInternalFieldCount() && index >= 0);
  // Internal objects do follow immediately after the header, whereas in-object
  // properties are at the end of the object. Therefore there is no need
  // to adjust the index here.
  int offset = GetHeaderSize() + (kPointerSize * index);
  WRITE_FIELD(this, offset, value);
  WRITE_BARRIER(GetHeap(), this, offset, value);
}
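// NOTE (editor's addition, illustrative only; not part of the upstream file):
// internal (embedder) fields sit immediately after the header, so field i
// lives at offset GetHeaderSize() + i * kPointerSize; in-object properties,
// by contrast, are allocated at the end of the object.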
void JSObject::SetInternalField(int index, Smi* value) {
  DCHECK(index < GetInternalFieldCount() && index >= 0);
  // Internal objects do follow immediately after the header, whereas in-object
  // properties are at the end of the object. Therefore there is no need
  // to adjust the index here.
  int offset = GetHeaderSize() + (kPointerSize * index);
  WRITE_FIELD(this, offset, value);
}

bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
  if (!FLAG_unbox_double_fields) return false;
  return map()->IsUnboxedDoubleField(index);
}

bool Map::IsUnboxedDoubleField(FieldIndex index) {
  if (!FLAG_unbox_double_fields) return false;
  if (index.is_hidden_field() || !index.is_inobject()) return false;
  return !layout_descriptor()->IsTagged(index.property_index());
}

// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
Object* JSObject::RawFastPropertyAt(FieldIndex index) {
  DCHECK(!IsUnboxedDoubleField(index));
  if (index.is_inobject()) {
    return READ_FIELD(this, index.offset());
  } else {
    return properties()->get(index.outobject_array_index());
  }
}

double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
  DCHECK(IsUnboxedDoubleField(index));
  return READ_DOUBLE_FIELD(this, index.offset());
}

void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
  if (index.is_inobject()) {
    int offset = index.offset();
    WRITE_FIELD(this, offset, value);
    WRITE_BARRIER(GetHeap(), this, offset, value);
  } else {
    properties()->set(index.outobject_array_index(), value);
  }
}

void JSObject::RawFastDoublePropertyAtPut(FieldIndex index, double value) {
  WRITE_DOUBLE_FIELD(this, index.offset(), value);
}

void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
  if (IsUnboxedDoubleField(index)) {
    DCHECK(value->IsMutableHeapNumber());
    RawFastDoublePropertyAtPut(index, HeapNumber::cast(value)->value());
  } else {
    RawFastPropertyAtPut(index, value);
  }
}

void JSObject::WriteToField(int descriptor, PropertyDetails details,
                            Object* value) {
  DCHECK(details.type() == DATA);
  DisallowHeapAllocation no_gc;
  FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
  if (details.representation().IsDouble()) {
    // Nothing more to be done.
    if (value->IsUninitialized(this->GetIsolate())) {
      return;
    }
    if (IsUnboxedDoubleField(index)) {
      RawFastDoublePropertyAtPut(index, value->Number());
    } else {
      HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
      DCHECK(box->IsMutableHeapNumber());
      box->set_value(value->Number());
    }
  } else {
    RawFastPropertyAtPut(index, value);
  }
}

void JSObject::WriteToField(int descriptor, Object* value) {
  DescriptorArray* desc = map()->instance_descriptors();
  PropertyDetails details = desc->GetDetails(descriptor);
  WriteToField(descriptor, details, value);
}

int JSObject::GetInObjectPropertyOffset(int index) {
  return map()->GetInObjectPropertyOffset(index);
}

Object* JSObject::InObjectPropertyAt(int index) {
  int offset = GetInObjectPropertyOffset(index);
  return READ_FIELD(this, offset);
}
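// Illustrative note on the fast-property accessors above: FieldIndex decides
// whether a property lives in the object itself or in the properties array,
// and the unboxed-double path stores raw bits with no HeapNumber box. A
// minimal sketch (assuming a DATA property at descriptor 0):
//
//   FieldIndex index = FieldIndex::ForDescriptor(object->map(), 0);
//   if (object->IsUnboxedDoubleField(index)) {
//     double d = object->RawFastDoublePropertyAt(index);  // untagged bits
//     USE(d);
//   } else {
//     Object* v = object->RawFastPropertyAt(index);       // tagged value
//     USE(v);
//   }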
Object* JSObject::InObjectPropertyAtPut(int index, Object* value,
                                        WriteBarrierMode mode) {
  // Adjust for the number of properties stored in the object.
  int offset = GetInObjectPropertyOffset(index);
  WRITE_FIELD(this, offset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
  return value;
}

void JSObject::InitializeBody(Map* map, int start_offset,
                              Object* pre_allocated_value,
                              Object* filler_value) {
  DCHECK(!filler_value->IsHeapObject() || !GetHeap()->InNewSpace(filler_value));
  DCHECK(!pre_allocated_value->IsHeapObject() ||
         !GetHeap()->InNewSpace(pre_allocated_value));
  int size = map->instance_size();
  int offset = start_offset;
  if (filler_value != pre_allocated_value) {
    int end_of_pre_allocated_offset =
        size - (map->unused_property_fields() * kPointerSize);
    DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
    while (offset < end_of_pre_allocated_offset) {
      WRITE_FIELD(this, offset, pre_allocated_value);
      offset += kPointerSize;
    }
  }
  while (offset < size) {
    WRITE_FIELD(this, offset, filler_value);
    offset += kPointerSize;
  }
}

bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
  if (unused_property_fields() != 0) return false;
  if (is_prototype_map()) return false;
  int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
  int limit = Max(minimum, GetInObjectProperties());
  int external = NumberOfFields() - GetInObjectProperties();
  return external > limit;
}

void Struct::InitializeBody(int object_size) {
  Object* value = GetHeap()->undefined_value();
  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
    WRITE_FIELD(this, offset, value);
  }
}

bool Object::ToArrayLength(uint32_t* index) { return Object::ToUint32(index); }

bool Object::ToArrayIndex(uint32_t* index) {
  return Object::ToUint32(index) && *index != kMaxUInt32;
}

void Object::VerifyApiCallResultType() {
#if DEBUG
  if (IsSmi()) return;
  DCHECK(IsHeapObject());
  Isolate* isolate = HeapObject::cast(this)->GetIsolate();
  if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
        IsSimd128Value() || IsUndefined(isolate) || IsTrue(isolate) ||
        IsFalse(isolate) || IsNull(isolate))) {
    FATAL("API call returned invalid object");
  }
#endif  // DEBUG
}

Object* FixedArray::get(int index) const {
  SLOW_DCHECK(index >= 0 && index < this->length());
  return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
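// Illustrative note: the FixedArray accessors below all share one offset
// computation, element i lives at kHeaderSize + i * kPointerSize. For
// example, on a 64-bit build (kPointerSize == 8) element 3 starts 24 bytes
// past the header.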
Handle<Object> FixedArray::get(FixedArray* array, int index,
                               Isolate* isolate) {
  return handle(array->get(index), isolate);
}

template <class T>
MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
  Object* obj = get(index);
  if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
  return Handle<T>(T::cast(obj), isolate);
}

template <class T>
Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
  Object* obj = get(index);
  CHECK(!obj->IsUndefined(isolate));
  return Handle<T>(T::cast(obj), isolate);
}

bool FixedArray::is_the_hole(Isolate* isolate, int index) {
  return get(index)->IsTheHole(isolate);
}

void FixedArray::set(int index, Smi* value) {
  DCHECK(map() != GetHeap()->fixed_cow_array_map());
  DCHECK(index >= 0 && index < this->length());
  DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
  int offset = kHeaderSize + index * kPointerSize;
  WRITE_FIELD(this, offset, value);
}

void FixedArray::set(int index, Object* value) {
  DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
  DCHECK(IsFixedArray());
  DCHECK_GE(index, 0);
  DCHECK_LT(index, this->length());
  int offset = kHeaderSize + index * kPointerSize;
  WRITE_FIELD(this, offset, value);
  WRITE_BARRIER(GetHeap(), this, offset, value);
}

double FixedDoubleArray::get_scalar(int index) {
  DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
         map() != GetHeap()->fixed_array_map());
  DCHECK(index >= 0 && index < this->length());
  DCHECK(!is_the_hole(index));
  return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
}

uint64_t FixedDoubleArray::get_representation(int index) {
  DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
         map() != GetHeap()->fixed_array_map());
  DCHECK(index >= 0 && index < this->length());
  int offset = kHeaderSize + index * kDoubleSize;
  return READ_UINT64_FIELD(this, offset);
}

Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
                                     Isolate* isolate) {
  if (array->is_the_hole(index)) {
    return isolate->factory()->the_hole_value();
  } else {
    return isolate->factory()->NewNumber(array->get_scalar(index));
  }
}

void FixedDoubleArray::set(int index, double value) {
  DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
         map() != GetHeap()->fixed_array_map());
  int offset = kHeaderSize + index * kDoubleSize;
  if (std::isnan(value)) {
    WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
  } else {
    WRITE_DOUBLE_FIELD(this, offset, value);
  }
  DCHECK(!is_the_hole(index));
}

void FixedDoubleArray::set_the_hole(int index) {
  DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
         map() != GetHeap()->fixed_array_map());
  int offset = kHeaderSize + index * kDoubleSize;
  WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
}

bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
  return is_the_hole(index);
}

bool FixedDoubleArray::is_the_hole(int index) {
  return get_representation(index) == kHoleNanInt64;
}

double* FixedDoubleArray::data_start() {
  return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
}

void FixedDoubleArray::FillWithHoles(int from, int to) {
  for (int i = from; i < to; i++) {
    set_the_hole(i);
  }
}

Object* WeakFixedArray::Get(int index) const {
  Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
  if (raw->IsSmi()) return raw;
  DCHECK(raw->IsWeakCell());
  return WeakCell::cast(raw)->value();
}

bool WeakFixedArray::IsEmptySlot(int index) const {
  DCHECK(index < Length());
  return Get(index)->IsSmi();
}

void WeakFixedArray::Clear(int index) {
  FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
}

int WeakFixedArray::Length() const {
  return FixedArray::cast(this)->length() - kFirstIndex;
}

int WeakFixedArray::last_used_index() const {
  return Smi::cast(FixedArray::cast(this)->get(kLastUsedIndexIndex))->value();
}

void WeakFixedArray::set_last_used_index(int index) {
  FixedArray::cast(this)->set(kLastUsedIndexIndex, Smi::FromInt(index));
}

template <class T>
T* WeakFixedArray::Iterator::Next() {
  if (list_ != NULL) {
    // Assert that list did not change during iteration.
    DCHECK_EQ(last_used_index_, list_->last_used_index());
    while (index_ < list_->Length()) {
      Object* item = list_->Get(index_++);
      if (item != Empty()) return T::cast(item);
    }
    list_ = NULL;
  }
  return NULL;
}

int ArrayList::Length() {
  if (FixedArray::cast(this)->length() == 0) return 0;
  return Smi::cast(FixedArray::cast(this)->get(kLengthIndex))->value();
}

void ArrayList::SetLength(int length) {
  return FixedArray::cast(this)->set(kLengthIndex, Smi::FromInt(length));
}

Object* ArrayList::Get(int index) {
  return FixedArray::cast(this)->get(kFirstIndex + index);
}

Object** ArrayList::Slot(int index) {
  return data_start() + kFirstIndex + index;
}

void ArrayList::Set(int index, Object* obj, WriteBarrierMode mode) {
  FixedArray::cast(this)->set(kFirstIndex + index, obj, mode);
}

void ArrayList::Clear(int index, Object* undefined) {
  DCHECK(undefined->IsUndefined(GetIsolate()));
  FixedArray::cast(this)
      ->set(kFirstIndex + index, undefined, SKIP_WRITE_BARRIER);
}

int RegExpMatchInfo::NumberOfCaptureRegisters() {
  DCHECK_GE(length(), kLastMatchOverhead);
  Object* obj = get(kNumberOfCapturesIndex);
  return Smi::cast(obj)->value();
}

void RegExpMatchInfo::SetNumberOfCaptureRegisters(int value) {
  DCHECK_GE(length(), kLastMatchOverhead);
  set(kNumberOfCapturesIndex, Smi::FromInt(value));
}

String* RegExpMatchInfo::LastSubject() {
  DCHECK_GE(length(), kLastMatchOverhead);
  Object* obj = get(kLastSubjectIndex);
  return String::cast(obj);
}

void RegExpMatchInfo::SetLastSubject(String* value) {
  DCHECK_GE(length(), kLastMatchOverhead);
  set(kLastSubjectIndex, value);
}

Object* RegExpMatchInfo::LastInput() {
  DCHECK_GE(length(), kLastMatchOverhead);
  return get(kLastInputIndex);
}

void RegExpMatchInfo::SetLastInput(Object* value) {
  DCHECK_GE(length(), kLastMatchOverhead);
  set(kLastInputIndex, value);
}

int RegExpMatchInfo::Capture(int i) {
  DCHECK_LT(i, NumberOfCaptureRegisters());
  Object* obj = get(kFirstCaptureIndex + i);
  return Smi::cast(obj)->value();
}

void RegExpMatchInfo::SetCapture(int i, int value) {
  DCHECK_LT(i, NumberOfCaptureRegisters());
  set(kFirstCaptureIndex + i, Smi::FromInt(value));
}

WriteBarrierMode HeapObject::GetWriteBarrierMode(
    const DisallowHeapAllocation& promise) {
  Heap* heap = GetHeap();
  if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
  if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
  return UPDATE_WRITE_BARRIER;
}

AllocationAlignment HeapObject::RequiredAlignment() {
#ifdef V8_HOST_ARCH_32_BIT
  if ((IsFixedFloat64Array() || IsFixedDoubleArray()) &&
      FixedArrayBase::cast(this)->length() != 0) {
    return kDoubleAligned;
  }
  if (IsHeapNumber()) return kDoubleUnaligned;
  if (IsSimd128Value()) return kSimd128Unaligned;
#endif  // V8_HOST_ARCH_32_BIT
  return kWordAligned;
}

void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
  DCHECK(map() != GetHeap()->fixed_cow_array_map());
  DCHECK(index >= 0 && index < this->length());
  int offset = kHeaderSize + index * kPointerSize;
  NOBARRIER_WRITE_FIELD(this, offset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}

void FixedArray::NoWriteBarrierSet(FixedArray* array, int index,
                                   Object* value) {
  DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
  DCHECK(index >= 0 && index < array->length());
  DCHECK(!array->GetHeap()->InNewSpace(value));
  NOBARRIER_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
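// Illustrative note: GetWriteBarrierMode may only be consulted while
// allocation is provably off, which is what the DisallowHeapAllocation token
// enforces. A minimal sketch of the intended pattern:
//
//   DisallowHeapAllocation no_gc;
//   WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
//   for (int i = 0; i < n; i++) array->set(i, values[i], mode);
//
// Skipping the barrier is only sound for new-space objects; the mode query
// re-checks that instead of trusting the caller.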
void FixedArray::set_undefined(int index) {
  DCHECK(map() != GetHeap()->fixed_cow_array_map());
  DCHECK(index >= 0 && index < this->length());
  DCHECK(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
  WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
              GetHeap()->undefined_value());
}

void FixedArray::set_null(int index) {
  DCHECK(index >= 0 && index < this->length());
  DCHECK(!GetHeap()->InNewSpace(GetHeap()->null_value()));
  WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
              GetHeap()->null_value());
}

void FixedArray::set_the_hole(int index) {
  DCHECK(map() != GetHeap()->fixed_cow_array_map());
  DCHECK(index >= 0 && index < this->length());
  DCHECK(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
  WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
              GetHeap()->the_hole_value());
}

void FixedArray::FillWithHoles(int from, int to) {
  for (int i = from; i < to; i++) {
    set_the_hole(i);
  }
}

Object** FixedArray::data_start() {
  return HeapObject::RawField(this, kHeaderSize);
}

Object** FixedArray::RawFieldOfElementAt(int index) {
  return HeapObject::RawField(this, OffsetOfElementAt(index));
}

#define DEFINE_FRAME_ARRAY_ACCESSORS(name, type)                              \
  type* FrameArray::name(int frame_ix) const {                                \
    Object* obj =                                                             \
        get(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset);    \
    return type::cast(obj);                                                   \
  }                                                                           \
                                                                              \
  void FrameArray::Set##name(int frame_ix, type* value) {                     \
    set(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset, value); \
  }
FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS)
#undef DEFINE_FRAME_ARRAY_ACCESSORS

bool FrameArray::IsWasmFrame(int frame_ix) const {
  const int flags = Flags(frame_ix)->value();
  return (flags & kIsWasmFrame) != 0;
}

bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
  const int flags = Flags(frame_ix)->value();
  return (flags & kIsAsmJsWasmFrame) != 0;
}

int FrameArray::FrameCount() const {
  const int frame_count = Smi::cast(get(kFrameCountIndex))->value();
  DCHECK_LE(0, frame_count);
  return frame_count;
}

bool DescriptorArray::IsEmpty() {
  DCHECK(length() >= kFirstIndex ||
         this == GetHeap()->empty_descriptor_array());
  return length() < kFirstIndex;
}

int DescriptorArray::number_of_descriptors() {
  DCHECK(length() >= kFirstIndex || IsEmpty());
  int len = length();
  return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value();
}
int DescriptorArray::number_of_descriptors_storage() {
  int len = length();
  return len == 0 ? 0 : (len - kFirstIndex) / kDescriptorSize;
}

int DescriptorArray::NumberOfSlackDescriptors() {
  return number_of_descriptors_storage() - number_of_descriptors();
}

void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
  WRITE_FIELD(this, kDescriptorLengthOffset,
              Smi::FromInt(number_of_descriptors));
}

inline int DescriptorArray::number_of_entries() {
  return number_of_descriptors();
}

bool DescriptorArray::HasEnumCache() {
  return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
}

void DescriptorArray::CopyEnumCacheFrom(DescriptorArray* array) {
  set(kEnumCacheIndex, array->get(kEnumCacheIndex));
}

FixedArray* DescriptorArray::GetEnumCache() {
  DCHECK(HasEnumCache());
  FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
  return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
}

bool DescriptorArray::HasEnumIndicesCache() {
  if (IsEmpty()) return false;
  Object* object = get(kEnumCacheIndex);
  if (object->IsSmi()) return false;
  FixedArray* bridge = FixedArray::cast(object);
  return !bridge->get(kEnumCacheBridgeIndicesCacheIndex)->IsSmi();
}

FixedArray* DescriptorArray::GetEnumIndicesCache() {
  DCHECK(HasEnumIndicesCache());
  FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
  return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
}

Object** DescriptorArray::GetEnumCacheSlot() {
  DCHECK(HasEnumCache());
  return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
                              kEnumCacheOffset);
}

// Perform a binary search in a fixed array.
template <SearchMode search_mode, typename T>
int BinarySearch(T* array, Name* name, int valid_entries,
                 int* out_insertion_index) {
  DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == NULL);
  int low = 0;
  int high = array->number_of_entries() - 1;
  uint32_t hash = name->hash_field();
  int limit = high;

  DCHECK(low <= high);

  while (low != high) {
    int mid = low + (high - low) / 2;
    Name* mid_name = array->GetSortedKey(mid);
    uint32_t mid_hash = mid_name->hash_field();

    if (mid_hash >= hash) {
      high = mid;
    } else {
      low = mid + 1;
    }
  }

  for (; low <= limit; ++low) {
    int sort_index = array->GetSortedKeyIndex(low);
    Name* entry = array->GetKey(sort_index);
    uint32_t current_hash = entry->hash_field();
    if (current_hash != hash) {
      if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
        *out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
      }
      return T::kNotFound;
    }
    if (entry == name) {
      if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
        return sort_index;
      }
      return T::kNotFound;
    }
  }

  if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
    *out_insertion_index = limit + 1;
  }
  return T::kNotFound;
}

// Perform a linear search in this fixed array. len is the number of entry
// indices that are valid.
template <SearchMode search_mode, typename T>
int LinearSearch(T* array, Name* name, int valid_entries,
                 int* out_insertion_index) {
  if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
    uint32_t hash = name->hash_field();
    int len = array->number_of_entries();
    for (int number = 0; number < len; number++) {
      int sorted_index = array->GetSortedKeyIndex(number);
      Name* entry = array->GetKey(sorted_index);
      uint32_t current_hash = entry->hash_field();
      if (current_hash > hash) {
        *out_insertion_index = sorted_index;
        return T::kNotFound;
      }
      if (entry == name) return sorted_index;
    }
    *out_insertion_index = len;
    return T::kNotFound;
  } else {
    DCHECK_LE(valid_entries, array->number_of_entries());
    DCHECK_NULL(out_insertion_index);  // Not supported here.
    for (int number = 0; number < valid_entries; number++) {
      if (array->GetKey(number) == name) return number;
    }
    return T::kNotFound;
  }
}

template <SearchMode search_mode, typename T>
int Search(T* array, Name* name, int valid_entries, int* out_insertion_index) {
  SLOW_DCHECK(array->IsSortedNoDuplicates());

  if (valid_entries == 0) {
    if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
      *out_insertion_index = 0;
    }
    return T::kNotFound;
  }

  // Fast case: do linear search for small arrays.
  const int kMaxElementsForLinearSearch = 8;
  if (valid_entries <= kMaxElementsForLinearSearch) {
    return LinearSearch<search_mode>(array, name, valid_entries,
                                     out_insertion_index);
  }

  // Slow case: perform binary search.
  return BinarySearch<search_mode>(array, name, valid_entries,
                                   out_insertion_index);
}
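// Illustrative note: the search above is a binary search on hash_field()
// values over the hash-sorted key order, followed by a short linear scan over
// hash collisions. Sketch of the shape:
//
//   keys sorted by hash: [a(h=3), b(h=7), c(h=7), d(h=9)], searching h=7
//   binary phase narrows to b (first entry with hash >= 7);
//   linear phase compares b, then c, by identity.
//
// The 8-entry linear-search cutoff in Search() mirrors the usual tradeoff:
// for tiny arrays a straight scan beats the branchy binary phase.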
int DescriptorArray::Search(Name* name, int valid_descriptors) {
  DCHECK(name->IsUniqueName());
  return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, NULL);
}

int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
  DCHECK(name->IsUniqueName());
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  if (number_of_own_descriptors == 0) return kNotFound;

  DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
  int number = cache->Lookup(map, name);

  if (number == DescriptorLookupCache::kAbsent) {
    number = Search(name, number_of_own_descriptors);
    cache->Update(map, name, number);
  }

  return number;
}

PropertyDetails Map::GetLastDescriptorDetails() {
  return instance_descriptors()->GetDetails(LastAdded());
}

int Map::LastAdded() {
  int number_of_own_descriptors = NumberOfOwnDescriptors();
  DCHECK(number_of_own_descriptors > 0);
  return number_of_own_descriptors - 1;
}

int Map::NumberOfOwnDescriptors() {
  return NumberOfOwnDescriptorsBits::decode(bit_field3());
}

void Map::SetNumberOfOwnDescriptors(int number) {
  DCHECK(number <= instance_descriptors()->number_of_descriptors());
  set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
}

int Map::EnumLength() { return EnumLengthBits::decode(bit_field3()); }

void Map::SetEnumLength(int length) {
  if (length != kInvalidEnumCacheSentinel) {
    DCHECK(length >= 0);
    DCHECK(length == 0 || instance_descriptors()->HasEnumCache());
    DCHECK(length <= NumberOfOwnDescriptors());
  }
  set_bit_field3(EnumLengthBits::update(bit_field3(), length));
}

FixedArrayBase* Map::GetInitialElements() {
  FixedArrayBase* result = nullptr;
  if (has_fast_elements() || has_fast_string_wrapper_elements()) {
    result = GetHeap()->empty_fixed_array();
  } else if (has_fast_sloppy_arguments_elements()) {
    result = GetHeap()->empty_sloppy_arguments_elements();
  } else if (has_fixed_typed_array_elements()) {
    result = GetHeap()->EmptyFixedTypedArrayForMap(this);
  } else {
    UNREACHABLE();
  }
  DCHECK(!GetHeap()->InNewSpace(result));
  return result;
}

// static
Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
                                     PropertyKind new_kind,
                                     PropertyAttributes new_attributes,
                                     Representation new_representation,
                                     Handle<FieldType> new_field_type,
                                     StoreMode store_mode) {
  return Reconfigure(map, map->elements_kind(), modify_index, new_kind,
                     new_attributes, new_representation, new_field_type,
                     store_mode);
}

// static
Handle<Map> Map::ReconfigureElementsKind(Handle<Map> map,
                                         ElementsKind new_elements_kind) {
  return Reconfigure(map, new_elements_kind, -1, kData, NONE,
                     Representation::None(),
                     FieldType::None(map->GetIsolate()), ALLOW_IN_DESCRIPTOR);
}

Object** DescriptorArray::GetKeySlot(int descriptor_number) {
  DCHECK(descriptor_number < number_of_descriptors());
  return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
}

Object** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
  return GetKeySlot(descriptor_number);
}

Object** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
  return GetValueSlot(descriptor_number - 1) + 1;
}

Name* DescriptorArray::GetKey(int descriptor_number) {
  DCHECK(descriptor_number < number_of_descriptors());
  return Name::cast(get(ToKeyIndex(descriptor_number)));
}

int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
  return GetDetails(descriptor_number).pointer();
}

Name* DescriptorArray::GetSortedKey(int descriptor_number) {
  return GetKey(GetSortedKeyIndex(descriptor_number));
}

void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
  PropertyDetails details = GetDetails(descriptor_index);
  set(ToDetailsIndex(descriptor_index), details.set_pointer(pointer).AsSmi());
}

void DescriptorArray::SetRepresentation(int descriptor_index,
                                        Representation representation) {
  DCHECK(!representation.IsNone());
  PropertyDetails details = GetDetails(descriptor_index);
  set(ToDetailsIndex(descriptor_index),
      details.CopyWithRepresentation(representation).AsSmi());
}

Object** DescriptorArray::GetValueSlot(int descriptor_number) {
  DCHECK(descriptor_number < number_of_descriptors());
  return RawFieldOfElementAt(ToValueIndex(descriptor_number));
}

int DescriptorArray::GetValueOffset(int descriptor_number) {
  return OffsetOfElementAt(ToValueIndex(descriptor_number));
}

Object* DescriptorArray::GetValue(int descriptor_number) {
  DCHECK(descriptor_number < number_of_descriptors());
  return get(ToValueIndex(descriptor_number));
}

void DescriptorArray::SetValue(int descriptor_index, Object* value) {
  set(ToValueIndex(descriptor_index), value);
}

PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
  DCHECK(descriptor_number < number_of_descriptors());
  Object* details = get(ToDetailsIndex(descriptor_number));
  return PropertyDetails(Smi::cast(details));
}

PropertyType DescriptorArray::GetType(int descriptor_number) {
  return GetDetails(descriptor_number).type();
}

int DescriptorArray::GetFieldIndex(int descriptor_number) {
  DCHECK(GetDetails(descriptor_number).location() == kField);
  return GetDetails(descriptor_number).field_index();
}

Object* DescriptorArray::GetConstant(int descriptor_number) {
  return GetValue(descriptor_number);
}

Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
  DCHECK(GetType(descriptor_number) == ACCESSOR_CONSTANT);
  return GetValue(descriptor_number);
}
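// Illustrative note on the slot math above: each descriptor occupies a
// (key, details, value) triplet after the array's fixed prefix, so for
// descriptor i the accessors resolve roughly as
//
//   ToKeyIndex(i)     -> kFirstIndex + i * kDescriptorSize, key slot
//   ToDetailsIndex(i) -> same triplet, details slot (a Smi-encoded
//                        PropertyDetails)
//   ToValueIndex(i)   -> same triplet, value slot
//
// This is why GetSortedKeyIndex() can live inside the details word: the
// hash-sorted order is a permutation stored alongside each entry rather than
// in a separate array.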
AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
  DCHECK(GetType(descriptor_number) == ACCESSOR_CONSTANT);
  Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
  return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
}

void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
  desc->Init(handle(GetKey(descriptor_number), GetIsolate()),
             handle(GetValue(descriptor_number), GetIsolate()),
             GetDetails(descriptor_number));
}

void DescriptorArray::SetDescriptor(int descriptor_number, Descriptor* desc) {
  // Range check.
  DCHECK(descriptor_number < number_of_descriptors());
  set(ToKeyIndex(descriptor_number), *desc->GetKey());
  set(ToValueIndex(descriptor_number), *desc->GetValue());
  set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
}

void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
  // Range check.
  DCHECK(descriptor_number < number_of_descriptors());
  set(ToKeyIndex(descriptor_number), *desc->GetKey());
  set(ToValueIndex(descriptor_number), *desc->GetValue());
  set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
}

void DescriptorArray::Append(Descriptor* desc) {
  DisallowHeapAllocation no_gc;
  int descriptor_number = number_of_descriptors();
  SetNumberOfDescriptors(descriptor_number + 1);
  Set(descriptor_number, desc);

  uint32_t hash = desc->GetKey()->Hash();

  int insertion;

  for (insertion = descriptor_number; insertion > 0; --insertion) {
    Name* key = GetSortedKey(insertion - 1);
    if (key->Hash() <= hash) break;
    SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
  }

  SetSortedKey(insertion, descriptor_number);
}

void DescriptorArray::SwapSortedKeys(int first, int second) {
  int first_key = GetSortedKeyIndex(first);
  SetSortedKey(first, GetSortedKeyIndex(second));
  SetSortedKey(second, first_key);
}

PropertyType DescriptorArray::Entry::type() { return descs_->GetType(index_); }

Object* DescriptorArray::Entry::GetCallbackObject() {
  return descs_->GetValue(index_);
}

int HashTableBase::NumberOfElements() {
  return Smi::cast(get(kNumberOfElementsIndex))->value();
}

int HashTableBase::NumberOfDeletedElements() {
  return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
}

int HashTableBase::Capacity() {
  return Smi::cast(get(kCapacityIndex))->value();
}

void HashTableBase::ElementAdded() {
  SetNumberOfElements(NumberOfElements() + 1);
}

void HashTableBase::ElementRemoved() {
  SetNumberOfElements(NumberOfElements() - 1);
  SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
}

void HashTableBase::ElementsRemoved(int n) {
  SetNumberOfElements(NumberOfElements() - n);
  SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
}

// static
int HashTableBase::ComputeCapacity(int at_least_space_for) {
  int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for * 2);
  return Max(capacity, kMinCapacity);
}

bool HashTableBase::IsKey(Isolate* isolate, Object* k) {
  Heap* heap = isolate->heap();
  return k != heap->the_hole_value() && k != heap->undefined_value();
}

bool HashTableBase::IsKey(Object* k) {
  Isolate* isolate = this->GetIsolate();
  return !k->IsTheHole(isolate) && !k->IsUndefined(isolate);
}

void HashTableBase::SetNumberOfElements(int nof) {
  set(kNumberOfElementsIndex, Smi::FromInt(nof));
}

void HashTableBase::SetNumberOfDeletedElements(int nod) {
  set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
}
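// Illustrative note: ComputeCapacity doubles the requested element count and
// rounds up to a power of two, which keeps a freshly sized table at most half
// full. For example:
//
//   ComputeCapacity(5)  -> RoundUpToPowerOfTwo32(10) -> 16
//   ComputeCapacity(16) -> RoundUpToPowerOfTwo32(32) -> 32
//
// A power-of-two capacity also lets the probing code mask with
// (capacity - 1) instead of computing a modulo.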
template <typename Key>
Map* BaseShape<Key>::GetMap(Isolate* isolate) {
  return isolate->heap()->hash_table_map();
}

template <typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::FindEntry(Key key) {
  return FindEntry(GetIsolate(), key);
}

template <typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key) {
  return FindEntry(isolate, key, HashTable::Hash(key));
}

// Find entry for key otherwise return kNotFound.
template <typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key,
                                              int32_t hash) {
  uint32_t capacity = Capacity();
  uint32_t entry = FirstProbe(hash, capacity);
  uint32_t count = 1;
  // EnsureCapacity will guarantee the hash table is never full.
  Object* undefined = isolate->heap()->undefined_value();
  Object* the_hole = isolate->heap()->the_hole_value();
  while (true) {
    Object* element = KeyAt(entry);
    // Empty entry. Uses raw unchecked accessors because it is called by the
    // string table during bootstrapping.
    if (element == undefined) break;
    if (element != the_hole && Shape::IsMatch(key, element)) return entry;
    entry = NextProbe(entry, count++, capacity);
  }
  return kNotFound;
}

template <typename Derived, typename Shape, typename Key>
bool HashTable<Derived, Shape, Key>::Has(Key key) {
  return FindEntry(key) != kNotFound;
}

template <typename Derived, typename Shape, typename Key>
bool HashTable<Derived, Shape, Key>::Has(Isolate* isolate, Key key) {
  return FindEntry(isolate, key) != kNotFound;
}

bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
  return FindEntry(isolate, key, hash) != kNotFound;
}

bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
  Object* hash = key->GetHash();
  if (!hash->IsSmi()) return false;
  return FindEntry(isolate, key, Smi::cast(hash)->value()) != kNotFound;
}

bool StringSetShape::IsMatch(String* key, Object* value) {
  return value->IsString() && key->Equals(String::cast(value));
}

uint32_t StringSetShape::Hash(String* key) { return key->Hash(); }

uint32_t StringSetShape::HashForObject(String* key, Object* object) {
  return object->IsString() ? String::cast(object)->Hash() : 0;
}

bool SeededNumberDictionary::requires_slow_elements() {
  Object* max_index_object = get(kMaxNumberKeyIndex);
  if (!max_index_object->IsSmi()) return false;
  return 0 != (Smi::cast(max_index_object)->value() &
               kRequiresSlowElementsMask);
}

uint32_t SeededNumberDictionary::max_number_key() {
  DCHECK(!requires_slow_elements());
  Object* max_index_object = get(kMaxNumberKeyIndex);
  if (!max_index_object->IsSmi()) return 0;
  uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
  return value >> kRequiresSlowElementsTagSize;
}

void SeededNumberDictionary::set_requires_slow_elements() {
  set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}

// ------------------------------------
// Cast operations

CAST_ACCESSOR(AbstractCode)
CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(Bool16x8)
CAST_ACCESSOR(Bool32x4)
CAST_ACCESSOR(Bool8x16)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Float32x4)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FrameArray)
CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(Int16x8)
CAST_ACCESSOR(Int32x4)
CAST_ACCESSOR(Int8x16)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
CAST_ACCESSOR(JSBoundFunction)
CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMap)
CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSModuleNamespace)
CAST_ACCESSOR(JSFixedArrayIterator)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSArrayIterator)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakCollection)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(Map)
CAST_ACCESSOR(ModuleInfo)
CAST_ACCESSOR(Name)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(Object)
CAST_ACCESSOR(ObjectHashTable)
CAST_ACCESSOR(ObjectHashSet)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(TemplateList)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
CAST_ACCESSOR(SeededNumberDictionary)
CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SharedFunctionInfo)
CAST_ACCESSOR(Simd128Value)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(String)
CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(Symbol)
CAST_ACCESSOR(TemplateInfo)
CAST_ACCESSOR(Uint16x8)
CAST_ACCESSOR(Uint32x4)
CAST_ACCESSOR(Uint8x16)
CAST_ACCESSOR(UnseededNumberDictionary)
CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
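// Illustrative note: FindEntry above walks the probe sequence produced by
// FirstProbe/NextProbe. With a power-of-two capacity the sequence is, in
// effect (sketch, not the literal definitions):
//
//   entry_0 = hash & (capacity - 1)
//   entry_n = (entry_{n-1} + n) & (capacity - 1)   // n = 1, 2, 3, ...
//
// Undefined marks a never-used slot (stop probing); the hole marks a deleted
// slot (keep probing), which is why both sentinels are fetched up front.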
template <class T>
PodArray<T>* PodArray<T>::cast(Object* object) {
  SLOW_DCHECK(object->IsByteArray());
  return reinterpret_cast<PodArray<T>*>(object);
}

template <class T>
const PodArray<T>* PodArray<T>::cast(const Object* object) {
  SLOW_DCHECK(object->IsByteArray());
  return reinterpret_cast<const PodArray<T>*>(object);
}

// static
template <class T>
Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
                                     PretenureFlag pretenure) {
  return Handle<PodArray<T>>::cast(
      isolate->factory()->NewByteArray(length * sizeof(T), pretenure));
}
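// Illustrative sketch: PodArray stores raw POD structs inside a ByteArray,
// so New() sizes the allocation in bytes while indexing stays element-wise.
// Assuming a POD element type such as InliningPosition (used by the deopt
// data below):
//
//   Handle<PodArray<InliningPosition>> positions =
//       PodArray<InliningPosition>::New(isolate, count, TENURED);
//   // Element accessors then read/write whole InliningPosition records
//   // backed by untagged ByteArray storage.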
// static
template <class Traits>
STATIC_CONST_MEMBER_DEFINITION const InstanceType
    FixedTypedArray<Traits>::kInstanceType;

template <class Traits>
FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
  SLOW_DCHECK(object->IsHeapObject() &&
              HeapObject::cast(object)->map()->instance_type() ==
                  Traits::kInstanceType);
  return reinterpret_cast<FixedTypedArray<Traits>*>(object);
}

template <class Traits>
const FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(
    const Object* object) {
  SLOW_DCHECK(object->IsHeapObject() &&
              HeapObject::cast(object)->map()->instance_type() ==
                  Traits::kInstanceType);
  return reinterpret_cast<const FixedTypedArray<Traits>*>(object);
}

#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type)       \
  type* DeoptimizationInputData::name() {                \
    return type::cast(get(k##name##Index));              \
  }                                                      \
  void DeoptimizationInputData::Set##name(type* value) { \
    set(k##name##Index, value);                          \
  }

DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrAstId, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
DEFINE_DEOPT_ELEMENT_ACCESSORS(WeakCellCache, Object)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)

#undef DEFINE_DEOPT_ELEMENT_ACCESSORS

#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type)                \
  type* DeoptimizationInputData::name(int i) {                  \
    return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
  }                                                             \
  void DeoptimizationInputData::Set##name(int i, type* value) { \
    set(IndexForEntry(i) + k##name##Offset, value);             \
  }

DEFINE_DEOPT_ENTRY_ACCESSORS(AstIdRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)

#undef DEFINE_DEOPT_ENTRY_ACCESSORS

BailoutId DeoptimizationInputData::AstId(int i) {
  return BailoutId(AstIdRaw(i)->value());
}

void DeoptimizationInputData::SetAstId(int i, BailoutId value) {
  SetAstIdRaw(i, Smi::FromInt(value.ToInt()));
}

int DeoptimizationInputData::DeoptCount() {
  return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
}

int DeoptimizationOutputData::DeoptPoints() { return length() / 2; }

BailoutId DeoptimizationOutputData::AstId(int index) {
  return BailoutId(Smi::cast(get(index * 2))->value());
}

void DeoptimizationOutputData::SetAstId(int index, BailoutId id) {
  set(index * 2, Smi::FromInt(id.ToInt()));
}

Smi* DeoptimizationOutputData::PcAndState(int index) {
  return Smi::cast(get(1 + index * 2));
}

void DeoptimizationOutputData::SetPcAndState(int index, Smi* offset) {
  set(1 + index * 2, offset);
}

Object* LiteralsArray::get(int index) const { return FixedArray::get(index); }

void LiteralsArray::set(int index, Object* value) {
  FixedArray::set(index, value);
}

void LiteralsArray::set(int index, Smi* value) {
  FixedArray::set(index, value);
}

void LiteralsArray::set(int index, Object* value, WriteBarrierMode mode) {
  FixedArray::set(index, value, mode);
}

LiteralsArray* LiteralsArray::cast(Object* object) {
  SLOW_DCHECK(object->IsLiteralsArray());
  return reinterpret_cast<LiteralsArray*>(object);
}

TypeFeedbackVector* LiteralsArray::feedback_vector() const {
  if (length() == 0) {
    return TypeFeedbackVector::cast(
        const_cast<FixedArray*>(FixedArray::cast(this)));
  }
  return TypeFeedbackVector::cast(get(kVectorIndex));
}

void LiteralsArray::set_feedback_vector(TypeFeedbackVector* vector) {
  if (length() <= kVectorIndex) {
    DCHECK(vector->length() == 0);
    return;
  }
  set(kVectorIndex, vector);
}

Object* LiteralsArray::literal(int literal_index) const {
  return get(kFirstLiteralIndex + literal_index);
}

void LiteralsArray::set_literal(int literal_index, Object* literal) {
  set(kFirstLiteralIndex + literal_index, literal);
}

void LiteralsArray::set_literal_undefined(int literal_index) {
  set_undefined(kFirstLiteralIndex + literal_index);
}

int LiteralsArray::literals_count() const {
  return length() - kFirstLiteralIndex;
}

int HandlerTable::GetRangeStart(int index) const {
  return Smi::cast(get(index * kRangeEntrySize + kRangeStartIndex))->value();
}

int HandlerTable::GetRangeEnd(int index) const {
  return Smi::cast(get(index * kRangeEntrySize + kRangeEndIndex))->value();
}

int HandlerTable::GetRangeHandler(int index) const {
  return HandlerOffsetField::decode(
      Smi::cast(get(index * kRangeEntrySize + kRangeHandlerIndex))->value());
}

int HandlerTable::GetRangeData(int index) const {
  return Smi::cast(get(index * kRangeEntrySize + kRangeDataIndex))->value();
}

void HandlerTable::SetRangeStart(int index, int value) {
  set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
}

void HandlerTable::SetRangeEnd(int index, int value) {
  set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
}

void HandlerTable::SetRangeHandler(int index, int offset,
                                   CatchPrediction prediction) {
  int value = HandlerOffsetField::encode(offset) |
              HandlerPredictionField::encode(prediction);
  set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
}

void HandlerTable::SetRangeData(int index, int value) {
  set(index * kRangeEntrySize + kRangeDataIndex, Smi::FromInt(value));
}

void HandlerTable::SetReturnOffset(int index, int value) {
  set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
}

void HandlerTable::SetReturnHandler(int index, int offset) {
  int value = HandlerOffsetField::encode(offset);
  set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
}

int HandlerTable::NumberOfRangeEntries() const {
  return length() / kRangeEntrySize;
}

#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
STRUCT_LIST(MAKE_STRUCT_CAST)
#undef MAKE_STRUCT_CAST
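// Illustrative note: a HandlerTable range entry is a flat group of
// kRangeEntrySize consecutive Smi slots, so entry i decodes as
//
//   [ i*kRangeEntrySize + kRangeStartIndex   ]  range start offset
//   [ i*kRangeEntrySize + kRangeEndIndex     ]  range end offset
//   [ i*kRangeEntrySize + kRangeHandlerIndex ]  handler offset | prediction
//   [ i*kRangeEntrySize + kRangeDataIndex    ]  extra data
//
// The handler slot packs two bit fields (HandlerOffsetField and
// HandlerPredictionField) into a single Smi, matching the Get/Set pairs
// above.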
template <typename Derived, typename Shape, typename Key>
HashTable<Derived, Shape, Key>* HashTable<Derived, Shape, Key>::cast(
    Object* obj) {
  SLOW_DCHECK(obj->IsHashTable());
  return reinterpret_cast<HashTable*>(obj);
}

template <typename Derived, typename Shape, typename Key>
const HashTable<Derived, Shape, Key>* HashTable<Derived, Shape, Key>::cast(
    const Object* obj) {
  SLOW_DCHECK(obj->IsHashTable());
  return reinterpret_cast<const HashTable*>(obj);
}

SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)

SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
NOBARRIER_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)

SMI_ACCESSORS(String, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)

int FreeSpace::Size() { return size(); }

FreeSpace* FreeSpace::next() {
  DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
         (!GetHeap()->deserialization_complete() && map() == NULL));
  DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
  return reinterpret_cast<FreeSpace*>(
      Memory::Address_at(address() + kNextOffset));
}

void FreeSpace::set_next(FreeSpace* next) {
  DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
         (!GetHeap()->deserialization_complete() && map() == NULL));
  DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
  base::NoBarrier_Store(
      reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
      reinterpret_cast<base::AtomicWord>(next));
}

FreeSpace* FreeSpace::cast(HeapObject* o) {
  SLOW_DCHECK(!o->GetHeap()->deserialization_complete() || o->IsFreeSpace());
  return reinterpret_cast<FreeSpace*>(o);
}

uint32_t Name::hash_field() {
  return READ_UINT32_FIELD(this, kHashFieldOffset);
}

void Name::set_hash_field(uint32_t value) {
  WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
#if V8_TARGET_LITTLE_ENDIAN
  WRITE_UINT32_FIELD(this, kHashFieldSlot + kIntSize, 0);
#else
  WRITE_UINT32_FIELD(this, kHashFieldSlot, 0);
#endif
#endif
}

bool Name::Equals(Name* other) {
  if (other == this) return true;
  if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
      this->IsSymbol() || other->IsSymbol()) {
    return false;
  }
  return String::cast(this)->SlowEquals(String::cast(other));
}
bool Name::Equals(Handle<Name> one, Handle<Name> two) {
  if (one.is_identical_to(two)) return true;
  if ((one->IsInternalizedString() && two->IsInternalizedString()) ||
      one->IsSymbol() || two->IsSymbol()) {
    return false;
  }
  return String::SlowEquals(Handle<String>::cast(one),
                            Handle<String>::cast(two));
}

ACCESSORS(Symbol, name, Object, kNameOffset)
SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)

bool String::Equals(String* other) {
  if (other == this) return true;
  if (this->IsInternalizedString() && other->IsInternalizedString()) {
    return false;
  }
  return SlowEquals(other);
}

bool String::Equals(Handle<String> one, Handle<String> two) {
  if (one.is_identical_to(two)) return true;
  if (one->IsInternalizedString() && two->IsInternalizedString()) {
    return false;
  }
  return SlowEquals(one, two);
}

Handle<String> String::Flatten(Handle<String> string,
                               PretenureFlag pretenure) {
  if (!string->IsConsString()) return string;
  Handle<ConsString> cons = Handle<ConsString>::cast(string);
  if (cons->IsFlat()) return handle(cons->first());
  return SlowFlatten(cons, pretenure);
}

uint16_t String::Get(int index) {
  DCHECK(index >= 0 && index < length());
  switch (StringShape(this).full_representation_tag()) {
    case kSeqStringTag | kOneByteStringTag:
      return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
    case kSeqStringTag | kTwoByteStringTag:
      return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
    case kConsStringTag | kOneByteStringTag:
    case kConsStringTag | kTwoByteStringTag:
      return ConsString::cast(this)->ConsStringGet(index);
    case kExternalStringTag | kOneByteStringTag:
      return ExternalOneByteString::cast(this)->ExternalOneByteStringGet(index);
    case kExternalStringTag | kTwoByteStringTag:
      return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
    case kSlicedStringTag | kOneByteStringTag:
    case kSlicedStringTag | kTwoByteStringTag:
      return SlicedString::cast(this)->SlicedStringGet(index);
    default:
      break;
  }

  UNREACHABLE();
  return 0;
}

void String::Set(int index, uint16_t value) {
  DCHECK(index >= 0 && index < length());
  DCHECK(StringShape(this).IsSequential());

  return this->IsOneByteRepresentation()
             ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
             : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
}

bool String::IsFlat() {
  if (!StringShape(this).IsCons()) return true;
  return ConsString::cast(this)->second()->length() == 0;
}

String* String::GetUnderlying() {
  // Giving direct access to underlying string only makes sense if the
  // wrapping string is already flattened.
  DCHECK(this->IsFlat());
  DCHECK(StringShape(this).IsIndirect());
  STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
  const int kUnderlyingOffset = SlicedString::kParentOffset;
  return String::cast(READ_FIELD(this, kUnderlyingOffset));
}
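// Illustrative sketch: callers flatten before grabbing raw characters,
// because only flat strings have a single contiguous backing store:
//
//   Handle<String> flat = String::Flatten(str);   // no-op if already flat
//   DisallowHeapAllocation no_gc;
//   String::FlatContent content = flat->GetFlatContent();
//   if (content.IsOneByte()) {
//     Vector<const uint8_t> chars = content.ToOneByteVector();
//     USE(chars);
//   }
//
// A cons string whose second half is empty counts as flat, which is the
// IsFlat() shortcut above.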
template <typename Visitor>
ConsString* String::VisitFlat(Visitor* visitor, String* string,
                              const int offset) {
  int slice_offset = offset;
  const int length = string->length();
  DCHECK(offset <= length);
  while (true) {
    int32_t type = string->map()->instance_type();
    switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
      case kSeqStringTag | kOneByteStringTag:
        visitor->VisitOneByteString(
            SeqOneByteString::cast(string)->GetChars() + slice_offset,
            length - offset);
        return NULL;

      case kSeqStringTag | kTwoByteStringTag:
        visitor->VisitTwoByteString(
            SeqTwoByteString::cast(string)->GetChars() + slice_offset,
            length - offset);
        return NULL;

      case kExternalStringTag | kOneByteStringTag:
        visitor->VisitOneByteString(
            ExternalOneByteString::cast(string)->GetChars() + slice_offset,
            length - offset);
        return NULL;

      case kExternalStringTag | kTwoByteStringTag:
        visitor->VisitTwoByteString(
            ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
            length - offset);
        return NULL;

      case kSlicedStringTag | kOneByteStringTag:
      case kSlicedStringTag | kTwoByteStringTag: {
        SlicedString* slicedString = SlicedString::cast(string);
        slice_offset += slicedString->offset();
        string = slicedString->parent();
        continue;
      }

      case kConsStringTag | kOneByteStringTag:
      case kConsStringTag | kTwoByteStringTag:
        return ConsString::cast(string);

      default:
        UNREACHABLE();
        return NULL;
    }
  }
}

template <>
inline Vector<const uint8_t> String::GetCharVector() {
  String::FlatContent flat = GetFlatContent();
  DCHECK(flat.IsOneByte());
  return flat.ToOneByteVector();
}

template <>
inline Vector<const uc16> String::GetCharVector() {
  String::FlatContent flat = GetFlatContent();
  DCHECK(flat.IsTwoByte());
  return flat.ToUC16Vector();
}

uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
  DCHECK(index >= 0 && index < length());
  return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}

void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
  DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
  WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
                   static_cast<byte>(value));
}

Address SeqOneByteString::GetCharsAddress() {
  return FIELD_ADDR(this, kHeaderSize);
}

uint8_t* SeqOneByteString::GetChars() {
  return reinterpret_cast<uint8_t*>(GetCharsAddress());
}

Address SeqTwoByteString::GetCharsAddress() {
  return FIELD_ADDR(this, kHeaderSize);
}

uc16* SeqTwoByteString::GetChars() {
  return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
}

uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
  DCHECK(index >= 0 && index < length());
  return READ_UINT16_FIELD(this, kHeaderSize + index * kShortSize);
}

void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
  DCHECK(index >= 0 && index < length());
  WRITE_UINT16_FIELD(this, kHeaderSize + index * kShortSize, value);
}

int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
  return SizeFor(length());
}

int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
  return SizeFor(length());
}

String* SlicedString::parent() {
  return String::cast(READ_FIELD(this, kParentOffset));
}

void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
  DCHECK(parent->IsSeqString() || parent->IsExternalString());
  WRITE_FIELD(this, kParentOffset, parent);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
}

SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)

String* ConsString::first() {
  return String::cast(READ_FIELD(this, kFirstOffset));
}

Object* ConsString::unchecked_first() {
  return READ_FIELD(this, kFirstOffset);
}

void ConsString::set_first(String* value, WriteBarrierMode mode) {
  WRITE_FIELD(this, kFirstOffset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
}

String* ConsString::second() {
  return String::cast(READ_FIELD(this, kSecondOffset));
}

Object* ConsString::unchecked_second() {
  return READ_FIELD(this, kSecondOffset);
}

void ConsString::set_second(String* value, WriteBarrierMode mode) {
  WRITE_FIELD(this, kSecondOffset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
}

bool ExternalString::is_short() {
  InstanceType type = map()->instance_type();
  return (type & kShortExternalStringMask) == kShortExternalStringTag;
}

const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
  return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}

void ExternalOneByteString::update_data_cache() {
  if (is_short()) return;
  const char** data_field =
      reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
  *data_field = resource()->data();
}

void ExternalOneByteString::set_resource(
    const ExternalOneByteString::Resource* resource) {
  DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
  *reinterpret_cast<const Resource**>(FIELD_ADDR(this, kResourceOffset)) =
      resource;
  if (resource != NULL) update_data_cache();
}

const uint8_t* ExternalOneByteString::GetChars() {
  return reinterpret_cast<const uint8_t*>(resource()->data());
}

uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
  DCHECK(index >= 0 && index < length());
  return GetChars()[index];
}

const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
  return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}

void ExternalTwoByteString::update_data_cache() {
  if (is_short()) return;
  const uint16_t** data_field = reinterpret_cast<const uint16_t**>