Where Online Learning is simpler!
The C and C++ Include Header Files
/usr/include/node/v8-internal.h
$ cat -n /usr/include/node/v8-internal.h 1 // Copyright 2018 the V8 project authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #ifndef INCLUDE_V8_INTERNAL_H_ 6 #define INCLUDE_V8_INTERNAL_H_ 7 8 #include
9 #include
10 #include
11 12 #include
13 #include
14 #include
15 #include
16 17 #include "v8config.h" // NOLINT(build/include_directory) 18 19 namespace v8 { 20 21 class Array; 22 class Context; 23 class Data; 24 class Isolate; 25 26 namespace internal { 27 28 class Heap; 29 class Isolate; 30 31 typedef uintptr_t Address; 32 static constexpr Address kNullAddress = 0; 33 34 constexpr int KB = 1024; 35 constexpr int MB = KB * 1024; 36 constexpr int GB = MB * 1024; 37 #ifdef V8_TARGET_ARCH_X64 38 constexpr size_t TB = size_t{GB} * 1024; 39 #endif 40 41 /** 42 * Configuration of tagging scheme. 43 */ 44 const int kApiSystemPointerSize = sizeof(void*); 45 const int kApiDoubleSize = sizeof(double); 46 const int kApiInt32Size = sizeof(int32_t); 47 const int kApiInt64Size = sizeof(int64_t); 48 const int kApiSizetSize = sizeof(size_t); 49 50 // Tag information for HeapObject. 51 const int kHeapObjectTag = 1; 52 const int kWeakHeapObjectTag = 3; 53 const int kHeapObjectTagSize = 2; 54 const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1; 55 const intptr_t kHeapObjectReferenceTagMask = 1 << (kHeapObjectTagSize - 1); 56 57 // Tag information for fowarding pointers stored in object headers. 58 // 0b00 at the lowest 2 bits in the header indicates that the map word is a 59 // forwarding pointer. 60 const int kForwardingTag = 0; 61 const int kForwardingTagSize = 2; 62 const intptr_t kForwardingTagMask = (1 << kForwardingTagSize) - 1; 63 64 // Tag information for Smi. 65 const int kSmiTag = 0; 66 const int kSmiTagSize = 1; 67 const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1; 68 69 template
70 struct SmiTagging; 71 72 constexpr intptr_t kIntptrAllBitsSet = intptr_t{-1}; 73 constexpr uintptr_t kUintptrAllBitsSet = 74 static_cast
(kIntptrAllBitsSet); 75 76 // Smi constants for systems where tagged pointer is a 32-bit value. 77 template <> 78 struct SmiTagging<4> { 79 enum { kSmiShiftSize = 0, kSmiValueSize = 31 }; 80 81 static constexpr intptr_t kSmiMinValue = 82 static_cast
(kUintptrAllBitsSet << (kSmiValueSize - 1)); 83 static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1); 84 85 V8_INLINE static constexpr int SmiToInt(Address value) { 86 int shift_bits = kSmiTagSize + kSmiShiftSize; 87 // Truncate and shift down (requires >> to be sign extending). 88 return static_cast
(static_cast
(value)) >> shift_bits; 89 } 90 V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { 91 // Is value in range [kSmiMinValue, kSmiMaxValue]. 92 // Use unsigned operations in order to avoid undefined behaviour in case of 93 // signed integer overflow. 94 return (static_cast
(value) - 95 static_cast
(kSmiMinValue)) <= 96 (static_cast
(kSmiMaxValue) - 97 static_cast
(kSmiMinValue)); 98 } 99 }; 100 101 // Smi constants for systems where tagged pointer is a 64-bit value. 102 template <> 103 struct SmiTagging<8> { 104 enum { kSmiShiftSize = 31, kSmiValueSize = 32 }; 105 106 static constexpr intptr_t kSmiMinValue = 107 static_cast
(kUintptrAllBitsSet << (kSmiValueSize - 1)); 108 static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1); 109 110 V8_INLINE static constexpr int SmiToInt(Address value) { 111 int shift_bits = kSmiTagSize + kSmiShiftSize; 112 // Shift down and throw away top 32 bits. 113 return static_cast
(static_cast
(value) >> shift_bits); 114 } 115 V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { 116 // To be representable as a long smi, the value must be a 32-bit integer. 117 return (value == static_cast
(value)); 118 } 119 }; 120 121 #ifdef V8_COMPRESS_POINTERS 122 // See v8:7703 or src/common/ptr-compr-inl.h for details about pointer 123 // compression. 124 constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32; 125 constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32; 126 127 static_assert( 128 kApiSystemPointerSize == kApiInt64Size, 129 "Pointer compression can be enabled only for 64-bit architectures"); 130 const int kApiTaggedSize = kApiInt32Size; 131 #else 132 const int kApiTaggedSize = kApiSystemPointerSize; 133 #endif 134 135 constexpr bool PointerCompressionIsEnabled() { 136 return kApiTaggedSize != kApiSystemPointerSize; 137 } 138 139 #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH 140 using PlatformSmiTagging = SmiTagging
; 141 #else 142 using PlatformSmiTagging = SmiTagging
; 143 #endif 144 145 // TODO(ishell): Consinder adding kSmiShiftBits = kSmiShiftSize + kSmiTagSize 146 // since it's used much more often than the inividual constants. 147 const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; 148 const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; 149 const int kSmiMinValue = static_cast
(PlatformSmiTagging::kSmiMinValue); 150 const int kSmiMaxValue = static_cast
(PlatformSmiTagging::kSmiMaxValue); 151 constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; } 152 constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; } 153 constexpr bool Is64() { return kApiSystemPointerSize == sizeof(int64_t); } 154 155 V8_INLINE static constexpr Address IntToSmi(int value) { 156 return (static_cast
(value) << (kSmiTagSize + kSmiShiftSize)) | 157 kSmiTag; 158 } 159 160 /* 161 * Sandbox related types, constants, and functions. 162 */ 163 constexpr bool SandboxIsEnabled() { 164 #ifdef V8_ENABLE_SANDBOX 165 return true; 166 #else 167 return false; 168 #endif 169 } 170 171 // SandboxedPointers are guaranteed to point into the sandbox. This is achieved 172 // for example by storing them as offset rather than as raw pointers. 173 using SandboxedPointer_t = Address; 174 175 #ifdef V8_ENABLE_SANDBOX 176 177 // Size of the sandbox, excluding the guard regions surrounding it. 178 #if defined(V8_TARGET_OS_ANDROID) 179 // On Android, most 64-bit devices seem to be configured with only 39 bits of 180 // virtual address space for userspace. As such, limit the sandbox to 128GB (a 181 // quarter of the total available address space). 182 constexpr size_t kSandboxSizeLog2 = 37; // 128 GB 183 #elif defined(V8_TARGET_ARCH_LOONG64) 184 // Some Linux distros on LoongArch64 configured with only 40 bits of virtual 185 // address space for userspace. Limit the sandbox to 256GB here. 186 constexpr size_t kSandboxSizeLog2 = 38; // 256 GB 187 #else 188 // Everywhere else use a 1TB sandbox. 189 constexpr size_t kSandboxSizeLog2 = 40; // 1 TB 190 #endif // V8_TARGET_OS_ANDROID 191 constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2; 192 193 // Required alignment of the sandbox. For simplicity, we require the 194 // size of the guard regions to be a multiple of this, so that this specifies 195 // the alignment of the sandbox including and excluding surrounding guard 196 // regions. The alignment requirement is due to the pointer compression cage 197 // being located at the start of the sandbox. 198 constexpr size_t kSandboxAlignment = kPtrComprCageBaseAlignment; 199 200 // Sandboxed pointers are stored inside the heap as offset from the sandbox 201 // base shifted to the left. 
This way, it is guaranteed that the offset is 202 // smaller than the sandbox size after shifting it to the right again. This 203 // constant specifies the shift amount. 204 constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2; 205 206 // Size of the guard regions surrounding the sandbox. This assumes a worst-case 207 // scenario of a 32-bit unsigned index used to access an array of 64-bit 208 // values. 209 constexpr size_t kSandboxGuardRegionSize = 32ULL * GB; 210 211 static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0, 212 "The size of the guard regions around the sandbox must be a " 213 "multiple of its required alignment."); 214 215 // On OSes where reserving virtual memory is too expensive to reserve the 216 // entire address space backing the sandbox, notably Windows pre 8.1, we create 217 // a partially reserved sandbox that doesn't actually reserve most of the 218 // memory, and so doesn't have the desired security properties as unrelated 219 // memory allocations could end up inside of it, but which still ensures that 220 // objects that should be located inside the sandbox are allocated within 221 // kSandboxSize bytes from the start of the sandbox. The minimum size of the 222 // region that is actually reserved for such a sandbox is specified by this 223 // constant and should be big enough to contain the pointer compression cage as 224 // well as the ArrayBuffer partition. 225 constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB; 226 227 static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize, 228 "The minimum reservation size for a sandbox must be larger than " 229 "the pointer compression cage contained within it."); 230 231 // The maximum buffer size allowed inside the sandbox. 
This is mostly dependent 232 // on the size of the guard regions around the sandbox: an attacker must not be 233 // able to construct a buffer that appears larger than the guard regions and 234 // thereby "reach out of" the sandbox. 235 constexpr size_t kMaxSafeBufferSizeForSandbox = 32ULL * GB - 1; 236 static_assert(kMaxSafeBufferSizeForSandbox <= kSandboxGuardRegionSize, 237 "The maximum allowed buffer size must not be larger than the " 238 "sandbox's guard regions"); 239 240 constexpr size_t kBoundedSizeShift = 29; 241 static_assert(1ULL << (64 - kBoundedSizeShift) == 242 kMaxSafeBufferSizeForSandbox + 1, 243 "The maximum size of a BoundedSize must be synchronized with the " 244 "kMaxSafeBufferSizeForSandbox"); 245 246 #endif // V8_ENABLE_SANDBOX 247 248 #ifdef V8_COMPRESS_POINTERS 249 250 #ifdef V8_TARGET_OS_ANDROID 251 // The size of the virtual memory reservation for an external pointer table. 252 // This determines the maximum number of entries in a table. Using a maximum 253 // size allows omitting bounds checks on table accesses if the indices are 254 // guaranteed (e.g. through shifting) to be below the maximum index. This 255 // value must be a power of two. 256 constexpr size_t kExternalPointerTableReservationSize = 512 * MB; 257 258 // The external pointer table indices stored in HeapObjects as external 259 // pointers are shifted to the left by this amount to guarantee that they are 260 // smaller than the maximum table size. 261 constexpr uint32_t kExternalPointerIndexShift = 6; 262 #else 263 constexpr size_t kExternalPointerTableReservationSize = 1024 * MB; 264 constexpr uint32_t kExternalPointerIndexShift = 5; 265 #endif // V8_TARGET_OS_ANDROID 266 267 // The maximum number of entries in an external pointer table. 
268 constexpr int kExternalPointerTableEntrySize = 8; 269 constexpr int kExternalPointerTableEntrySizeLog2 = 3; 270 constexpr size_t kMaxExternalPointers = 271 kExternalPointerTableReservationSize / kExternalPointerTableEntrySize; 272 static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers, 273 "kExternalPointerTableReservationSize and " 274 "kExternalPointerIndexShift don't match"); 275 276 #else // !V8_COMPRESS_POINTERS 277 278 // Needed for the V8.SandboxedExternalPointersCount histogram. 279 constexpr size_t kMaxExternalPointers = 0; 280 281 #endif // V8_COMPRESS_POINTERS 282 283 // A ExternalPointerHandle represents a (opaque) reference to an external 284 // pointer that can be stored inside the sandbox. A ExternalPointerHandle has 285 // meaning only in combination with an (active) Isolate as it references an 286 // external pointer stored in the currently active Isolate's 287 // ExternalPointerTable. Internally, an ExternalPointerHandles is simply an 288 // index into an ExternalPointerTable that is shifted to the left to guarantee 289 // that it is smaller than the size of the table. 290 using ExternalPointerHandle = uint32_t; 291 292 // ExternalPointers point to objects located outside the sandbox. When the V8 293 // sandbox is enabled, these are stored on heap as ExternalPointerHandles, 294 // otherwise they are simply raw pointers. 295 #ifdef V8_ENABLE_SANDBOX 296 using ExternalPointer_t = ExternalPointerHandle; 297 #else 298 using ExternalPointer_t = Address; 299 #endif 300 301 constexpr ExternalPointer_t kNullExternalPointer = 0; 302 constexpr ExternalPointerHandle kNullExternalPointerHandle = 0; 303 304 // 305 // External Pointers. 306 // 307 // When the sandbox is enabled, external pointers are stored in an external 308 // pointer table and are referenced from HeapObjects through an index (a 309 // "handle"). 
When stored in the table, the pointers are tagged with per-type 310 // tags to prevent type confusion attacks between different external objects. 311 // Besides type information bits, these tags also contain the GC marking bit 312 // which indicates whether the pointer table entry is currently alive. When a 313 // pointer is written into the table, the tag is ORed into the top bits. When 314 // that pointer is later loaded from the table, it is ANDed with the inverse of 315 // the expected tag. If the expected and actual type differ, this will leave 316 // some of the top bits of the pointer set, rendering the pointer inaccessible. 317 // The AND operation also removes the GC marking bit from the pointer. 318 // 319 // The tags are constructed such that UNTAG(TAG(0, T1), T2) != 0 for any two 320 // (distinct) tags T1 and T2. In practice, this is achieved by generating tags 321 // that all have the same number of zeroes and ones but different bit patterns. 322 // With N type tag bits, this allows for (N choose N/2) possible type tags. 323 // Besides the type tag bits, the tags also have the GC marking bit set so that 324 // the marking bit is automatically set when a pointer is written into the 325 // external pointer table (in which case it is clearly alive) and is cleared 326 // when the pointer is loaded. The exception to this is the free entry tag, 327 // which doesn't have the mark bit set, as the entry is not alive. This 328 // construction allows performing the type check and removing GC marking bits 329 // from the pointer in one efficient operation (bitwise AND). The number of 330 // available bits is limited in the following way: on x64, bits [47, 64) are 331 // generally available for tagging (userspace has 47 address bits available). 332 // On Arm64, userspace typically has a 40 or 48 bit address space. 
However, due 333 // to top-byte ignore (TBI) and memory tagging (MTE), the top byte is unusable 334 // for type checks as type-check failures would go unnoticed or collide with 335 // MTE bits. Some bits of the top byte can, however, still be used for the GC 336 // marking bit. The bits available for the type tags are therefore limited to 337 // [48, 56), i.e. (8 choose 4) = 70 different types. 338 // The following options exist to increase the number of possible types: 339 // - Using multiple ExternalPointerTables since tags can safely be reused 340 // across different tables 341 // - Using "extended" type checks, where additional type information is stored 342 // either in an adjacent pointer table entry or at the pointed-to location 343 // - Using a different tagging scheme, for example based on XOR which would 344 // allow for 2**8 different tags but require a separate operation to remove 345 // the marking bit 346 // 347 // The external pointer sandboxing mechanism ensures that every access to an 348 // external pointer field will result in a valid pointer of the expected type 349 // even in the presence of an attacker able to corrupt memory inside the 350 // sandbox. However, if any data related to the external object is stored 351 // inside the sandbox it may still be corrupted and so must be validated before 352 // use or moved into the external object. Further, an attacker will always be 353 // able to substitute different external pointers of the same type for each 354 // other. Therefore, code using external pointers must be written in a 355 // "substitution-safe" way, i.e. it must always be possible to substitute 356 // external pointers of the same type without causing memory corruption outside 357 // of the sandbox. Generally this is achieved by referencing any group of 358 // related external objects through a single external pointer. 
359 // 360 // Currently we use bit 62 for the marking bit which should always be unused as 361 // it's part of the non-canonical address range. When Arm's top-byte ignore 362 // (TBI) is enabled, this bit will be part of the ignored byte, and we assume 363 // that the Embedder is not using this byte (really only this one bit) for any 364 // other purpose. This bit also does not collide with the memory tagging 365 // extension (MTE) which would use bits [56, 60). 366 // 367 // External pointer tables are also available even when the sandbox is off but 368 // pointer compression is on. In that case, the mechanism can be used to easy 369 // alignment requirements as it turns unaligned 64-bit raw pointers into 370 // aligned 32-bit indices. To "opt-in" to the external pointer table mechanism 371 // for this purpose, instead of using the ExternalPointer accessors one needs to 372 // use ExternalPointerHandles directly and use them to access the pointers in an 373 // ExternalPointerTable. 374 constexpr uint64_t kExternalPointerMarkBit = 1ULL << 62; 375 constexpr uint64_t kExternalPointerTagMask = 0x40ff000000000000; 376 constexpr uint64_t kExternalPointerTagMaskWithoutMarkBit = 0xff000000000000; 377 constexpr uint64_t kExternalPointerTagShift = 48; 378 379 // All possible 8-bit type tags. 380 // These are sorted so that tags can be grouped together and it can efficiently 381 // be checked if a tag belongs to a given group. See for example the 382 // IsSharedExternalPointerType routine. 
constexpr uint64_t kAllExternalPointerTypeTags[] = {
    0b00001111, 0b00010111, 0b00011011, 0b00011101, 0b00011110, 0b00100111,
    0b00101011, 0b00101101, 0b00101110, 0b00110011, 0b00110101, 0b00110110,
    0b00111001, 0b00111010, 0b00111100, 0b01000111, 0b01001011, 0b01001101,
    0b01001110, 0b01010011, 0b01010101, 0b01010110, 0b01011001, 0b01011010,
    0b01011100, 0b01100011, 0b01100101, 0b01100110, 0b01101001, 0b01101010,
    0b01101100, 0b01110001, 0b01110010, 0b01110100, 0b01111000, 0b10000111,
    0b10001011, 0b10001101, 0b10001110, 0b10010011, 0b10010101, 0b10010110,
    0b10011001, 0b10011010, 0b10011100, 0b10100011, 0b10100101, 0b10100110,
    0b10101001, 0b10101010, 0b10101100, 0b10110001, 0b10110010, 0b10110100,
    0b10111000, 0b11000011, 0b11000101, 0b11000110, 0b11001001, 0b11001010,
    0b11001100, 0b11010001, 0b11010010, 0b11010100, 0b11011000, 0b11100001,
    0b11100010, 0b11100100, 0b11101000, 0b11110000};

// Shift the i-th type tag into the tag bits [48, 56) and set the GC marking
// bit (see the comment block above for the tagging scheme).
#define TAG(i)                                                    \
  ((kAllExternalPointerTypeTags[i] << kExternalPointerTagShift) | \
   kExternalPointerMarkBit)

// clang-format off

// When adding new tags, please ensure that the code using these tags is
// "substitution-safe", i.e. still operate safely if external pointers of the
// same type are swapped by an attacker. See comment above for more details.

// Shared external pointers are owned by the shared Isolate and stored in the
// shared external pointer table associated with that Isolate, where they can
// be accessed from multiple threads at the same time. The objects referenced
// in this way must therefore always be thread-safe.
411 #define SHARED_EXTERNAL_POINTER_TAGS(V) \ 412 V(kFirstSharedTag, TAG(0)) \ 413 V(kWaiterQueueNodeTag, TAG(0)) \ 414 V(kExternalStringResourceTag, TAG(1)) \ 415 V(kExternalStringResourceDataTag, TAG(2)) \ 416 V(kLastSharedTag, TAG(2)) 417 418 // External pointers using these tags are kept in a per-Isolate external 419 // pointer table and can only be accessed when this Isolate is active. 420 #define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \ 421 V(kForeignForeignAddressTag, TAG(10)) \ 422 V(kNativeContextMicrotaskQueueTag, TAG(11)) \ 423 V(kEmbedderDataSlotPayloadTag, TAG(12)) \ 424 /* This tag essentially stands for a `void*` pointer in the V8 API, and */ \ 425 /* it is the Embedder's responsibility to ensure type safety (against */ \ 426 /* substitution) and lifetime validity of these objects. */ \ 427 V(kExternalObjectValueTag, TAG(13)) \ 428 V(kFunctionTemplateInfoCallbackTag, TAG(14)) \ 429 V(kAccessorInfoGetterTag, TAG(15)) \ 430 V(kAccessorInfoSetterTag, TAG(16)) \ 431 V(kWasmInternalFunctionCallTargetTag, TAG(17)) \ 432 V(kWasmTypeInfoNativeTypeTag, TAG(18)) \ 433 V(kWasmExportedFunctionDataSignatureTag, TAG(19)) \ 434 V(kWasmContinuationJmpbufTag, TAG(20)) \ 435 V(kWasmIndirectFunctionTargetTag, TAG(21)) \ 436 V(kArrayBufferExtensionTag, TAG(22)) 437 438 // All external pointer tags. 439 #define ALL_EXTERNAL_POINTER_TAGS(V) \ 440 SHARED_EXTERNAL_POINTER_TAGS(V) \ 441 PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) 442 443 #define EXTERNAL_POINTER_TAG_ENUM(Name, Tag) Name = Tag, 444 #define MAKE_TAG(HasMarkBit, TypeTag) \ 445 ((static_cast
(TypeTag) << kExternalPointerTagShift) | \ 446 (HasMarkBit ? kExternalPointerMarkBit : 0)) 447 enum ExternalPointerTag : uint64_t { 448 // Empty tag value. Mostly used as placeholder. 449 kExternalPointerNullTag = MAKE_TAG(1, 0b00000000), 450 // External pointer tag that will match any external pointer. Use with care! 451 kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111), 452 // The free entry tag has all type bits set so every type check with a 453 // different type fails. It also doesn't have the mark bit set as free 454 // entries are (by definition) not alive. 455 kExternalPointerFreeEntryTag = MAKE_TAG(0, 0b11111111), 456 // Evacuation entries are used during external pointer table compaction. 457 kExternalPointerEvacuationEntryTag = MAKE_TAG(1, 0b11100111), 458 459 ALL_EXTERNAL_POINTER_TAGS(EXTERNAL_POINTER_TAG_ENUM) 460 }; 461 462 #undef MAKE_TAG 463 #undef TAG 464 #undef EXTERNAL_POINTER_TAG_ENUM 465 466 // clang-format on 467 468 // True if the external pointer must be accessed from the shared isolate's 469 // external pointer table. 470 V8_INLINE static constexpr bool IsSharedExternalPointerType( 471 ExternalPointerTag tag) { 472 return tag >= kFirstSharedTag && tag <= kLastSharedTag; 473 } 474 475 // True if the external pointer may live in a read-only object, in which case 476 // the table entry will be in the shared read-only segment of the external 477 // pointer table. 478 V8_INLINE static constexpr bool IsMaybeReadOnlyExternalPointerType( 479 ExternalPointerTag tag) { 480 return tag == kAccessorInfoGetterTag || tag == kAccessorInfoSetterTag || 481 tag == kFunctionTemplateInfoCallbackTag; 482 } 483 484 // Sanity checks. 485 #define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \ 486 static_assert(IsSharedExternalPointerType(Tag)); 487 #define CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) 
\ 488 static_assert(!IsSharedExternalPointerType(Tag)); 489 490 SHARED_EXTERNAL_POINTER_TAGS(CHECK_SHARED_EXTERNAL_POINTER_TAGS) 491 PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS) 492 493 #undef CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS 494 #undef CHECK_SHARED_EXTERNAL_POINTER_TAGS 495 496 #undef SHARED_EXTERNAL_POINTER_TAGS 497 #undef EXTERNAL_POINTER_TAGS 498 499 // 500 // Indirect Pointers. 501 // 502 // When the sandbox is enabled, indirect pointers are used to reference 503 // HeapObjects that live outside of the sandbox (but are still managed by V8's 504 // garbage collector). When object A references an object B through an indirect 505 // pointer, object A will contain a IndirectPointerHandle, i.e. a shifted 506 // 32-bit index, which identifies an entry in a pointer table (either the 507 // trusted pointer table for TrustedObjects, or the code pointer table if it is 508 // a Code object). This table entry then contains the actual pointer to object 509 // B. Further, object B owns this pointer table entry, and it is responsible 510 // for updating the "self-pointer" in the entry when it is relocated in memory. 511 // This way, in contrast to "normal" pointers, indirect pointers never need to 512 // be tracked by the GC (i.e. there is no remembered set for them). 513 // These pointers do not exist when the sandbox is disabled. 514 515 // An IndirectPointerHandle represents a 32-bit index into a pointer table. 516 using IndirectPointerHandle = uint32_t; 517 518 // A null handle always references an entry that contains nullptr. 519 constexpr IndirectPointerHandle kNullIndirectPointerHandle = 0; 520 521 // When the sandbox is enabled, indirect pointers are used to implement: 522 // - TrustedPointers: an indirect pointer using the trusted pointer table (TPT) 523 // and referencing a TrustedObject in one of the trusted heap spaces. 
524 // - CodePointers, an indirect pointer using the code pointer table (CPT) and 525 // referencing a Code object together with its instruction stream. 526 527 // 528 // Trusted Pointers. 529 // 530 // A pointer to a TrustedObject. 531 // When the sandbox is enabled, these are indirect pointers using the trusted 532 // pointer table (TPT). They are used to reference trusted objects (located in 533 // one of V8's trusted heap spaces, outside of the sandbox) from inside the 534 // sandbox in a memory-safe way. When the sandbox is disabled, these are 535 // regular tagged pointers. 536 using TrustedPointerHandle = IndirectPointerHandle; 537 538 // The size of the virtual memory reservation for the trusted pointer table. 539 // As with the external pointer table, a maximum table size in combination with 540 // shifted indices allows omitting bounds checks. 541 constexpr size_t kTrustedPointerTableReservationSize = 64 * MB; 542 543 // The trusted pointer handles are stores shifted to the left by this amount 544 // to guarantee that they are smaller than the maximum table size. 545 constexpr uint32_t kTrustedPointerHandleShift = 9; 546 547 // A null handle always references an entry that contains nullptr. 548 constexpr TrustedPointerHandle kNullTrustedPointerHandle = 549 kNullIndirectPointerHandle; 550 551 // The maximum number of entries in an trusted pointer table. 552 constexpr int kTrustedPointerTableEntrySize = 8; 553 constexpr int kTrustedPointerTableEntrySizeLog2 = 3; 554 constexpr size_t kMaxTrustedPointers = 555 kTrustedPointerTableReservationSize / kTrustedPointerTableEntrySize; 556 static_assert((1 << (32 - kTrustedPointerHandleShift)) == kMaxTrustedPointers, 557 "kTrustedPointerTableReservationSize and " 558 "kTrustedPointerHandleShift don't match"); 559 560 // 561 // Code Pointers. 562 // 563 // A pointer to a Code object. 
564 // Essentially a specialized version of a trusted pointer that (when the 565 // sandbox is enabled) uses the code pointer table (CPT) instead of the TPT. 566 // Each entry in the CPT contains both a pointer to a Code object as well as a 567 // pointer to the Code's entrypoint. This allows calling/jumping into Code with 568 // one fewer memory access (compared to the case where the entrypoint pointer 569 // first needs to be loaded from the Code object). As such, a CodePointerHandle 570 // can be used both to obtain the referenced Code object and to directly load 571 // its entrypoint. 572 // 573 // When the sandbox is disabled, these are regular tagged pointers. 574 using CodePointerHandle = IndirectPointerHandle; 575 576 // The size of the virtual memory reservation for the code pointer table. 577 // As with the other tables, a maximum table size in combination with shifted 578 // indices allows omitting bounds checks. 579 constexpr size_t kCodePointerTableReservationSize = 16 * MB; 580 581 // Code pointer handles are shifted by a different amount than indirect pointer 582 // handles as the tables have a different maximum size. 583 constexpr uint32_t kCodePointerHandleShift = 12; 584 585 // A null handle always references an entry that contains nullptr. 586 constexpr CodePointerHandle kNullCodePointerHandle = kNullIndirectPointerHandle; 587 588 // It can sometimes be necessary to distinguish a code pointer handle from a 589 // trusted pointer handle. A typical example would be a union trusted pointer 590 // field that can refer to both Code objects and other trusted objects. To 591 // support these use-cases, we use a simple marking scheme where some of the 592 // low bits of a code pointer handle are set, while they will be unset on a 593 // trusted pointer handle. This way, the correct table to resolve the handle 594 // can be determined even in the absence of a type tag. 
595 constexpr uint32_t kCodePointerHandleMarker = 0x1; 596 static_assert(kCodePointerHandleShift > 0); 597 static_assert(kTrustedPointerHandleShift > 0); 598 599 // The maximum number of entries in a code pointer table. 600 constexpr int kCodePointerTableEntrySize = 16; 601 constexpr int kCodePointerTableEntrySizeLog2 = 4; 602 constexpr size_t kMaxCodePointers = 603 kCodePointerTableReservationSize / kCodePointerTableEntrySize; 604 static_assert( 605 (1 << (32 - kCodePointerHandleShift)) == kMaxCodePointers, 606 "kCodePointerTableReservationSize and kCodePointerHandleShift don't match"); 607 608 constexpr int kCodePointerTableEntryEntrypointOffset = 0; 609 constexpr int kCodePointerTableEntryCodeObjectOffset = 8; 610 611 // Constants that can be used to mark places that should be modified once 612 // certain types of objects are moved out of the sandbox and into trusted space. 613 constexpr bool kRuntimeGeneratedCodeObjectsLiveInTrustedSpace = true; 614 constexpr bool kBuiltinCodeObjectsLiveInTrustedSpace = false; 615 constexpr bool kAllCodeObjectsLiveInTrustedSpace = 616 kRuntimeGeneratedCodeObjectsLiveInTrustedSpace && 617 kBuiltinCodeObjectsLiveInTrustedSpace; 618 619 // {obj} must be the raw tagged pointer representation of a HeapObject 620 // that's guaranteed to never be in ReadOnlySpace. 621 V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj); 622 623 // Returns if we need to throw when an error occurs. This infers the language 624 // mode based on the current context and the closure. This returns true if the 625 // language mode is strict. 626 V8_EXPORT bool ShouldThrowOnError(internal::Isolate* isolate); 627 /** 628 * This class exports constants and functionality from within v8 that 629 * is necessary to implement inline functions in the v8 api. Don't 630 * depend on functions and constants defined here. 
631 */ 632 class Internals { 633 #ifdef V8_MAP_PACKING 634 V8_INLINE static constexpr Address UnpackMapWord(Address mapword) { 635 // TODO(wenyuzhao): Clear header metadata. 636 return mapword ^ kMapWordXorMask; 637 } 638 #endif 639 640 public: 641 // These values match non-compiler-dependent values defined within 642 // the implementation of v8. 643 static const int kHeapObjectMapOffset = 0; 644 static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiInt32Size; 645 static const int kStringResourceOffset = 646 1 * kApiTaggedSize + 2 * kApiInt32Size; 647 648 static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize; 649 static const int kJSObjectHeaderSize = 3 * kApiTaggedSize; 650 static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize; 651 static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize; 652 static const int kEmbedderDataSlotSize = kApiSystemPointerSize; 653 #ifdef V8_ENABLE_SANDBOX 654 static const int kEmbedderDataSlotExternalPointerOffset = kApiTaggedSize; 655 #else 656 static const int kEmbedderDataSlotExternalPointerOffset = 0; 657 #endif 658 static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize; 659 static const int kStringRepresentationAndEncodingMask = 0x0f; 660 static const int kStringEncodingMask = 0x8; 661 static const int kExternalTwoByteRepresentationTag = 0x02; 662 static const int kExternalOneByteRepresentationTag = 0x0a; 663 664 static const uint32_t kNumIsolateDataSlots = 4; 665 static const int kStackGuardSize = 8 * kApiSystemPointerSize; 666 static const int kNumberOfBooleanFlags = 6; 667 static const int kErrorMessageParamSize = 1; 668 static const int kTablesAlignmentPaddingSize = 1; 669 static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize; 670 static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize; 671 static const int kLinearAllocationAreaSize = 3 * kApiSystemPointerSize; 672 static const int kThreadLocalTopSize = 30 * kApiSystemPointerSize; 
673 static const int kHandleScopeDataSize = 674 2 * kApiSystemPointerSize + 2 * kApiInt32Size; 675 676 // ExternalPointerTable and TrustedPointerTable layout guarantees. 677 static const int kExternalPointerTableBasePointerOffset = 0; 678 static const int kExternalPointerTableSize = 2 * kApiSystemPointerSize; 679 static const int kTrustedPointerTableSize = 2 * kApiSystemPointerSize; 680 static const int kTrustedPointerTableBasePointerOffset = 0; 681 682 // IsolateData layout guarantees. 683 static const int kIsolateCageBaseOffset = 0; 684 static const int kIsolateStackGuardOffset = 685 kIsolateCageBaseOffset + kApiSystemPointerSize; 686 static const int kVariousBooleanFlagsOffset = 687 kIsolateStackGuardOffset + kStackGuardSize; 688 static const int kErrorMessageParamOffset = 689 kVariousBooleanFlagsOffset + kNumberOfBooleanFlags; 690 static const int kBuiltinTier0EntryTableOffset = kErrorMessageParamOffset + 691 kErrorMessageParamSize + 692 kTablesAlignmentPaddingSize; 693 static const int kBuiltinTier0TableOffset = 694 kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize; 695 static const int kNewAllocationInfoOffset = 696 kBuiltinTier0TableOffset + kBuiltinTier0TableSize; 697 static const int kOldAllocationInfoOffset = 698 kNewAllocationInfoOffset + kLinearAllocationAreaSize; 699 700 static const int kFastCCallAlignmentPaddingSize = 701 kApiSystemPointerSize == 8 ? 
0 : kApiSystemPointerSize; 702 static const int kIsolateFastCCallCallerFpOffset = 703 kOldAllocationInfoOffset + kLinearAllocationAreaSize + 704 kFastCCallAlignmentPaddingSize; 705 static const int kIsolateFastCCallCallerPcOffset = 706 kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize; 707 static const int kIsolateFastApiCallTargetOffset = 708 kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize; 709 static const int kIsolateLongTaskStatsCounterOffset = 710 kIsolateFastApiCallTargetOffset + kApiSystemPointerSize; 711 static const int kIsolateThreadLocalTopOffset = 712 kIsolateLongTaskStatsCounterOffset + kApiSizetSize; 713 static const int kIsolateHandleScopeDataOffset = 714 kIsolateThreadLocalTopOffset + kThreadLocalTopSize; 715 static const int kIsolateEmbedderDataOffset = 716 kIsolateHandleScopeDataOffset + kHandleScopeDataSize; 717 #ifdef V8_COMPRESS_POINTERS 718 static const int kIsolateExternalPointerTableOffset = 719 kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize; 720 static const int kIsolateSharedExternalPointerTableAddressOffset = 721 kIsolateExternalPointerTableOffset + kExternalPointerTableSize; 722 #ifdef V8_ENABLE_SANDBOX 723 static const int kIsolateTrustedCageBaseOffset = 724 kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize; 725 static const int kIsolateTrustedPointerTableOffset = 726 kIsolateTrustedCageBaseOffset + kApiSystemPointerSize; 727 static const int kIsolateApiCallbackThunkArgumentOffset = 728 kIsolateTrustedPointerTableOffset + kTrustedPointerTableSize; 729 #else 730 static const int kIsolateApiCallbackThunkArgumentOffset = 731 kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize; 732 #endif // V8_ENABLE_SANDBOX 733 #else 734 static const int kIsolateApiCallbackThunkArgumentOffset = 735 kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize; 736 #endif // V8_COMPRESS_POINTERS 737 static const int kContinuationPreservedEmbedderDataOffset = 738 
kIsolateApiCallbackThunkArgumentOffset + kApiSystemPointerSize; 739 740 static const int kWasm64OOBOffsetAlignmentPaddingSize = 0; 741 static const int kWasm64OOBOffsetOffset = 742 kContinuationPreservedEmbedderDataOffset + kApiSystemPointerSize + 743 kWasm64OOBOffsetAlignmentPaddingSize; 744 static const int kIsolateRootsOffset = 745 kWasm64OOBOffsetOffset + sizeof(int64_t); 746 747 #if V8_STATIC_ROOTS_BOOL 748 749 // These constants are copied from static-roots.h and guarded by static asserts. 750 #define EXPORTED_STATIC_ROOTS_PTR_LIST(V) \ 751 V(UndefinedValue, 0x69) \ 752 V(NullValue, 0x85) \ 753 V(TrueValue, 0xc9) \ 754 V(FalseValue, 0xad) \ 755 V(EmptyString, 0xa1) \ 756 V(TheHoleValue, 0x719) 757 758 using Tagged_t = uint32_t; 759 struct StaticReadOnlyRoot { 760 #define DEF_ROOT(name, value) static constexpr Tagged_t k##name = value; 761 EXPORTED_STATIC_ROOTS_PTR_LIST(DEF_ROOT) 762 #undef DEF_ROOT 763 764 static constexpr Tagged_t kFirstStringMap = 0xe5; 765 static constexpr Tagged_t kLastStringMap = 0x47d; 766 767 #define PLUSONE(...) 
+1 768 static constexpr size_t kNumberOfExportedStaticRoots = 769 2 + EXPORTED_STATIC_ROOTS_PTR_LIST(PLUSONE); 770 #undef PLUSONE 771 }; 772 773 #endif // V8_STATIC_ROOTS_BOOL 774 775 static const int kUndefinedValueRootIndex = 4; 776 static const int kTheHoleValueRootIndex = 5; 777 static const int kNullValueRootIndex = 6; 778 static const int kTrueValueRootIndex = 7; 779 static const int kFalseValueRootIndex = 8; 780 static const int kEmptyStringRootIndex = 9; 781 782 static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize; 783 static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3; 784 static const int kNodeStateMask = 0x3; 785 static const int kNodeStateIsWeakValue = 2; 786 787 static const int kFirstNonstringType = 0x80; 788 static const int kOddballType = 0x83; 789 static const int kForeignType = 0xcc; 790 static const int kJSSpecialApiObjectType = 0x410; 791 static const int kJSObjectType = 0x421; 792 static const int kFirstJSApiObjectType = 0x422; 793 static const int kLastJSApiObjectType = 0x80A; 794 // Defines a range [kFirstEmbedderJSApiObjectType, kJSApiObjectTypesCount] 795 // of JSApiObject instance type values that an embedder can use. 796 static const int kFirstEmbedderJSApiObjectType = 0; 797 static const int kLastEmbedderJSApiObjectType = 798 kLastJSApiObjectType - kFirstJSApiObjectType; 799 800 static const int kUndefinedOddballKind = 4; 801 static const int kNullOddballKind = 3; 802 803 // Constants used by PropertyCallbackInfo to check if we should throw when an 804 // error occurs. 805 static const int kThrowOnError = 0; 806 static const int kDontThrow = 1; 807 static const int kInferShouldThrowMode = 2; 808 809 // Soft limit for AdjustAmountofExternalAllocatedMemory. Trigger an 810 // incremental GC once the external memory reaches this limit. 
811 static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024; 812 813 #ifdef V8_MAP_PACKING 814 static const uintptr_t kMapWordMetadataMask = 0xffffULL << 48; 815 // The lowest two bits of mapwords are always `0b10` 816 static const uintptr_t kMapWordSignature = 0b10; 817 // XORing a (non-compressed) map with this mask ensures that the two 818 // low-order bits are 0b10. The 0 at the end makes this look like a Smi, 819 // although real Smis have all lower 32 bits unset. We only rely on these 820 // values passing as Smis in very few places. 821 static const int kMapWordXorMask = 0b11; 822 #endif 823 824 V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate); 825 V8_INLINE static void CheckInitialized(v8::Isolate* isolate) { 826 #ifdef V8_ENABLE_CHECKS 827 CheckInitializedImpl(isolate); 828 #endif 829 } 830 831 V8_INLINE static constexpr bool HasHeapObjectTag(Address value) { 832 return (value & kHeapObjectTagMask) == static_cast
(kHeapObjectTag); 833 } 834 835 V8_INLINE static constexpr int SmiValue(Address value) { 836 return PlatformSmiTagging::SmiToInt(value); 837 } 838 839 V8_INLINE static constexpr Address IntToSmi(int value) { 840 return internal::IntToSmi(value); 841 } 842 843 V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { 844 return PlatformSmiTagging::IsValidSmi(value); 845 } 846 847 #if V8_STATIC_ROOTS_BOOL 848 V8_INLINE static bool is_identical(Address obj, Tagged_t constant) { 849 return static_cast
(obj) == constant; 850 } 851 852 V8_INLINE static bool CheckInstanceMapRange(Address obj, Tagged_t first_map, 853 Tagged_t last_map) { 854 auto map = ReadRawField
(obj, kHeapObjectMapOffset); 855 #ifdef V8_MAP_PACKING 856 map = UnpackMapWord(map); 857 #endif 858 return map >= first_map && map <= last_map; 859 } 860 #endif 861 862 V8_INLINE static int GetInstanceType(Address obj) { 863 Address map = ReadTaggedPointerField(obj, kHeapObjectMapOffset); 864 #ifdef V8_MAP_PACKING 865 map = UnpackMapWord(map); 866 #endif 867 return ReadRawField
(map, kMapInstanceTypeOffset); 868 } 869 870 V8_INLINE static Address LoadMap(Address obj) { 871 if (!HasHeapObjectTag(obj)) return kNullAddress; 872 Address map = ReadTaggedPointerField(obj, kHeapObjectMapOffset); 873 #ifdef V8_MAP_PACKING 874 map = UnpackMapWord(map); 875 #endif 876 return map; 877 } 878 879 V8_INLINE static int GetOddballKind(Address obj) { 880 return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset)); 881 } 882 883 V8_INLINE static bool IsExternalTwoByteString(int instance_type) { 884 int representation = (instance_type & kStringRepresentationAndEncodingMask); 885 return representation == kExternalTwoByteRepresentationTag; 886 } 887 888 V8_INLINE static constexpr bool CanHaveInternalField(int instance_type) { 889 static_assert(kJSObjectType + 1 == kFirstJSApiObjectType); 890 static_assert(kJSObjectType < kLastJSApiObjectType); 891 static_assert(kFirstJSApiObjectType < kLastJSApiObjectType); 892 // Check for IsJSObject() || IsJSSpecialApiObject() || IsJSApiObject() 893 return instance_type == kJSSpecialApiObjectType || 894 // inlined version of base::IsInRange 895 (static_cast
(static_cast
(instance_type) - 896 static_cast
(kJSObjectType)) <= 897 static_cast
(kLastJSApiObjectType - kJSObjectType)); 898 } 899 900 V8_INLINE static uint8_t GetNodeFlag(Address* obj, int shift) { 901 uint8_t* addr = reinterpret_cast
(obj) + kNodeFlagsOffset; 902 return *addr & static_cast
(1U << shift); 903 } 904 905 V8_INLINE static void UpdateNodeFlag(Address* obj, bool value, int shift) { 906 uint8_t* addr = reinterpret_cast
(obj) + kNodeFlagsOffset; 907 uint8_t mask = static_cast
(1U << shift); 908 *addr = static_cast
((*addr & ~mask) | (value << shift)); 909 } 910 911 V8_INLINE static uint8_t GetNodeState(Address* obj) { 912 uint8_t* addr = reinterpret_cast
(obj) + kNodeFlagsOffset; 913 return *addr & kNodeStateMask; 914 } 915 916 V8_INLINE static void UpdateNodeState(Address* obj, uint8_t value) { 917 uint8_t* addr = reinterpret_cast
(obj) + kNodeFlagsOffset; 918 *addr = static_cast
((*addr & ~kNodeStateMask) | value); 919 } 920 921 V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot, 922 void* data) { 923 Address addr = reinterpret_cast
(isolate) + 924 kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize; 925 *reinterpret_cast
(addr) = data; 926 } 927 928 V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate, 929 uint32_t slot) { 930 Address addr = reinterpret_cast
(isolate) + 931 kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize; 932 return *reinterpret_cast
(addr); 933 } 934 935 V8_INLINE static void IncrementLongTasksStatsCounter(v8::Isolate* isolate) { 936 Address addr = 937 reinterpret_cast
(isolate) + kIsolateLongTaskStatsCounterOffset; 938 ++(*reinterpret_cast
(addr)); 939 } 940 941 V8_INLINE static Address* GetRootSlot(v8::Isolate* isolate, int index) { 942 Address addr = reinterpret_cast
(isolate) + kIsolateRootsOffset + 943 index * kApiSystemPointerSize; 944 return reinterpret_cast
(addr); 945 } 946 947 V8_INLINE static Address GetRoot(v8::Isolate* isolate, int index) { 948 #if V8_STATIC_ROOTS_BOOL 949 Address base = *reinterpret_cast
( 950 reinterpret_cast
(isolate) + kIsolateCageBaseOffset); 951 switch (index) { 952 #define DECOMPRESS_ROOT(name, ...) \ 953 case k##name##RootIndex: \ 954 return base + StaticReadOnlyRoot::k##name; 955 EXPORTED_STATIC_ROOTS_PTR_LIST(DECOMPRESS_ROOT) 956 #undef DECOMPRESS_ROOT 957 #undef EXPORTED_STATIC_ROOTS_PTR_LIST 958 default: 959 break; 960 } 961 #endif // V8_STATIC_ROOTS_BOOL 962 return *GetRootSlot(isolate, index); 963 } 964 965 #ifdef V8_ENABLE_SANDBOX 966 V8_INLINE static Address* GetExternalPointerTableBase(v8::Isolate* isolate) { 967 Address addr = reinterpret_cast
(isolate) + 968 kIsolateExternalPointerTableOffset + 969 kExternalPointerTableBasePointerOffset; 970 return *reinterpret_cast
(addr); 971 } 972 973 V8_INLINE static Address* GetSharedExternalPointerTableBase( 974 v8::Isolate* isolate) { 975 Address addr = reinterpret_cast
(isolate) + 976 kIsolateSharedExternalPointerTableAddressOffset; 977 addr = *reinterpret_cast
(addr); 978 addr += kExternalPointerTableBasePointerOffset; 979 return *reinterpret_cast
(addr); 980 } 981 #endif 982 983 template
984 V8_INLINE static T ReadRawField(Address heap_object_ptr, int offset) { 985 Address addr = heap_object_ptr + offset - kHeapObjectTag; 986 #ifdef V8_COMPRESS_POINTERS 987 if (sizeof(T) > kApiTaggedSize) { 988 // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size 989 // fields (external pointers, doubles and BigInt data) are only 990 // kTaggedSize aligned so we have to use unaligned pointer friendly way of 991 // accessing them in order to avoid undefined behavior in C++ code. 992 T r; 993 memcpy(&r, reinterpret_cast
(addr), sizeof(T)); 994 return r; 995 } 996 #endif 997 return *reinterpret_cast
(addr); 998 } 999 1000 V8_INLINE static Address ReadTaggedPointerField(Address heap_object_ptr, 1001 int offset) { 1002 #ifdef V8_COMPRESS_POINTERS 1003 uint32_t value = ReadRawField
(heap_object_ptr, offset); 1004 Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr); 1005 return base + static_cast
(static_cast
(value)); 1006 #else 1007 return ReadRawField
(heap_object_ptr, offset); 1008 #endif 1009 } 1010 1011 V8_INLINE static Address ReadTaggedSignedField(Address heap_object_ptr, 1012 int offset) { 1013 #ifdef V8_COMPRESS_POINTERS 1014 uint32_t value = ReadRawField
(heap_object_ptr, offset); 1015 return static_cast
(static_cast
(value)); 1016 #else 1017 return ReadRawField
(heap_object_ptr, offset); 1018 #endif 1019 } 1020 1021 V8_INLINE static v8::Isolate* GetIsolateForSandbox(Address obj) { 1022 #ifdef V8_ENABLE_SANDBOX 1023 return reinterpret_cast
( 1024 internal::IsolateFromNeverReadOnlySpaceObject(obj)); 1025 #else 1026 // Not used in non-sandbox mode. 1027 return nullptr; 1028 #endif 1029 } 1030 1031 template
1032 V8_INLINE static Address ReadExternalPointerField(v8::Isolate* isolate, 1033 Address heap_object_ptr, 1034 int offset) { 1035 #ifdef V8_ENABLE_SANDBOX 1036 static_assert(tag != kExternalPointerNullTag); 1037 // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so 1038 // it can be inlined and doesn't require an additional call. 1039 Address* table = IsSharedExternalPointerType(tag) 1040 ? GetSharedExternalPointerTableBase(isolate) 1041 : GetExternalPointerTableBase(isolate); 1042 internal::ExternalPointerHandle handle = 1043 ReadRawField
(heap_object_ptr, offset); 1044 uint32_t index = handle >> kExternalPointerIndexShift; 1045 std::atomic
* ptr = 1046 reinterpret_cast
*>(&table[index]); 1047 Address entry = std::atomic_load_explicit(ptr, std::memory_order_relaxed); 1048 return entry & ~tag; 1049 #else 1050 return ReadRawField
(heap_object_ptr, offset); 1051 #endif // V8_ENABLE_SANDBOX 1052 } 1053 1054 #ifdef V8_COMPRESS_POINTERS 1055 V8_INLINE static Address GetPtrComprCageBaseFromOnHeapAddress(Address addr) { 1056 return addr & -static_cast
(kPtrComprCageBaseAlignment); 1057 } 1058 1059 V8_INLINE static uint32_t CompressTagged(Address value) { 1060 return static_cast
(value); 1061 } 1062 1063 V8_INLINE static Address DecompressTaggedField(Address heap_object_ptr, 1064 uint32_t value) { 1065 Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr); 1066 return base + static_cast
(static_cast
(value)); 1067 } 1068 1069 #endif // V8_COMPRESS_POINTERS 1070 }; 1071 1072 // Only perform cast check for types derived from v8::Data since 1073 // other types do not implement the Cast method. 1074 template
1075 struct CastCheck { 1076 template
1077 static void Perform(T* data); 1078 }; 1079 1080 template <> 1081 template
1082 void CastCheck
::Perform(T* data) { 1083 T::Cast(data); 1084 } 1085 1086 template <> 1087 template
1088 void CastCheck
::Perform(T* data) {} 1089 1090 template
1091 V8_INLINE void PerformCastCheck(T* data) { 1092 CastCheck
::value && 1093 !std::is_same
>::value>::Perform(data); 1094 } 1095 1096 // A base class for backing stores, which is needed due to vagaries of 1097 // how static casts work with std::shared_ptr. 1098 class BackingStoreBase {}; 1099 1100 // The maximum value in enum GarbageCollectionReason, defined in heap.h. 1101 // This is needed for histograms sampling garbage collection reasons. 1102 constexpr int kGarbageCollectionReasonMaxValue = 27; 1103 1104 // Base class for the address block allocator compatible with standard 1105 // containers, which registers its allocated range as strong roots. 1106 class V8_EXPORT StrongRootAllocatorBase { 1107 public: 1108 Heap* heap() const { return heap_; } 1109 1110 bool operator==(const StrongRootAllocatorBase& other) const { 1111 return heap_ == other.heap_; 1112 } 1113 bool operator!=(const StrongRootAllocatorBase& other) const { 1114 return heap_ != other.heap_; 1115 } 1116 1117 protected: 1118 explicit StrongRootAllocatorBase(Heap* heap) : heap_(heap) {} 1119 explicit StrongRootAllocatorBase(v8::Isolate* isolate); 1120 1121 // Allocate/deallocate a range of n elements of type internal::Address. 1122 Address* allocate_impl(size_t n); 1123 void deallocate_impl(Address* p, size_t n) noexcept; 1124 1125 private: 1126 Heap* heap_; 1127 }; 1128 1129 // The general version of this template behaves just as std::allocator, with 1130 // the exception that the constructor takes the isolate as parameter. Only 1131 // specialized versions, e.g., internal::StrongRootAllocator
1132 // and internal::StrongRootAllocator
> register the allocated range 1133 // as strong roots. 1134 template
1135 class StrongRootAllocator : public StrongRootAllocatorBase, 1136 private std::allocator
{ 1137 public: 1138 using value_type = T; 1139 1140 explicit StrongRootAllocator(Heap* heap) : StrongRootAllocatorBase(heap) {} 1141 explicit StrongRootAllocator(v8::Isolate* isolate) 1142 : StrongRootAllocatorBase(isolate) {} 1143 template
1144 StrongRootAllocator(const StrongRootAllocator
& other) noexcept 1145 : StrongRootAllocatorBase(other) {} 1146 1147 using std::allocator
::allocate; 1148 using std::allocator
::deallocate; 1149 }; 1150 1151 // A class of iterators that wrap some different iterator type. 1152 // If specified, ElementType is the type of element accessed by the wrapper 1153 // iterator; in this case, the actual reference and pointer types of Iterator 1154 // must be convertible to ElementType& and ElementType*, respectively. 1155 template
1156 class WrappedIterator { 1157 public: 1158 static_assert( 1159 !std::is_void_v
|| 1160 (std::is_convertible_v
::pointer, 1161 ElementType*> && 1162 std::is_convertible_v
::reference, 1163 ElementType&>)); 1164 1165 using iterator_category = 1166 typename std::iterator_traits
::iterator_category; 1167 using difference_type = 1168 typename std::iterator_traits
::difference_type; 1169 using value_type = 1170 std::conditional_t
, 1171 typename std::iterator_traits
::value_type, 1172 ElementType>; 1173 using pointer = 1174 std::conditional_t
, 1175 typename std::iterator_traits
::pointer, 1176 ElementType*>; 1177 using reference = 1178 std::conditional_t
, 1179 typename std::iterator_traits
::reference, 1180 ElementType&>; 1181 1182 constexpr WrappedIterator() noexcept : it_() {} 1183 constexpr explicit WrappedIterator(Iterator it) noexcept : it_(it) {} 1184 1185 template
, 1187 bool> = true> 1188 constexpr WrappedIterator( 1189 const WrappedIterator
& it) noexcept 1190 : it_(it.base()) {} 1191 1192 constexpr reference operator*() const noexcept { return *it_; } 1193 constexpr pointer operator->() const noexcept { return it_.operator->(); } 1194 1195 constexpr WrappedIterator& operator++() noexcept { 1196 ++it_; 1197 return *this; 1198 } 1199 constexpr WrappedIterator operator++(int) noexcept { 1200 WrappedIterator result(*this); 1201 ++(*this); 1202 return result; 1203 } 1204 1205 constexpr WrappedIterator& operator--() noexcept { 1206 --it_; 1207 return *this; 1208 } 1209 constexpr WrappedIterator operator--(int) noexcept { 1210 WrappedIterator result(*this); 1211 --(*this); 1212 return result; 1213 } 1214 constexpr WrappedIterator operator+(difference_type n) const noexcept { 1215 WrappedIterator result(*this); 1216 result += n; 1217 return result; 1218 } 1219 constexpr WrappedIterator& operator+=(difference_type n) noexcept { 1220 it_ += n; 1221 return *this; 1222 } 1223 constexpr WrappedIterator operator-(difference_type n) const noexcept { 1224 return *this + (-n); 1225 } 1226 constexpr WrappedIterator& operator-=(difference_type n) noexcept { 1227 *this += -n; 1228 return *this; 1229 } 1230 constexpr reference operator[](difference_type n) const noexcept { 1231 return it_[n]; 1232 } 1233 1234 constexpr Iterator base() const noexcept { return it_; } 1235 1236 private: 1237 template
1238 friend class WrappedIterator; 1239 1240 private: 1241 Iterator it_; 1242 }; 1243 1244 template
1246 constexpr bool operator==( 1247 const WrappedIterator
& x, 1248 const WrappedIterator
& y) noexcept { 1249 return x.base() == y.base(); 1250 } 1251 1252 template
1254 constexpr bool operator<( 1255 const WrappedIterator
& x, 1256 const WrappedIterator
& y) noexcept { 1257 return x.base() < y.base(); 1258 } 1259 1260 template
1262 constexpr bool operator!=( 1263 const WrappedIterator
& x, 1264 const WrappedIterator
& y) noexcept { 1265 return !(x == y); 1266 } 1267 1268 template
1270 constexpr bool operator>( 1271 const WrappedIterator
& x, 1272 const WrappedIterator
& y) noexcept { 1273 return y < x; 1274 } 1275 1276 template
1278 constexpr bool operator>=( 1279 const WrappedIterator
& x, 1280 const WrappedIterator
& y) noexcept { 1281 return !(x < y); 1282 } 1283 1284 template
1286 constexpr bool operator<=( 1287 const WrappedIterator
& x, 1288 const WrappedIterator
& y) noexcept { 1289 return !(y < x); 1290 } 1291 1292 template
1294 constexpr auto operator-( 1295 const WrappedIterator
& x, 1296 const WrappedIterator
& y) noexcept 1297 -> decltype(x.base() - y.base()) { 1298 return x.base() - y.base(); 1299 } 1300 1301 template
1302 constexpr WrappedIterator
operator+( 1303 typename WrappedIterator
::difference_type n, 1304 const WrappedIterator
& x) noexcept { 1305 x += n; 1306 return x; 1307 } 1308 1309 // Helper functions about values contained in handles. 1310 // A value is either an indirect pointer or a direct pointer, depending on 1311 // whether direct local support is enabled. 1312 class ValueHelper final { 1313 public: 1314 #ifdef V8_ENABLE_DIRECT_LOCAL 1315 static constexpr Address kTaggedNullAddress = 1; 1316 static constexpr Address kEmpty = kTaggedNullAddress; 1317 #else 1318 static constexpr Address kEmpty = kNullAddress; 1319 #endif // V8_ENABLE_DIRECT_LOCAL 1320 1321 template
1322 V8_INLINE static bool IsEmpty(T* value) { 1323 return reinterpret_cast
(value) == kEmpty; 1324 } 1325 1326 // Returns a handle's "value" for all kinds of abstract handles. For Local, 1327 // it is equivalent to `*handle`. The variadic parameters support handle 1328 // types with extra type parameters, like `Persistent
`. 1329 template
typename H, typename T, 1330 typename... Ms> 1331 V8_INLINE static T* HandleAsValue(const H
& handle) { 1332 return handle.template value
(); 1333 } 1334 1335 #ifdef V8_ENABLE_DIRECT_LOCAL 1336 1337 template
1338 V8_INLINE static Address ValueAsAddress(const T* value) { 1339 return reinterpret_cast
(value); 1340 } 1341 1342 template
1343 V8_INLINE static T* SlotAsValue(S* slot) { 1344 if (check_null && slot == nullptr) { 1345 return reinterpret_cast
(kTaggedNullAddress); 1346 } 1347 return *reinterpret_cast
(slot); 1348 } 1349 1350 #else // !V8_ENABLE_DIRECT_LOCAL 1351 1352 template
1353 V8_INLINE static Address ValueAsAddress(const T* value) { 1354 return *reinterpret_cast
(value); 1355 } 1356 1357 template
1358 V8_INLINE static T* SlotAsValue(S* slot) { 1359 return reinterpret_cast
(slot); 1360 } 1361 1362 #endif // V8_ENABLE_DIRECT_LOCAL 1363 }; 1364 1365 /** 1366 * Helper functions about handles. 1367 */ 1368 class HandleHelper final { 1369 public: 1370 /** 1371 * Checks whether two handles are equal. 1372 * They are equal iff they are both empty or they are both non-empty and the 1373 * objects to which they refer are physically equal. 1374 * 1375 * If both handles refer to JS objects, this is the same as strict equality. 1376 * For primitives, such as numbers or strings, a `false` return value does not 1377 * indicate that the values aren't equal in the JavaScript sense. 1378 * Use `Value::StrictEquals()` to check primitives for equality. 1379 */ 1380 template
1381 V8_INLINE static bool EqualHandles(const T1& lhs, const T2& rhs) { 1382 if (lhs.IsEmpty()) return rhs.IsEmpty(); 1383 if (rhs.IsEmpty()) return false; 1384 return lhs.ptr() == rhs.ptr(); 1385 } 1386 1387 static V8_EXPORT bool IsOnStack(const void* ptr); 1388 static V8_EXPORT void VerifyOnStack(const void* ptr); 1389 static V8_EXPORT void VerifyOnMainThread(); 1390 }; 1391 1392 V8_EXPORT void VerifyHandleIsNonEmpty(bool is_empty); 1393 1394 } // namespace internal 1395 } // namespace v8 1396 1397 #endif // INCLUDE_V8_INTERNAL_H_
Contact us
|
About us
|
Terms of use
|
Copyright © 2000-2025 MyWebUniversity.com ™