The C and C++ Include Header Files
/usr/include/nodejs/deps/v8/include/v8-platform.h
$ cat -n /usr/include/nodejs/deps/v8/include/v8-platform.h
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_V8_PLATFORM_H_
6 #define V8_V8_PLATFORM_H_
7
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <stdlib.h>  // For abort.
11 #include <memory>
12 #include <string>
13 14 #include "v8config.h" // NOLINT(build/include_directory) 15 16 namespace v8 { 17 18 class Isolate; 19 20 // Valid priorities supported by the task scheduling infrastructure. 21 enum class TaskPriority : uint8_t { 22 /** 23 * Best effort tasks are not critical for performance of the application. The 24 * platform implementation should preempt such tasks if higher priority tasks 25 * arrive. 26 */ 27 kBestEffort, 28 /** 29 * User visible tasks are long running background tasks that will 30 * improve performance and memory usage of the application upon completion. 31 * Example: background compilation and garbage collection. 32 */ 33 kUserVisible, 34 /** 35 * User blocking tasks are highest priority tasks that block the execution 36 * thread (e.g. major garbage collection). They must be finished as soon as 37 * possible. 38 */ 39 kUserBlocking, 40 }; 41 42 /** 43 * A Task represents a unit of work. 44 */ 45 class Task { 46 public: 47 virtual ~Task() = default; 48 49 virtual void Run() = 0; 50 }; 51 52 /** 53 * An IdleTask represents a unit of work to be performed in idle time. 54 * The Run method is invoked with an argument that specifies the deadline in 55 * seconds returned by MonotonicallyIncreasingTime(). 56 * The idle task is expected to complete by this deadline. 57 */ 58 class IdleTask { 59 public: 60 virtual ~IdleTask() = default; 61 virtual void Run(double deadline_in_seconds) = 0; 62 }; 63 64 /** 65 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to 66 * post tasks after the isolate gets destructed, but these tasks may not get 67 * executed anymore. All tasks posted to a given TaskRunner will be invoked in 68 * sequence. Tasks can be posted from any thread. 69 */ 70 class TaskRunner { 71 public: 72 /** 73 * Schedules a task to be invoked by this TaskRunner. The TaskRunner 74 * implementation takes ownership of |task|. 75 */ 76 virtual void PostTask(std::unique_ptr
<Task>
task) = 0; 77 78 /** 79 * Schedules a task to be invoked by this TaskRunner. The TaskRunner 80 * implementation takes ownership of |task|. The |task| cannot be nested 81 * within other task executions. 82 * 83 * Tasks which shouldn't be interleaved with JS execution must be posted with 84 * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the 85 * embedder may process tasks in a callback which is called during JS 86 * execution. 87 * 88 * In particular, tasks which execute JS must be non-nestable, since JS 89 * execution is not allowed to nest. 90 * 91 * Requires that |TaskRunner::NonNestableTasksEnabled()| is true. 92 */ 93 virtual void PostNonNestableTask(std::unique_ptr
<Task>
task) {} 94 95 /** 96 * Schedules a task to be invoked by this TaskRunner. The task is scheduled 97 * after the given number of seconds |delay_in_seconds|. The TaskRunner 98 * implementation takes ownership of |task|. 99 */ 100 virtual void PostDelayedTask(std::unique_ptr
<Task>
task, 101 double delay_in_seconds) = 0; 102 103 /** 104 * Schedules a task to be invoked by this TaskRunner. The task is scheduled 105 * after the given number of seconds |delay_in_seconds|. The TaskRunner 106 * implementation takes ownership of |task|. The |task| cannot be nested 107 * within other task executions. 108 * 109 * Tasks which shouldn't be interleaved with JS execution must be posted with 110 * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the 111 * embedder may process tasks in a callback which is called during JS 112 * execution. 113 * 114 * In particular, tasks which execute JS must be non-nestable, since JS 115 * execution is not allowed to nest. 116 * 117 * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true. 118 */ 119 virtual void PostNonNestableDelayedTask(std::unique_ptr
<Task>
task, 120 double delay_in_seconds) {} 121 122 /** 123 * Schedules an idle task to be invoked by this TaskRunner. The task is 124 * scheduled when the embedder is idle. Requires that 125 * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered 126 * relative to other task types and may be starved for an arbitrarily long 127 * time if no idle time is available. The TaskRunner implementation takes 128 * ownership of |task|. 129 */ 130 virtual void PostIdleTask(std::unique_ptr
<IdleTask>
task) = 0; 131 132 /** 133 * Returns true if idle tasks are enabled for this TaskRunner. 134 */ 135 virtual bool IdleTasksEnabled() = 0; 136 137 /** 138 * Returns true if non-nestable tasks are enabled for this TaskRunner. 139 */ 140 virtual bool NonNestableTasksEnabled() const { return false; } 141 142 /** 143 * Returns true if non-nestable delayed tasks are enabled for this TaskRunner. 144 */ 145 virtual bool NonNestableDelayedTasksEnabled() const { return false; } 146 147 TaskRunner() = default; 148 virtual ~TaskRunner() = default; 149 150 TaskRunner(const TaskRunner&) = delete; 151 TaskRunner& operator=(const TaskRunner&) = delete; 152 }; 153 154 /** 155 * Delegate that's passed to Job's worker task, providing an entry point to 156 * communicate with the scheduler. 157 */ 158 class JobDelegate { 159 public: 160 /** 161 * Returns true if this thread should return from the worker task on the 162 * current thread ASAP. Workers should periodically invoke ShouldYield (or 163 * YieldIfNeeded()) as often as is reasonable. 164 */ 165 virtual bool ShouldYield() = 0; 166 167 /** 168 * Notifies the scheduler that max concurrency was increased, and the number 169 * of worker should be adjusted accordingly. See Platform::PostJob() for more 170 * details. 171 */ 172 virtual void NotifyConcurrencyIncrease() = 0; 173 174 /** 175 * Returns a task_id unique among threads currently running this job, such 176 * that GetTaskId() < worker count. To achieve this, the same task_id may be 177 * reused by a different thread after a worker_task returns. 178 */ 179 virtual uint8_t GetTaskId() = 0; 180 181 /** 182 * Returns true if the current task is called from the thread currently 183 * running JobHandle::Join(). 184 */ 185 virtual bool IsJoiningThread() const = 0; 186 }; 187 188 /** 189 * Handle returned when posting a Job. Provides methods to control execution of 190 * the posted Job. 191 */ 192 class JobHandle { 193 public: 194 virtual ~JobHandle() = default; 195 196 /** 197 * Notifies the scheduler that max concurrency was increased, and the number 198 * of worker should be adjusted accordingly. See Platform::PostJob() for more 199 * details. 200 */ 201 virtual void NotifyConcurrencyIncrease() = 0; 202 203 /** 204 * Contributes to the job on this thread. Doesn't return until all tasks have 205 * completed and max concurrency becomes 0. When Join() is called and max 206 * concurrency reaches 0, it should not increase again. This also promotes 207 * this Job's priority to be at least as high as the calling thread's 208 * priority. 209 */ 210 virtual void Join() = 0; 211 212 /** 213 * Forces all existing workers to yield ASAP. Waits until they have all 214 * returned from the Job's callback before returning. 215 */ 216 virtual void Cancel() = 0; 217 218 /* 219 * Forces all existing workers to yield ASAP but doesn’t wait for them. 220 * Warning, this is dangerous if the Job's callback is bound to or has access 221 * to state which may be deleted after this call. 222 */ 223 virtual void CancelAndDetach() = 0; 224 225 /** 226 * Returns true if there's any work pending or any worker running. 227 */ 228 virtual bool IsActive() = 0; 229 230 /** 231 * Returns true if associated with a Job and other methods may be called. 232 * Returns false after Join() or Cancel() was called. This may return true 233 * even if no workers are running and IsCompleted() returns true 234 */ 235 virtual bool IsValid() = 0; 236 237 /** 238 * Returns true if job priority can be changed. 
239 */ 240 virtual bool UpdatePriorityEnabled() const { return false; } 241 242 /** 243 * Update this Job's priority. 244 */ 245 virtual void UpdatePriority(TaskPriority new_priority) {} 246 }; 247 248 /** 249 * A JobTask represents work to run in parallel from Platform::PostJob(). 250 */ 251 class JobTask { 252 public: 253 virtual ~JobTask() = default; 254 255 virtual void Run(JobDelegate* delegate) = 0; 256 257 /** 258 * Controls the maximum number of threads calling Run() concurrently, given 259 * the number of threads currently assigned to this job and executing Run(). 260 * Run() is only invoked if the number of threads previously running Run() was 261 * less than the value returned. Since GetMaxConcurrency() is a leaf function, 262 * it must not call back any JobHandle methods. 263 */ 264 virtual size_t GetMaxConcurrency(size_t worker_count) const = 0; 265 }; 266 267 /** 268 * The interface represents complex arguments to trace events. 269 */ 270 class ConvertableToTraceFormat { 271 public: 272 virtual ~ConvertableToTraceFormat() = default; 273 274 /** 275 * Append the class info to the provided |out| string. The appended 276 * data must be a valid JSON object. Strings must be properly quoted, and 277 * escaped. There is no processing applied to the content after it is 278 * appended. 279 */ 280 virtual void AppendAsTraceFormat(std::string* out) const = 0; 281 }; 282 283 /** 284 * V8 Tracing controller. 285 * 286 * Can be implemented by an embedder to record trace events from V8. 287 */ 288 class TracingController { 289 public: 290 virtual ~TracingController() = default; 291 292 // In Perfetto mode, trace events are written using Perfetto's Track Event 293 // API directly without going through the embedder. However, it is still 294 // possible to observe tracing being enabled and disabled. 295 #if !defined(V8_USE_PERFETTO) 296 /** 297 * Called by TRACE_EVENT* macros, don't call this directly. 298 * The name parameter is a category group for example: 299 * TRACE_EVENT0("v8,parse", "V8.Parse") 300 * The pointer returned points to a value with zero or more of the bits 301 * defined in CategoryGroupEnabledFlags. 302 **/ 303 virtual const uint8_t* GetCategoryGroupEnabled(const char* name) { 304 static uint8_t no = 0; 305 return &no; 306 } 307 308 /** 309 * Adds a trace event to the platform tracing system. These function calls are 310 * usually the result of a TRACE_* macro from trace_event_common.h when 311 * tracing and the category of the particular trace are enabled. It is not 312 * advisable to call these functions on their own; they are really only meant 313 * to be used by the trace macros. The returned handle can be used by 314 * UpdateTraceEventDuration to update the duration of COMPLETE events. 315 */ 316 virtual uint64_t AddTraceEvent( 317 char phase, const uint8_t* category_enabled_flag, const char* name, 318 const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args, 319 const char** arg_names, const uint8_t* arg_types, 320 const uint64_t* arg_values, 321 std::unique_ptr
<ConvertableToTraceFormat>
* arg_convertables, 322 unsigned int flags) { 323 return 0; 324 } 325 virtual uint64_t AddTraceEventWithTimestamp( 326 char phase, const uint8_t* category_enabled_flag, const char* name, 327 const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args, 328 const char** arg_names, const uint8_t* arg_types, 329 const uint64_t* arg_values, 330 std::unique_ptr
<ConvertableToTraceFormat>
* arg_convertables, 331 unsigned int flags, int64_t timestamp) { 332 return 0; 333 } 334 335 /** 336 * Sets the duration field of a COMPLETE trace event. It must be called with 337 * the handle returned from AddTraceEvent(). 338 **/ 339 virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag, 340 const char* name, uint64_t handle) {} 341 #endif // !defined(V8_USE_PERFETTO) 342 343 class TraceStateObserver { 344 public: 345 virtual ~TraceStateObserver() = default; 346 virtual void OnTraceEnabled() = 0; 347 virtual void OnTraceDisabled() = 0; 348 }; 349 350 /** Adds tracing state change observer. */ 351 virtual void AddTraceStateObserver(TraceStateObserver*) {} 352 353 /** Removes tracing state change observer. */ 354 virtual void RemoveTraceStateObserver(TraceStateObserver*) {} 355 }; 356 357 /** 358 * A V8 memory page allocator. 359 * 360 * Can be implemented by an embedder to manage large host OS allocations. 361 */ 362 class PageAllocator { 363 public: 364 virtual ~PageAllocator() = default; 365 366 /** 367 * Gets the page granularity for AllocatePages and FreePages. Addresses and 368 * lengths for those calls should be multiples of AllocatePageSize(). 369 */ 370 virtual size_t AllocatePageSize() = 0; 371 372 /** 373 * Gets the page granularity for SetPermissions and ReleasePages. Addresses 374 * and lengths for those calls should be multiples of CommitPageSize(). 375 */ 376 virtual size_t CommitPageSize() = 0; 377 378 /** 379 * Sets the random seed so that GetRandomMmapAddr() will generate repeatable 380 * sequences of random mmap addresses. 381 */ 382 virtual void SetRandomMmapSeed(int64_t seed) = 0; 383 384 /** 385 * Returns a randomized address, suitable for memory allocation under ASLR. 386 * The address will be aligned to AllocatePageSize. 387 */ 388 virtual void* GetRandomMmapAddr() = 0; 389 390 /** 391 * Memory permissions. 392 */ 393 enum Permission { 394 kNoAccess, 395 kRead, 396 kReadWrite, 397 kReadWriteExecute, 398 kReadExecute, 399 // Set this when reserving memory that will later require kReadWriteExecute 400 // permissions. The resulting behavior is platform-specific, currently 401 // this is used to set the MAP_JIT flag on Apple Silicon. 402 // TODO(jkummerow): Remove this when Wasm has a platform-independent 403 // w^x implementation. 404 // TODO(saelo): Remove this once all JIT pages are allocated through the 405 // VirtualAddressSpace API. 406 kNoAccessWillJitLater 407 }; 408 409 /** 410 * Allocates memory in range with the given alignment and permission. 411 */ 412 virtual void* AllocatePages(void* address, size_t length, size_t alignment, 413 Permission permissions) = 0; 414 415 /** 416 * Frees memory in a range that was allocated by a call to AllocatePages. 417 */ 418 virtual bool FreePages(void* address, size_t length) = 0; 419 420 /** 421 * Releases memory in a range that was allocated by a call to AllocatePages. 422 */ 423 virtual bool ReleasePages(void* address, size_t length, 424 size_t new_length) = 0; 425 426 /** 427 * Sets permissions on pages in an allocated range. 428 */ 429 virtual bool SetPermissions(void* address, size_t length, 430 Permission permissions) = 0; 431 432 /** 433 * Frees memory in the given [address, address + size) range. address and size 434 * should be operating system page-aligned. The next write to this 435 * memory area brings the memory transparently back. This should be treated as 436 * a hint to the OS that the pages are no longer needed. 
It does not guarantee 437 * that the pages will be discarded immediately or at all. 438 */ 439 virtual bool DiscardSystemPages(void* address, size_t size) { return true; } 440 441 /** 442 * Decommits any wired memory pages in the given range, allowing the OS to 443 * reclaim them, and marks the region as inacessible (kNoAccess). The address 444 * range stays reserved and can be accessed again later by changing its 445 * permissions. However, in that case the memory content is guaranteed to be 446 * zero-initialized again. The memory must have been previously allocated by a 447 * call to AllocatePages. Returns true on success, false otherwise. 448 */ 449 virtual bool DecommitPages(void* address, size_t size) = 0; 450 451 /** 452 * INTERNAL ONLY: This interface has not been stabilised and may change 453 * without notice from one release to another without being deprecated first. 454 */ 455 class SharedMemoryMapping { 456 public: 457 // Implementations are expected to free the shared memory mapping in the 458 // destructor. 459 virtual ~SharedMemoryMapping() = default; 460 virtual void* GetMemory() const = 0; 461 }; 462 463 /** 464 * INTERNAL ONLY: This interface has not been stabilised and may change 465 * without notice from one release to another without being deprecated first. 466 */ 467 class SharedMemory { 468 public: 469 // Implementations are expected to free the shared memory in the destructor. 470 virtual ~SharedMemory() = default; 471 virtual std::unique_ptr
<SharedMemoryMapping>
RemapTo( 472 void* new_address) const = 0; 473 virtual void* GetMemory() const = 0; 474 virtual size_t GetSize() const = 0; 475 }; 476 477 /** 478 * INTERNAL ONLY: This interface has not been stabilised and may change 479 * without notice from one release to another without being deprecated first. 480 * 481 * Reserve pages at a fixed address returning whether the reservation is 482 * possible. The reserved memory is detached from the PageAllocator and so 483 * should not be freed by it. It's intended for use with 484 * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory. 485 */ 486 virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) { 487 return false; 488 } 489 490 /** 491 * INTERNAL ONLY: This interface has not been stabilised and may change 492 * without notice from one release to another without being deprecated first. 493 * 494 * Allocates shared memory pages. Not all PageAllocators need support this and 495 * so this method need not be overridden. 496 * Allocates a new read-only shared memory region of size |length| and copies 497 * the memory at |original_address| into it. 498 */ 499 virtual std::unique_ptr
<SharedMemory>
AllocateSharedPages( 500 size_t length, const void* original_address) { 501 return {}; 502 } 503 504 /** 505 * INTERNAL ONLY: This interface has not been stabilised and may change 506 * without notice from one release to another without being deprecated first. 507 * 508 * If not overridden and changed to return true, V8 will not attempt to call 509 * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages 510 * and RemapSharedPages must also be overridden. 511 */ 512 virtual bool CanAllocateSharedPages() { return false; } 513 }; 514 515 // Opaque type representing a handle to a shared memory region. 516 using PlatformSharedMemoryHandle = intptr_t; 517 static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1; 518 519 // Conversion routines from the platform-dependent shared memory identifiers 520 // into the opaque PlatformSharedMemoryHandle type. These use the underlying 521 // types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t) 522 // to avoid pulling in large OS header files into this header file. Instead, 523 // the users of these routines are expected to include the respecitve OS 524 // headers in addition to this one. 525 #if V8_OS_MACOS 526 // Convert between a shared memory handle and a mach_port_t referencing a memory 527 // entry object. 528 inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry( 529 unsigned int port) { 530 return static_cast
<PlatformSharedMemoryHandle>
(port); 531 } 532 inline unsigned int MachMemoryEntryFromSharedMemoryHandle( 533 PlatformSharedMemoryHandle handle) { 534 return static_cast
<unsigned int>
(handle); 535 } 536 #elif V8_OS_FUCHSIA 537 // Convert between a shared memory handle and a zx_handle_t to a VMO. 538 inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) { 539 return static_cast
<PlatformSharedMemoryHandle>
(handle); 540 } 541 inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) { 542 return static_cast
<uint32_t>
(handle); 543 } 544 #elif V8_OS_WIN 545 // Convert between a shared memory handle and a Windows HANDLE to a file mapping 546 // object. 547 inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping( 548 void* handle) { 549 return reinterpret_cast
<PlatformSharedMemoryHandle>
(handle); 550 } 551 inline void* FileMappingFromSharedMemoryHandle( 552 PlatformSharedMemoryHandle handle) { 553 return reinterpret_cast
<void*>
(handle); 554 } 555 #else 556 // Convert between a shared memory handle and a file descriptor. 557 inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) { 558 return static_cast
<PlatformSharedMemoryHandle>
(fd); 559 } 560 inline int FileDescriptorFromSharedMemoryHandle( 561 PlatformSharedMemoryHandle handle) { 562 return static_cast
<int>
(handle); 563 } 564 #endif 565 566 /** 567 * Possible permissions for memory pages. 568 */ 569 enum class PagePermissions { 570 kNoAccess, 571 kRead, 572 kReadWrite, 573 kReadWriteExecute, 574 kReadExecute, 575 }; 576 577 /** 578 * Class to manage a virtual memory address space. 579 * 580 * This class represents a contiguous region of virtual address space in which 581 * sub-spaces and (private or shared) memory pages can be allocated, freed, and 582 * modified. This interface is meant to eventually replace the PageAllocator 583 * interface, and can be used as an alternative in the meantime. 584 * 585 * This API is not yet stable and may change without notice! 586 */ 587 class VirtualAddressSpace { 588 public: 589 using Address = uintptr_t; 590 591 VirtualAddressSpace(size_t page_size, size_t allocation_granularity, 592 Address base, size_t size, 593 PagePermissions max_page_permissions) 594 : page_size_(page_size), 595 allocation_granularity_(allocation_granularity), 596 base_(base), 597 size_(size), 598 max_page_permissions_(max_page_permissions) {} 599 600 virtual ~VirtualAddressSpace() = default; 601 602 /** 603 * The page size used inside this space. Guaranteed to be a power of two. 604 * Used as granularity for all page-related operations except for allocation, 605 * which use the allocation_granularity(), see below. 606 * 607 * \returns the page size in bytes. 608 */ 609 size_t page_size() const { return page_size_; } 610 611 /** 612 * The granularity of page allocations and, by extension, of subspace 613 * allocations. This is guaranteed to be a power of two and a multiple of the 614 * page_size(). In practice, this is equal to the page size on most OSes, but 615 * on Windows it is usually 64KB, while the page size is 4KB. 616 * 617 * \returns the allocation granularity in bytes. 618 */ 619 size_t allocation_granularity() const { return allocation_granularity_; } 620 621 /** 622 * The base address of the address space managed by this instance. 623 * 624 * \returns the base address of this address space. 625 */ 626 Address base() const { return base_; } 627 628 /** 629 * The size of the address space managed by this instance. 630 * 631 * \returns the size of this address space in bytes. 632 */ 633 size_t size() const { return size_; } 634 635 /** 636 * The maximum page permissions that pages allocated inside this space can 637 * obtain. 638 * 639 * \returns the maximum page permissions. 640 */ 641 PagePermissions max_page_permissions() const { return max_page_permissions_; } 642 643 /** 644 * Sets the random seed so that GetRandomPageAddress() will generate 645 * repeatable sequences of random addresses. 646 * 647 * \param The seed for the PRNG. 648 */ 649 virtual void SetRandomSeed(int64_t seed) = 0; 650 651 /** 652 * Returns a random address inside this address space, suitable for page 653 * allocations hints. 654 * 655 * \returns a random address aligned to allocation_granularity(). 656 */ 657 virtual Address RandomPageAddress() = 0; 658 659 /** 660 * Allocates private memory pages with the given alignment and permissions. 661 * 662 * \param hint If nonzero, the allocation is attempted to be placed at the 663 * given address first. If that fails, the allocation is attempted to be 664 * placed elsewhere, possibly nearby, but that is not guaranteed. Specifying 665 * zero for the hint always causes this function to choose a random address. 666 * The hint, if specified, must be aligned to the specified alignment. 667 * 668 * \param size The size of the allocation in bytes. 
Must be a multiple of the 669 * allocation_granularity(). 670 * 671 * \param alignment The alignment of the allocation in bytes. Must be a 672 * multiple of the allocation_granularity() and should be a power of two. 673 * 674 * \param permissions The page permissions of the newly allocated pages. 675 * 676 * \returns the start address of the allocated pages on success, zero on 677 * failure. 678 */ 679 static constexpr Address kNoHint = 0; 680 virtual V8_WARN_UNUSED_RESULT Address 681 AllocatePages(Address hint, size_t size, size_t alignment, 682 PagePermissions permissions) = 0; 683 684 /** 685 * Frees previously allocated pages. 686 * 687 * This function will terminate the process on failure as this implies a bug 688 * in the client. As such, there is no return value. 689 * 690 * \param address The start address of the pages to free. This address must 691 * have been obtained through a call to AllocatePages. 692 * 693 * \param size The size in bytes of the region to free. This must match the 694 * size passed to AllocatePages when the pages were allocated. 695 */ 696 virtual void FreePages(Address address, size_t size) = 0; 697 698 /** 699 * Sets permissions of all allocated pages in the given range. 700 * 701 * \param address The start address of the range. Must be aligned to 702 * page_size(). 703 * 704 * \param size The size in bytes of the range. Must be a multiple 705 * of page_size(). 706 * 707 * \param permissions The new permissions for the range. 708 * 709 * \returns true on success, false otherwise. 710 */ 711 virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions( 712 Address address, size_t size, PagePermissions permissions) = 0; 713 714 /** 715 * Creates a guard region at the specified address. 716 * 717 * Guard regions are guaranteed to cause a fault when accessed and generally 718 * do not count towards any memory consumption limits. Further, allocating 719 * guard regions can usually not fail in subspaces if the region does not 720 * overlap with another region, subspace, or page allocation. 721 * 722 * \param address The start address of the guard region. Must be aligned to 723 * the allocation_granularity(). 724 * 725 * \param size The size of the guard region in bytes. Must be a multiple of 726 * the allocation_granularity(). 727 * 728 * \returns true on success, false otherwise. 729 */ 730 virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address, 731 size_t size) = 0; 732 733 /** 734 * Frees an existing guard region. 735 * 736 * This function will terminate the process on failure as this implies a bug 737 * in the client. As such, there is no return value. 738 * 739 * \param address The start address of the guard region to free. This address 740 * must have previously been used as address parameter in a successful 741 * invocation of AllocateGuardRegion. 742 * 743 * \param size The size in bytes of the guard region to free. This must match 744 * the size passed to AllocateGuardRegion when the region was created. 745 */ 746 virtual void FreeGuardRegion(Address address, size_t size) = 0; 747 748 /** 749 * Allocates shared memory pages with the given permissions. 750 * 751 * \param hint Placement hint. See AllocatePages. 752 * 753 * \param size The size of the allocation in bytes. Must be a multiple of the 754 * allocation_granularity(). 755 * 756 * \param permissions The page permissions of the newly allocated pages. 757 * 758 * \param handle A platform-specific handle to a shared memory object. 
See 759 * the SharedMemoryHandleFromX routines above for ways to obtain these. 760 * 761 * \param offset The offset in the shared memory object at which the mapping 762 * should start. Must be a multiple of the allocation_granularity(). 763 * 764 * \returns the start address of the allocated pages on success, zero on 765 * failure. 766 */ 767 virtual V8_WARN_UNUSED_RESULT Address 768 AllocateSharedPages(Address hint, size_t size, PagePermissions permissions, 769 PlatformSharedMemoryHandle handle, uint64_t offset) = 0; 770 771 /** 772 * Frees previously allocated shared pages. 773 * 774 * This function will terminate the process on failure as this implies a bug 775 * in the client. As such, there is no return value. 776 * 777 * \param address The start address of the pages to free. This address must 778 * have been obtained through a call to AllocateSharedPages. 779 * 780 * \param size The size in bytes of the region to free. This must match the 781 * size passed to AllocateSharedPages when the pages were allocated. 782 */ 783 virtual void FreeSharedPages(Address address, size_t size) = 0; 784 785 /** 786 * Whether this instance can allocate subspaces or not. 787 * 788 * \returns true if subspaces can be allocated, false if not. 789 */ 790 virtual bool CanAllocateSubspaces() = 0; 791 792 /* 793 * Allocate a subspace. 794 * 795 * The address space of a subspace stays reserved in the parent space for the 796 * lifetime of the subspace. As such, it is guaranteed that page allocations 797 * on the parent space cannot end up inside a subspace. 798 * 799 * \param hint Hints where the subspace should be allocated. See 800 * AllocatePages() for more details. 801 * 802 * \param size The size in bytes of the subspace. Must be a multiple of the 803 * allocation_granularity(). 804 * 805 * \param alignment The alignment of the subspace in bytes. Must be a multiple 806 * of the allocation_granularity() and should be a power of two. 807 * 808 * \param max_page_permissions The maximum permissions that pages allocated in 809 * the subspace can obtain. 810 * 811 * \returns a new subspace or nullptr on failure. 812 */ 813 virtual std::unique_ptr
<VirtualAddressSpace>
AllocateSubspace( 814 Address hint, size_t size, size_t alignment, 815 PagePermissions max_page_permissions) = 0; 816 817 // 818 // TODO(v8) maybe refactor the methods below before stabilizing the API. For 819 // example by combining them into some form of page operation method that 820 // takes a command enum as parameter. 821 // 822 823 /** 824 * Frees memory in the given [address, address + size) range. address and 825 * size should be aligned to the page_size(). The next write to this memory 826 * area brings the memory transparently back. This should be treated as a 827 * hint to the OS that the pages are no longer needed. It does not guarantee 828 * that the pages will be discarded immediately or at all. 829 * 830 * \returns true on success, false otherwise. Since this method is only a 831 * hint, a successful invocation does not imply that pages have been removed. 832 */ 833 virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address, 834 size_t size) { 835 return true; 836 } 837 /** 838 * Decommits any wired memory pages in the given range, allowing the OS to 839 * reclaim them, and marks the region as inacessible (kNoAccess). The address 840 * range stays reserved and can be accessed again later by changing its 841 * permissions. However, in that case the memory content is guaranteed to be 842 * zero-initialized again. The memory must have been previously allocated by a 843 * call to AllocatePages. 844 * 845 * \returns true on success, false otherwise. 846 */ 847 virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address, 848 size_t size) = 0; 849 850 private: 851 const size_t page_size_; 852 const size_t allocation_granularity_; 853 const Address base_; 854 const size_t size_; 855 const PagePermissions max_page_permissions_; 856 }; 857 858 /** 859 * V8 Allocator used for allocating zone backings. 860 */ 861 class ZoneBackingAllocator { 862 public: 863 using MallocFn = void* (*)(size_t); 864 using FreeFn = void (*)(void*); 865 866 virtual MallocFn GetMallocFn() const { return ::malloc; } 867 virtual FreeFn GetFreeFn() const { return ::free; } 868 }; 869 870 /** 871 * Observer used by V8 to notify the embedder about entering/leaving sections 872 * with high throughput of malloc/free operations. 873 */ 874 class HighAllocationThroughputObserver { 875 public: 876 virtual void EnterSection() {} 877 virtual void LeaveSection() {} 878 }; 879 880 /** 881 * V8 Platform abstraction layer. 882 * 883 * The embedder has to provide an implementation of this interface before 884 * initializing the rest of V8. 885 */ 886 class Platform { 887 public: 888 virtual ~Platform() = default; 889 890 /** 891 * Allows the embedder to manage memory page allocations. 892 */ 893 virtual PageAllocator* GetPageAllocator() { 894 // TODO(bbudge) Make this abstract after all embedders implement this. 895 return nullptr; 896 } 897 898 /** 899 * Allows the embedder to specify a custom allocator used for zones. 900 */ 901 virtual ZoneBackingAllocator* GetZoneBackingAllocator() { 902 static ZoneBackingAllocator default_allocator; 903 return &default_allocator; 904 } 905 906 /** 907 * Enables the embedder to respond in cases where V8 can't allocate large 908 * blocks of memory. V8 retries the failed allocation once after calling this 909 * method. On success, execution continues; otherwise V8 exits with a fatal 910 * error. 911 * Embedder overrides of this function must NOT call back into V8. 
912 */ 913 virtual void OnCriticalMemoryPressure() { 914 // TODO(bbudge) Remove this when embedders override the following method. 915 // See crbug.com/634547. 916 } 917 918 /** 919 * Enables the embedder to respond in cases where V8 can't allocate large 920 * memory regions. The |length| parameter is the amount of memory needed. 921 * Returns true if memory is now available. Returns false if no memory could 922 * be made available. V8 will retry allocations until this method returns 923 * false. 924 * 925 * Embedder overrides of this function must NOT call back into V8. 926 */ 927 virtual bool OnCriticalMemoryPressure(size_t length) { return false; } 928 929 /** 930 * Gets the number of worker threads used by 931 * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number 932 * of tasks a work package should be split into. A return value of 0 means 933 * that there are no worker threads available. Note that a value of 0 won't 934 * prohibit V8 from posting tasks using |CallOnWorkerThread|. 935 */ 936 virtual int NumberOfWorkerThreads() = 0; 937 938 /** 939 * Returns a TaskRunner which can be used to post a task on the foreground. 940 * The TaskRunner's NonNestableTasksEnabled() must be true. This function 941 * should only be called from a foreground thread. 942 */ 943 virtual std::shared_ptr
<v8::TaskRunner>
GetForegroundTaskRunner( 944 Isolate* isolate) = 0; 945 946 /** 947 * Schedules a task to be invoked on a worker thread. 948 */ 949 virtual void CallOnWorkerThread(std::unique_ptr
<Task>
task) = 0; 950 951 /** 952 * Schedules a task that blocks the main thread to be invoked with 953 * high-priority on a worker thread. 954 */ 955 virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr
<Task>
task) { 956 // Embedders may optionally override this to process these tasks in a high 957 // priority pool. 958 CallOnWorkerThread(std::move(task)); 959 } 960 961 /** 962 * Schedules a task to be invoked with low-priority on a worker thread. 963 */ 964 virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr
<Task>
task) { 965 // Embedders may optionally override this to process these tasks in a low 966 // priority pool. 967 CallOnWorkerThread(std::move(task)); 968 } 969 970 /** 971 * Schedules a task to be invoked on a worker thread after |delay_in_seconds| 972 * expires. 973 */ 974 virtual void CallDelayedOnWorkerThread(std::unique_ptr
<Task>
task, 975 double delay_in_seconds) = 0; 976 977 /** 978 * Returns true if idle tasks are enabled for the given |isolate|. 979 */ 980 virtual bool IdleTasksEnabled(Isolate* isolate) { return false; } 981 982 /** 983 * Posts |job_task| to run in parallel. Returns a JobHandle associated with 984 * the Job, which can be joined or canceled. 985 * This avoids degenerate cases: 986 * - Calling CallOnWorkerThread() for each work item, causing significant 987 * overhead. 988 * - Fixed number of CallOnWorkerThread() calls that split the work and might 989 * run for a long time. This is problematic when many components post 990 * "num cores" tasks and all expect to use all the cores. In these cases, 991 * the scheduler lacks context to be fair to multiple same-priority requests 992 * and/or ability to request lower priority work to yield when high priority 993 * work comes in. 994 * A canonical implementation of |job_task| looks like: 995 * class MyJobTask : public JobTask { 996 * public: 997 * MyJobTask(...) : worker_queue_(...) {} 998 * // JobTask: 999 * void Run(JobDelegate* delegate) override { 1000 * while (!delegate->ShouldYield()) { 1001 * // Smallest unit of work. 1002 * auto work_item = worker_queue_.TakeWorkItem(); // Thread safe. 1003 * if (!work_item) return; 1004 * ProcessWork(work_item); 1005 * } 1006 * } 1007 * 1008 * size_t GetMaxConcurrency() const override { 1009 * return worker_queue_.GetSize(); // Thread safe. 1010 * } 1011 * }; 1012 * auto handle = PostJob(TaskPriority::kUserVisible, 1013 * std::make_unique
<MyJobTask>
(...)); 1014 * handle->Join(); 1015 * 1016 * PostJob() and methods of the returned JobHandle/JobDelegate, must never be 1017 * called while holding a lock that could be acquired by JobTask::Run or 1018 * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is 1019 * because [1] JobTask::GetMaxConcurrency may be invoked while holding 1020 * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B) 1021 * if that lock is *never* held while calling back into JobHandle from any 1022 * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or 1023 * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle 1024 * (B=>JobHandle::foo=>B deadlock). 1025 * 1026 * A sufficient PostJob() implementation that uses the default Job provided in 1027 * libplatform looks like: 1028 * std::unique_ptr
<JobHandle>
PostJob( 1029 * TaskPriority priority, std::unique_ptr
<JobTask>
job_task) override { 1030 * return v8::platform::NewDefaultJobHandle( 1031 * this, priority, std::move(job_task), NumberOfWorkerThreads()); 1032 * } 1033 */ 1034 virtual std::unique_ptr
<JobHandle>
PostJob( 1035 TaskPriority priority, std::unique_ptr
<JobTask>
job_task) = 0; 1036 1037 /** 1038 * Monotonically increasing time in seconds from an arbitrary fixed point in 1039 * the past. This function is expected to return at least 1040 * millisecond-precision values. For this reason, 1041 * it is recommended that the fixed point be no further in the past than 1042 * the epoch. 1043 **/ 1044 virtual double MonotonicallyIncreasingTime() = 0; 1045 1046 /** 1047 * Current wall-clock time in milliseconds since epoch. 1048 * This function is expected to return at least millisecond-precision values. 1049 */ 1050 virtual double CurrentClockTimeMillis() = 0; 1051 1052 typedef void (*StackTracePrinter)(); 1053 1054 /** 1055 * Returns a function pointer that print a stack trace of the current stack 1056 * on invocation. Disables printing of the stack trace if nullptr. 1057 */ 1058 virtual StackTracePrinter GetStackTracePrinter() { return nullptr; } 1059 1060 /** 1061 * Returns an instance of a v8::TracingController. This must be non-nullptr. 1062 */ 1063 virtual TracingController* GetTracingController() = 0; 1064 1065 /** 1066 * Tells the embedder to generate and upload a crashdump during an unexpected 1067 * but non-critical scenario. 1068 */ 1069 virtual void DumpWithoutCrashing() {} 1070 1071 /** 1072 * Allows the embedder to observe sections with high throughput allocation 1073 * operations. 1074 */ 1075 virtual HighAllocationThroughputObserver* 1076 GetHighAllocationThroughputObserver() { 1077 static HighAllocationThroughputObserver default_observer; 1078 return &default_observer; 1079 } 1080 1081 protected: 1082 /** 1083 * Default implementation of current wall-clock time in milliseconds 1084 * since epoch. Useful for implementing |CurrentClockTimeMillis| if 1085 * nothing special needed. 1086 */ 1087 V8_EXPORT static double SystemClockTimeMillis(); 1088 }; 1089 1090 } // namespace v8 1091 1092 #endif // V8_V8_PLATFORM_H_
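Example: a minimal sketch (not part of the header above) of an embedder-side Task posted to the isolate's foreground task runner. The LogTask name and the |platform| and |isolate| pointers are assumptions standing in for whatever the embedder created during V8 startup.

#include <memory>
#include "v8-platform.h"

class LogTask : public v8::Task {
 public:
  // Run() is invoked exactly once by the TaskRunner that takes ownership of
  // this task.
  void Run() override {
    // ... do one small unit of foreground work ...
  }
};

// Usage, assuming |platform| (v8::Platform*) and |isolate| (v8::Isolate*)
// already exist:
//   std::shared_ptr<v8::TaskRunner> runner =
//       platform->GetForegroundTaskRunner(isolate);
//   runner->PostTask(std::make_unique<LogTask>());
//   runner->PostDelayedTask(std::make_unique<LogTask>(),
//                           /*delay_in_seconds=*/2.0);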
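Example: a minimal sketch of a JobTask following the PostJob() contract documented above: Run() claims the smallest possible work items and checks ShouldYield() between them, while GetMaxConcurrency() reports how many workers could currently be useful. The WorkQueue type is a hypothetical stand-in for any thread-safe queue the embedder already has.

#include <atomic>
#include <memory>
#include "v8-platform.h"

// Hypothetical thread-safe counter standing in for a real work queue.
struct WorkQueue {
  std::atomic<size_t> remaining{128};
  // Returns true if a work item was claimed, false if the queue is empty.
  bool TakeWorkItem() {
    size_t n = remaining.load(std::memory_order_relaxed);
    while (n > 0 && !remaining.compare_exchange_weak(
                        n, n - 1, std::memory_order_relaxed)) {
    }
    return n > 0;
  }
  size_t Size() const { return remaining.load(std::memory_order_relaxed); }
};

class MyJobTask : public v8::JobTask {
 public:
  explicit MyJobTask(WorkQueue* queue) : queue_(queue) {}

  void Run(v8::JobDelegate* delegate) override {
    // Process one small work item at a time so the worker can yield promptly.
    while (!delegate->ShouldYield()) {
      if (!queue_->TakeWorkItem()) return;
      // ... process the claimed work item ...
    }
  }

  // Upper bound on useful parallelism: pending items plus the workers that
  // are already executing Run().
  size_t GetMaxConcurrency(size_t worker_count) const override {
    return queue_->Size() + worker_count;
  }

 private:
  WorkQueue* queue_;
};

// Usage, given a v8::Platform* |platform| and a WorkQueue |queue|:
//   auto handle = platform->PostJob(v8::TaskPriority::kUserVisible,
//                                   std::make_unique<MyJobTask>(&queue));
//   handle->Join();  // Contribute on this thread until all work is done.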