// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/module-compiler.h"

#include <algorithm>
#include <queue>
#include <shared_mutex>  // NOLINT(build/c++11)

#include "src/api/api-inl.h"
#include "src/base/enum-set.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug.h"
#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h"  // For CodePageCollectionMemoryModificationScope.
#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
#include "src/tracing/trace-event.h"
#include "src/wasm/assembler-buffer-cache.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"

#define TRACE_COMPILE(...)                                 \
  do {                                                     \
    if (v8_flags.trace_wasm_compiler) PrintF(__VA_ARGS__); \
  } while (false)

#define TRACE_STREAMING(...)                                \
  do {                                                      \
    if (v8_flags.trace_wasm_streaming) PrintF(__VA_ARGS__); \
  } while (false)

#define TRACE_LAZY(...)                                            \
  do {                                                             \
    if (v8_flags.trace_wasm_lazy_compilation) PrintF(__VA_ARGS__); \
  } while (false)

namespace v8 {
namespace internal {
namespace wasm {

namespace {

enum class CompileStrategy : uint8_t {
  // Compiles functions on first use. In this case, execution will block until
  // the function's baseline is reached and top tier compilation starts in
  // background (if applicable).
  // Lazy compilation can help to reduce startup time and code size at the risk
  // of blocking execution.
  kLazy,
  // Compiles baseline ahead of execution and starts top tier compilation in
  // background (if applicable).
  kEager,
  // Triggers baseline compilation on first use (just like {kLazy}) with the
  // difference that top tier compilation is started eagerly.
  // This strategy can help to reduce startup time at the risk of blocking
  // execution, but only in its early phase (until top tier compilation
  // finishes).
  kLazyBaselineEagerTopTier,
  // Marker for default strategy.
  kDefault = kEager,
};

class CompilationStateImpl;
class CompilationUnitBuilder;

class V8_NODISCARD BackgroundCompileScope {
 public:
  explicit BackgroundCompileScope(std::weak_ptr<NativeModule> native_module)
      : native_module_(native_module.lock()) {}

  NativeModule* native_module() const {
    DCHECK(native_module_);
    return native_module_.get();
  }
  inline CompilationStateImpl* compilation_state() const;

  bool cancelled() const;

 private:
  // Keep the native module alive while in this scope.
  std::shared_ptr<NativeModule> native_module_;
};

enum CompileBaselineOnly : bool {
  kBaselineOnly = true,
  kBaselineOrTopTier = false
};

// A set of work-stealing queues (vectors of units). Each background compile
// task owns one of the queues and steals from all others once its own queue
// runs empty.
class CompilationUnitQueues {
 public:
  // Public API for QueueImpl.
  struct Queue {
    bool ShouldPublish(int num_processed_units) const;
  };

  explicit CompilationUnitQueues(int num_declared_functions)
      : num_declared_functions_(num_declared_functions) {
    // Add one first queue, to add units to.
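    // Queue 0 is used by the main thread; background compile tasks request
    // additional queues on demand via {GetQueueForTask} (see {kMainTaskId}).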
    queues_.emplace_back(std::make_unique<QueueImpl>(0));

#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
    for (auto& atomic_counter : num_units_) {
      std::atomic_init(&atomic_counter, size_t{0});
    }
#endif

    top_tier_compiled_ =
        std::make_unique<std::atomic<bool>[]>(num_declared_functions);

#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
    for (int i = 0; i < num_declared_functions; i++) {
      std::atomic_init(&top_tier_compiled_.get()[i], false);
    }
#endif
  }

  Queue* GetQueueForTask(int task_id) {
    int required_queues = task_id + 1;
    {
      std::shared_lock queues_guard{queues_mutex_};
      if (V8_LIKELY(static_cast<int>(queues_.size()) >= required_queues)) {
        return queues_[task_id].get();
      }
    }

    // Otherwise increase the number of queues.
    std::unique_lock queues_guard{queues_mutex_};
    int num_queues = static_cast<int>(queues_.size());
    while (num_queues < required_queues) {
      int steal_from = num_queues + 1;
      queues_.emplace_back(std::make_unique<QueueImpl>(steal_from));
      ++num_queues;
    }

    // Update the {publish_limit}s of all queues.
    // We want background threads to publish regularly (to avoid contention
    // when they are all publishing at the end). On the other hand, each
    // publishing has some overhead (part of it for synchronizing between
    // threads), so it should not happen *too* often. Thus aim for 4-8
    // publishes per thread, but distribute it such that publishing is likely
    // to happen at different times.
    int units_per_thread = num_declared_functions_ / num_queues;
    int min = std::max(10, units_per_thread / 8);
    int queue_id = 0;
    for (auto& queue : queues_) {
      // Set a limit between {min} and {2*min}, but not smaller than {10}.
      int limit = min + (min * queue_id / num_queues);
      queue->publish_limit.store(limit, std::memory_order_relaxed);
      ++queue_id;
    }

    return queues_[task_id].get();
  }

  base::Optional<WasmCompilationUnit> GetNextUnit(
      Queue* queue, CompileBaselineOnly baseline_only) {
    // As long as any lower-tier units are outstanding we need to steal them
    // before executing own higher-tier units.
    int max_tier = baseline_only ? kBaseline : kTopTier;
    for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
      if (auto unit = GetNextUnitOfTier(queue, tier)) {
        size_t old_units_count =
            num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
        DCHECK_LE(1, old_units_count);
        USE(old_units_count);
        return unit;
      }
    }
    return {};
  }

  void AddUnits(base::Vector<WasmCompilationUnit> baseline_units,
                base::Vector<WasmCompilationUnit> top_tier_units,
                const WasmModule* module) {
    DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
    // Add to the individual queues in a round-robin fashion. No special care
    // is taken to balance them; they will be balanced by work stealing.
    QueueImpl* queue;
    {
      int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
      std::shared_lock queues_guard{queues_mutex_};
      while (!next_queue_to_add.compare_exchange_weak(
          queue_to_add, next_task_id(queue_to_add, queues_.size()),
          std::memory_order_relaxed)) {
        // Retry with updated {queue_to_add}.
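        // {compare_exchange_weak} reloads the current value into
        // {queue_to_add} on failure, so the next iteration advances the
        // round-robin counter from the freshly observed value.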
      }
      queue = queues_[queue_to_add].get();
    }

    base::MutexGuard guard(&queue->mutex);
    base::Optional<base::MutexGuard> big_units_guard;
    for (auto pair : {std::make_pair(int{kBaseline}, baseline_units),
                      std::make_pair(int{kTopTier}, top_tier_units)}) {
      int tier = pair.first;
      base::Vector<WasmCompilationUnit> units = pair.second;
      if (units.empty()) continue;
      num_units_[tier].fetch_add(units.size(), std::memory_order_relaxed);
      for (WasmCompilationUnit unit : units) {
        size_t func_size = module->functions[unit.func_index()].code.length();
        if (func_size <= kBigUnitsLimit) {
          queue->units[tier].push_back(unit);
        } else {
          if (!big_units_guard) {
            big_units_guard.emplace(&big_units_queue_.mutex);
          }
          big_units_queue_.has_units[tier].store(true,
                                                 std::memory_order_relaxed);
          big_units_queue_.units[tier].emplace(func_size, unit);
        }
      }
    }
  }

  void AddTopTierPriorityUnit(WasmCompilationUnit unit, size_t priority) {
    std::shared_lock queues_guard{queues_mutex_};
    // Add to the individual queues in a round-robin fashion. No special care
    // is taken to balance them; they will be balanced by work stealing.
    // Priorities should only be seen as a hint here; without balancing, we
    // might pop a unit with lower priority from one queue while other queues
    // still hold higher-priority units.
    // Since updating priorities in a std::priority_queue is difficult, we just
    // add new units with higher priorities, and use the
    // {CompilationUnitQueues::top_tier_compiled_} array to discard units for
    // functions which are already being compiled.
    int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
    while (!next_queue_to_add.compare_exchange_weak(
        queue_to_add, next_task_id(queue_to_add, queues_.size()),
        std::memory_order_relaxed)) {
      // Retry with updated {queue_to_add}.
    }

    {
      auto* queue = queues_[queue_to_add].get();
      base::MutexGuard guard(&queue->mutex);
      queue->top_tier_priority_units.emplace(priority, unit);
    }
    num_priority_units_.fetch_add(1, std::memory_order_relaxed);
    num_units_[kTopTier].fetch_add(1, std::memory_order_relaxed);
  }

  // Get the current total number of units in all queues. This is only a
  // momentary snapshot; it's not guaranteed that {GetNextUnit} returns a unit
  // if this method returns non-zero.
  size_t GetTotalSize() const {
    size_t total = 0;
    for (auto& atomic_counter : num_units_) {
      total += atomic_counter.load(std::memory_order_relaxed);
    }
    return total;
  }

 private:
  // Store tier in int so we can easily loop over it:
  static constexpr int kBaseline = 0;
  static constexpr int kTopTier = 1;
  static constexpr int kNumTiers = kTopTier + 1;

  // Functions bigger than {kBigUnitsLimit} will be compiled first, in
  // descending order of their function body size.
  static constexpr size_t kBigUnitsLimit = 4096;

  struct BigUnit {
    BigUnit(size_t func_size, WasmCompilationUnit unit)
        : func_size{func_size}, unit(unit) {}

    size_t func_size;
    WasmCompilationUnit unit;

    bool operator<(const BigUnit& other) const {
      return func_size < other.func_size;
    }
  };

  struct TopTierPriorityUnit {
    TopTierPriorityUnit(int priority, WasmCompilationUnit unit)
        : priority(priority), unit(unit) {}

    size_t priority;
    WasmCompilationUnit unit;

    bool operator<(const TopTierPriorityUnit& other) const {
      return priority < other.priority;
    }
  };

  struct BigUnitsQueue {
    BigUnitsQueue() {
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
      for (auto& atomic : has_units) std::atomic_init(&atomic, false);
#endif
    }

    base::Mutex mutex;

    // Can be read concurrently to check whether any elements are in the queue.
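    // The flag is only a hint: it may be stale, so readers re-check the actual
    // queue under {mutex} before popping (see {GetBigUnitOfTier}).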
    std::atomic<bool> has_units[kNumTiers];

    // Protected by {mutex}:
    std::priority_queue<BigUnit> units[kNumTiers];
  };

  struct QueueImpl : public Queue {
    explicit QueueImpl(int next_steal_task_id)
        : next_steal_task_id(next_steal_task_id) {}

    // Number of units after which the task processing this queue should
    // publish compilation results. Updated (reduced, using relaxed ordering)
    // when new queues are allocated. If there is only one thread running, we
    // can delay publishing arbitrarily.
    std::atomic<int> publish_limit{kMaxInt};

    base::Mutex mutex;

    // All fields below are protected by {mutex}.
    std::vector<WasmCompilationUnit> units[kNumTiers];
    std::priority_queue<TopTierPriorityUnit> top_tier_priority_units;
    int next_steal_task_id;
  };

  int next_task_id(int task_id, size_t num_queues) const {
    int next = task_id + 1;
    return next == static_cast<int>(num_queues) ? 0 : next;
  }

  int GetLowestTierWithUnits() const {
    for (int tier = 0; tier < kNumTiers; ++tier) {
      if (num_units_[tier].load(std::memory_order_relaxed) > 0) return tier;
    }
    return kNumTiers;
  }

  base::Optional<WasmCompilationUnit> GetNextUnitOfTier(Queue* public_queue,
                                                        int tier) {
    QueueImpl* queue = static_cast<QueueImpl*>(public_queue);

    // First check whether there is a priority unit. Execute that first.
    if (tier == kTopTier) {
      if (auto unit = GetTopTierPriorityUnit(queue)) {
        return unit;
      }
    }

    // Then check whether there is a big unit of that tier.
    if (auto unit = GetBigUnitOfTier(tier)) return unit;

    // Finally check whether our own queue has a unit of the wanted tier. If
    // so, return it, otherwise get the task id to steal from.
    int steal_task_id;
    {
      base::MutexGuard mutex_guard(&queue->mutex);
      if (!queue->units[tier].empty()) {
        auto unit = queue->units[tier].back();
        queue->units[tier].pop_back();
        return unit;
      }
      steal_task_id = queue->next_steal_task_id;
    }

    // Try to steal from all other queues. If this succeeds, return one of the
    // stolen units.
    {
      std::shared_lock guard{queues_mutex_};
      for (size_t steal_trials = 0; steal_trials < queues_.size();
           ++steal_trials, ++steal_task_id) {
        if (steal_task_id >= static_cast<int>(queues_.size())) {
          steal_task_id = 0;
        }
        if (auto unit = StealUnitsAndGetFirst(queue, steal_task_id, tier)) {
          return unit;
        }
      }
    }

    // If we reach here, we didn't find any unit of the requested tier.
    return {};
  }

  base::Optional<WasmCompilationUnit> GetBigUnitOfTier(int tier) {
    // Fast path without locking.
    if (!big_units_queue_.has_units[tier].load(std::memory_order_relaxed)) {
      return {};
    }
    base::MutexGuard guard(&big_units_queue_.mutex);
    if (big_units_queue_.units[tier].empty()) return {};
    WasmCompilationUnit unit = big_units_queue_.units[tier].top().unit;
    big_units_queue_.units[tier].pop();
    if (big_units_queue_.units[tier].empty()) {
      big_units_queue_.has_units[tier].store(false, std::memory_order_relaxed);
    }
    return unit;
  }

  base::Optional<WasmCompilationUnit> GetTopTierPriorityUnit(QueueImpl* queue) {
    // Fast path without locking.
    if (num_priority_units_.load(std::memory_order_relaxed) == 0) {
      return {};
    }
    int steal_task_id;
    {
      base::MutexGuard mutex_guard(&queue->mutex);
      while (!queue->top_tier_priority_units.empty()) {
        auto unit = queue->top_tier_priority_units.top().unit;
        queue->top_tier_priority_units.pop();
        num_priority_units_.fetch_sub(1, std::memory_order_relaxed);

        if (!top_tier_compiled_[unit.func_index()].exchange(
                true, std::memory_order_relaxed)) {
          return unit;
        }
        num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
      }
      steal_task_id = queue->next_steal_task_id;
    }

    // Try to steal from all other queues. If this succeeds, return one of the
    // stolen units.
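    // Note that {StealTopTierPriorityUnit} moves only a single unit per call,
    // in contrast to {StealUnitsAndGetFirst}, which moves half of the victim's
    // queue.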
    {
      std::shared_lock guard{queues_mutex_};
      for (size_t steal_trials = 0; steal_trials < queues_.size();
           ++steal_trials, ++steal_task_id) {
        if (steal_task_id >= static_cast<int>(queues_.size())) {
          steal_task_id = 0;
        }
        if (auto unit = StealTopTierPriorityUnit(queue, steal_task_id)) {
          return unit;
        }
      }
    }
    return {};
  }

  // Steal units of {wanted_tier} from {steal_from_task_id} to {queue}. Return
  // the first stolen unit (the rest is put into {queue}), or {nullopt} if
  // {steal_from_task_id} had no units of {wanted_tier}.
  // Hold a shared lock on {queues_mutex_} when calling this method.
  base::Optional<WasmCompilationUnit> StealUnitsAndGetFirst(
      QueueImpl* queue, int steal_from_task_id, int wanted_tier) {
    auto* steal_queue = queues_[steal_from_task_id].get();
    // Cannot steal from own queue.
    if (steal_queue == queue) return {};
    std::vector<WasmCompilationUnit> stolen;
    base::Optional<WasmCompilationUnit> returned_unit;
    {
      base::MutexGuard guard(&steal_queue->mutex);
      auto* steal_from_vector = &steal_queue->units[wanted_tier];
      if (steal_from_vector->empty()) return {};
      size_t remaining = steal_from_vector->size() / 2;
      auto steal_begin = steal_from_vector->begin() + remaining;
      returned_unit = *steal_begin;
      stolen.assign(steal_begin + 1, steal_from_vector->end());
      steal_from_vector->erase(steal_begin, steal_from_vector->end());
    }
    base::MutexGuard guard(&queue->mutex);
    auto* target_queue = &queue->units[wanted_tier];
    target_queue->insert(target_queue->end(), stolen.begin(), stolen.end());
    queue->next_steal_task_id = steal_from_task_id + 1;
    return returned_unit;
  }

  // Steal one priority unit from {steal_from_task_id} to {queue}. Return the
  // stolen unit, or {nullopt} if {steal_from_task_id} had no priority units.
  // Hold a shared lock on {queues_mutex_} when calling this method.
  base::Optional<WasmCompilationUnit> StealTopTierPriorityUnit(
      QueueImpl* queue, int steal_from_task_id) {
    auto* steal_queue = queues_[steal_from_task_id].get();
    // Cannot steal from own queue.
    if (steal_queue == queue) return {};
    base::Optional<WasmCompilationUnit> returned_unit;
    {
      base::MutexGuard guard(&steal_queue->mutex);
      while (true) {
        if (steal_queue->top_tier_priority_units.empty()) return {};

        auto unit = steal_queue->top_tier_priority_units.top().unit;
        steal_queue->top_tier_priority_units.pop();
        num_priority_units_.fetch_sub(1, std::memory_order_relaxed);

        if (!top_tier_compiled_[unit.func_index()].exchange(
                true, std::memory_order_relaxed)) {
          returned_unit = unit;
          break;
        }
        num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
      }
    }
    base::MutexGuard guard(&queue->mutex);
    queue->next_steal_task_id = steal_from_task_id + 1;
    return returned_unit;
  }

  // {queues_mutex_} protects {queues_}.
  std::shared_mutex queues_mutex_;
  std::vector<std::unique_ptr<QueueImpl>> queues_;

  const int num_declared_functions_;

  BigUnitsQueue big_units_queue_;

  std::atomic<size_t> num_units_[kNumTiers];
  std::atomic<size_t> num_priority_units_{0};
  std::unique_ptr<std::atomic<bool>[]> top_tier_compiled_;
  std::atomic<int> next_queue_to_add{0};
};

bool CompilationUnitQueues::Queue::ShouldPublish(
    int num_processed_units) const {
  auto* queue = static_cast<const QueueImpl*>(this);
  return num_processed_units >=
         queue->publish_limit.load(std::memory_order_relaxed);
}

// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
// compilation of functions.
// Its public interface {CompilationState} lives in compilation-environment.h.
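// Fields of {CompilationStateImpl} are grouped by the mutex that guards them
// ({mutex_}, {callbacks_mutex_}, or {publish_mutex_}); see the comments on the
// individual fields below.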
class CompilationStateImpl { public: CompilationStateImpl(const std::shared_ptr& native_module, std::shared_ptr async_counters, DynamicTiering dynamic_tiering); ~CompilationStateImpl() { if (compile_job_->IsValid()) compile_job_->CancelAndDetach(); } // Call right after the constructor, after the {compilation_state_} field in // the {NativeModule} has been initialized. void InitCompileJob(); // {kCancelUnconditionally}: Cancel all compilation. // {kCancelInitialCompilation}: Cancel all compilation if initial (baseline) // compilation is not finished yet. enum CancellationPolicy { kCancelUnconditionally, kCancelInitialCompilation }; void CancelCompilation(CancellationPolicy); bool cancelled() const; // Apply a compilation hint to the initial compilation progress, updating all // internal fields accordingly. void ApplyCompilationHintToInitialProgress(const WasmCompilationHint& hint, size_t hint_idx); // Initialize compilation progress. Set compilation tiers to expect for // baseline and top tier compilation. Must be set before // {CommitCompilationUnits} is invoked which triggers background compilation. void InitializeCompilationProgress(int num_import_wrappers, int num_export_wrappers); // Initialize the compilation progress after deserialization. This is needed // for recompilation (e.g. for tier down) to work later. void InitializeCompilationProgressAfterDeserialization( base::Vector lazy_functions, base::Vector eager_functions); // Initializes compilation units based on the information encoded in the // {compilation_progress_}. void InitializeCompilationUnits( std::unique_ptr builder); // Adds compilation units for another function to the // {CompilationUnitBuilder}. This function is the streaming compilation // equivalent to {InitializeCompilationUnits}. void AddCompilationUnit(CompilationUnitBuilder* builder, int func_index); // Initialize recompilation of the whole module: Setup compilation progress // for recompilation and add the respective compilation units. The callback is // called immediately if no recompilation is needed, or called later // otherwise. void InitializeRecompilation(TieringState new_tiering_state, std::unique_ptr recompilation_finished_callback); // Add the callback to be called on compilation events. Needs to be // set before {CommitCompilationUnits} is run to ensure that it receives all // events. The callback object must support being deleted from any thread. void AddCallback(std::unique_ptr callback); // Inserts new functions to compile and kicks off compilation. void CommitCompilationUnits( base::Vector baseline_units, base::Vector top_tier_units, base::Vector> js_to_wasm_wrapper_units); void CommitTopTierCompilationUnit(WasmCompilationUnit); void AddTopTierPriorityCompilationUnit(WasmCompilationUnit, size_t); CompilationUnitQueues::Queue* GetQueueForCompileTask(int task_id); base::Optional GetNextCompilationUnit( CompilationUnitQueues::Queue*, CompileBaselineOnly); std::shared_ptr GetNextJSToWasmWrapperCompilationUnit(); void FinalizeJSToWasmWrappers(Isolate* isolate, const WasmModule* module); void OnFinishedUnits(base::Vector); void OnFinishedJSToWasmWrapperUnits(int num); void OnCompilationStopped(WasmFeatures detected); void PublishDetectedFeatures(Isolate*); void SchedulePublishCompilationResults( std::vector> unpublished_code); size_t NumOutstandingCompilations() const; void SetError(); void WaitForCompilationEvent(CompilationEvent event); void SetHighPriority() { // TODO(wasm): Keep a lower priority for TurboFan-only jobs. 
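    // {kUserBlocking} asks the platform to schedule the compile job ahead of
    // default-priority background work.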
compile_job_->UpdatePriority(TaskPriority::kUserBlocking); } bool failed() const { return compile_failed_.load(std::memory_order_relaxed); } bool baseline_compilation_finished() const { base::MutexGuard guard(&callbacks_mutex_); return outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0; } bool recompilation_finished() const { base::MutexGuard guard(&callbacks_mutex_); return outstanding_recompilation_functions_ == 0; } DynamicTiering dynamic_tiering() const { return dynamic_tiering_; } Counters* counters() const { return async_counters_.get(); } void SetWireBytesStorage( std::shared_ptr wire_bytes_storage) { base::MutexGuard guard(&mutex_); wire_bytes_storage_ = std::move(wire_bytes_storage); } std::shared_ptr GetWireBytesStorage() const { base::MutexGuard guard(&mutex_); DCHECK_NOT_NULL(wire_bytes_storage_); return wire_bytes_storage_; } void set_compilation_id(int compilation_id) { DCHECK_EQ(compilation_id_, kInvalidCompilationID); compilation_id_ = compilation_id; } std::weak_ptr const native_module_weak() const { return native_module_weak_; } private: // Returns the potentially-updated {function_progress}. uint8_t AddCompilationUnitInternal(CompilationUnitBuilder* builder, int function_index, uint8_t function_progress); // Trigger callbacks according to the internal counters below // (outstanding_...), plus the given events. // Hold the {callbacks_mutex_} when calling this method. void TriggerCallbacks(base::EnumSet additional_events = {}); void PublishCompilationResults( std::vector> unpublished_code); void PublishCode(base::Vector> codes); NativeModule* const native_module_; std::weak_ptr const native_module_weak_; const std::shared_ptr async_counters_; // Compilation error, atomically updated. This flag can be updated and read // using relaxed semantics. std::atomic compile_failed_{false}; // True if compilation was cancelled and worker threads should return. This // flag can be updated and read using relaxed semantics. std::atomic compile_cancelled_{false}; CompilationUnitQueues compilation_unit_queues_; // Number of wrappers to be compiled. Initialized once, counted down in // {GetNextJSToWasmWrapperCompilationUnit}. std::atomic outstanding_js_to_wasm_wrappers_{0}; // Wrapper compilation units are stored in shared_ptrs so that they are kept // alive by the tasks even if the NativeModule dies. std::vector> js_to_wasm_wrapper_units_; // Cache the dynamic tiering configuration to be consistent for the whole // compilation. const DynamicTiering dynamic_tiering_; // This mutex protects all information of this {CompilationStateImpl} which is // being accessed concurrently. mutable base::Mutex mutex_; // The compile job handle, initialized right after construction of // {CompilationStateImpl}. std::unique_ptr compile_job_; // The compilation id to identify trace events linked to this compilation. static constexpr int kInvalidCompilationID = -1; int compilation_id_ = kInvalidCompilationID; ////////////////////////////////////////////////////////////////////////////// // Protected by {mutex_}: // Features detected to be used in this module. Features can be detected // as a module is being compiled. WasmFeatures detected_features_ = WasmFeatures::None(); // Abstraction over the storage of the wire bytes. Held in a shared_ptr so // that background compilation jobs can keep the storage alive while // compiling. std::shared_ptr wire_bytes_storage_; // End of fields protected by {mutex_}. 
////////////////////////////////////////////////////////////////////////////// // This mutex protects the callbacks vector, and the counters used to // determine which callbacks to call. The counters plus the callbacks // themselves need to be synchronized to ensure correct order of events. mutable base::Mutex callbacks_mutex_; ////////////////////////////////////////////////////////////////////////////// // Protected by {callbacks_mutex_}: // Callbacks to be called on compilation events. std::vector> callbacks_; // Events that already happened. base::EnumSet finished_events_; int outstanding_baseline_units_ = 0; int outstanding_export_wrappers_ = 0; // The amount of generated top tier code since the last // {kFinishedCompilationChunk} event. size_t bytes_since_last_chunk_ = 0; std::vector compilation_progress_; int outstanding_recompilation_functions_ = 0; TieringState tiering_state_ = kTieredUp; // End of fields protected by {callbacks_mutex_}. ////////////////////////////////////////////////////////////////////////////// // {publish_mutex_} protects {publish_queue_} and {publisher_running_}. base::Mutex publish_mutex_; std::vector> publish_queue_; bool publisher_running_ = false; // Encoding of fields in the {compilation_progress_} vector. using RequiredBaselineTierField = base::BitField8; using RequiredTopTierField = base::BitField8; using ReachedTierField = base::BitField8; using MissingRecompilationField = base::BitField8; }; CompilationStateImpl* Impl(CompilationState* compilation_state) { return reinterpret_cast(compilation_state); } const CompilationStateImpl* Impl(const CompilationState* compilation_state) { return reinterpret_cast(compilation_state); } CompilationStateImpl* BackgroundCompileScope::compilation_state() const { DCHECK(native_module_); return Impl(native_module_->compilation_state()); } bool BackgroundCompileScope::cancelled() const { return native_module_ == nullptr || Impl(native_module_->compilation_state())->cancelled(); } void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) { using Feature = v8::Isolate::UseCounterFeature; constexpr static std::pair kUseCounters[] = { {kFeature_reftypes, Feature::kWasmRefTypes}, {kFeature_simd, Feature::kWasmSimdOpcodes}, {kFeature_threads, Feature::kWasmThreadOpcodes}, {kFeature_eh, Feature::kWasmExceptionHandling}}; for (auto& feature : kUseCounters) { if (detected.contains(feature.first)) isolate->CountUsage(feature.second); } } } // namespace ////////////////////////////////////////////////////// // PIMPL implementation of {CompilationState}. 
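// {CompilationState} is never instantiated directly; its storage is always a
// {CompilationStateImpl} (see {CompilationState::New} below), and every method
// forwards to the implementation via the {Impl} casts above.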
CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); } void CompilationState::InitCompileJob() { Impl(this)->InitCompileJob(); } void CompilationState::CancelCompilation() { Impl(this)->CancelCompilation(CompilationStateImpl::kCancelUnconditionally); } void CompilationState::CancelInitialCompilation() { Impl(this)->CancelCompilation( CompilationStateImpl::kCancelInitialCompilation); } void CompilationState::SetError() { Impl(this)->SetError(); } void CompilationState::SetWireBytesStorage( std::shared_ptr wire_bytes_storage) { Impl(this)->SetWireBytesStorage(std::move(wire_bytes_storage)); } std::shared_ptr CompilationState::GetWireBytesStorage() const { return Impl(this)->GetWireBytesStorage(); } void CompilationState::AddCallback( std::unique_ptr callback) { return Impl(this)->AddCallback(std::move(callback)); } void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); } void CompilationState::InitializeAfterDeserialization( base::Vector lazy_functions, base::Vector eager_functions) { Impl(this)->InitializeCompilationProgressAfterDeserialization( lazy_functions, eager_functions); } bool CompilationState::failed() const { return Impl(this)->failed(); } bool CompilationState::baseline_compilation_finished() const { return Impl(this)->baseline_compilation_finished(); } bool CompilationState::recompilation_finished() const { return Impl(this)->recompilation_finished(); } void CompilationState::set_compilation_id(int compilation_id) { Impl(this)->set_compilation_id(compilation_id); } DynamicTiering CompilationState::dynamic_tiering() const { return Impl(this)->dynamic_tiering(); } // static std::unique_ptr CompilationState::New( const std::shared_ptr& native_module, std::shared_ptr async_counters, DynamicTiering dynamic_tiering) { return std::unique_ptr(reinterpret_cast( new CompilationStateImpl(std::move(native_module), std::move(async_counters), dynamic_tiering))); } // End of PIMPL implementation of {CompilationState}. 
////////////////////////////////////////////////////// namespace { ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint, ExecutionTier default_tier) { switch (hint) { case WasmCompilationHintTier::kDefault: return default_tier; case WasmCompilationHintTier::kBaseline: return ExecutionTier::kLiftoff; case WasmCompilationHintTier::kOptimized: return ExecutionTier::kTurbofan; } UNREACHABLE(); } const WasmCompilationHint* GetCompilationHint(const WasmModule* module, uint32_t func_index) { DCHECK_LE(module->num_imported_functions, func_index); uint32_t hint_index = declared_function_index(module, func_index); const std::vector& compilation_hints = module->compilation_hints; if (hint_index < compilation_hints.size()) { return &compilation_hints[hint_index]; } return nullptr; } CompileStrategy GetCompileStrategy(const WasmModule* module, const WasmFeatures& enabled_features, uint32_t func_index, bool lazy_module) { if (lazy_module) return CompileStrategy::kLazy; if (!enabled_features.has_compilation_hints()) { return CompileStrategy::kDefault; } auto* hint = GetCompilationHint(module, func_index); if (hint == nullptr) return CompileStrategy::kDefault; switch (hint->strategy) { case WasmCompilationHintStrategy::kLazy: return CompileStrategy::kLazy; case WasmCompilationHintStrategy::kEager: return CompileStrategy::kEager; case WasmCompilationHintStrategy::kLazyBaselineEagerTopTier: return CompileStrategy::kLazyBaselineEagerTopTier; case WasmCompilationHintStrategy::kDefault: return CompileStrategy::kDefault; } } struct ExecutionTierPair { ExecutionTier baseline_tier; ExecutionTier top_tier; }; ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module, DynamicTiering dynamic_tiering, bool lazy_module) { const WasmModule* module = native_module->module(); if (is_asmjs_module(module)) { return {ExecutionTier::kTurbofan, ExecutionTier::kTurbofan}; } // TODO(13224): Use lazy compilation for debug code. if (native_module->IsTieredDown()) { return {ExecutionTier::kLiftoff, ExecutionTier::kLiftoff}; } if (lazy_module) { return {ExecutionTier::kNone, ExecutionTier::kNone}; } ExecutionTier baseline_tier = v8_flags.liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan; bool eager_tier_up = !dynamic_tiering && v8_flags.wasm_tier_up; ExecutionTier top_tier = eager_tier_up ? ExecutionTier::kTurbofan : baseline_tier; return {baseline_tier, top_tier}; } ExecutionTierPair GetLazyCompilationTiers(NativeModule* native_module, uint32_t func_index) { DynamicTiering dynamic_tiering = Impl(native_module->compilation_state())->dynamic_tiering(); // For lazy compilation, get the tiers we would use if lazy compilation is // disabled. constexpr bool kNotLazy = false; ExecutionTierPair tiers = GetDefaultTiersPerModule(native_module, dynamic_tiering, kNotLazy); // Check if compilation hints override default tiering behaviour. if (native_module->enabled_features().has_compilation_hints()) { if (auto* hint = GetCompilationHint(native_module->module(), func_index)) { tiers.baseline_tier = ApplyHintToExecutionTier(hint->baseline_tier, tiers.baseline_tier); tiers.top_tier = ApplyHintToExecutionTier(hint->top_tier, tiers.top_tier); } } if (V8_UNLIKELY(v8_flags.wasm_tier_up_filter >= 0 && func_index != static_cast(v8_flags.wasm_tier_up_filter))) { tiers.top_tier = tiers.baseline_tier; } // Correct top tier if necessary. 
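  // A compilation hint may request a baseline tier above the chosen top tier;
  // in that case the top tier is raised to match.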
static_assert(ExecutionTier::kLiftoff < ExecutionTier::kTurbofan, "Assume an order on execution tiers"); if (tiers.baseline_tier > tiers.top_tier) { tiers.top_tier = tiers.baseline_tier; } return tiers; } // The {CompilationUnitBuilder} builds compilation units and stores them in an // internal buffer. The buffer is moved into the working queue of the // {CompilationStateImpl} when {Commit} is called. class CompilationUnitBuilder { public: explicit CompilationUnitBuilder(NativeModule* native_module) : native_module_(native_module) {} void AddImportUnit(uint32_t func_index) { DCHECK_GT(native_module_->module()->num_imported_functions, func_index); baseline_units_.emplace_back(func_index, ExecutionTier::kNone, kNoDebugging); } void AddJSToWasmWrapperUnit( std::shared_ptr unit) { js_to_wasm_wrapper_units_.emplace_back(std::move(unit)); } void AddBaselineUnit(int func_index, ExecutionTier tier) { baseline_units_.emplace_back(func_index, tier, kNoDebugging); } void AddTopTierUnit(int func_index, ExecutionTier tier) { tiering_units_.emplace_back(func_index, tier, kNoDebugging); } void AddDebugUnit(int func_index) { baseline_units_.emplace_back(func_index, ExecutionTier::kLiftoff, kForDebugging); } void AddRecompilationUnit(int func_index, ExecutionTier tier) { // For recompilation, just treat all units like baseline units. baseline_units_.emplace_back( func_index, tier, tier == ExecutionTier::kLiftoff ? kForDebugging : kNoDebugging); } bool Commit() { if (baseline_units_.empty() && tiering_units_.empty() && js_to_wasm_wrapper_units_.empty()) { return false; } compilation_state()->CommitCompilationUnits( base::VectorOf(baseline_units_), base::VectorOf(tiering_units_), base::VectorOf(js_to_wasm_wrapper_units_)); Clear(); return true; } void Clear() { baseline_units_.clear(); tiering_units_.clear(); js_to_wasm_wrapper_units_.clear(); } const WasmModule* module() { return native_module_->module(); } private: CompilationStateImpl* compilation_state() const { return Impl(native_module_->compilation_state()); } NativeModule* const native_module_; std::vector baseline_units_; std::vector tiering_units_; std::vector> js_to_wasm_wrapper_units_; }; WasmError GetWasmErrorWithName(ModuleWireBytes wire_bytes, const WasmFunction* func, const WasmModule* module, WasmError error) { WasmName name = wire_bytes.GetNameOrNull(func, module); if (name.begin() == nullptr) { return WasmError(error.offset(), "Compiling function #%d failed: %s", func->func_index, error.message().c_str()); } else { TruncatedUserString<> truncated_name(name); return WasmError(error.offset(), "Compiling function #%d:\"%.*s\" failed: %s", func->func_index, truncated_name.length(), truncated_name.start(), error.message().c_str()); } } void SetCompileError(ErrorThrower* thrower, ModuleWireBytes wire_bytes, const WasmFunction* func, const WasmModule* module, WasmError error) { thrower->CompileFailed(GetWasmErrorWithName(std::move(wire_bytes), func, module, std::move(error))); } DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index, base::Vector code, AccountingAllocator* allocator, WasmFeatures enabled_features) { const WasmFunction* func = &module->functions[func_index]; FunctionBody body{func->sig, func->code.offset(), code.begin(), code.end()}; WasmFeatures detected_features; return ValidateFunctionBody(allocator, enabled_features, module, &detected_features, body); } enum OnlyLazyFunctions : bool { kAllFunctions = false, kOnlyLazyFunctions = true, }; void ValidateSequentially( const WasmModule* module, NativeModule* 
native_module, Counters* counters, AccountingAllocator* allocator, ErrorThrower* thrower, bool lazy_module, OnlyLazyFunctions only_lazy_functions = kAllFunctions) { DCHECK(!thrower->error()); uint32_t start = module->num_imported_functions; uint32_t end = start + module->num_declared_functions; auto enabled_features = native_module->enabled_features(); for (uint32_t func_index = start; func_index < end; func_index++) { // Skip non-lazy functions if requested. if (only_lazy_functions) { CompileStrategy strategy = GetCompileStrategy(module, enabled_features, func_index, lazy_module); if (strategy != CompileStrategy::kLazy && strategy != CompileStrategy::kLazyBaselineEagerTopTier) { continue; } } ModuleWireBytes wire_bytes{native_module->wire_bytes()}; const WasmFunction* func = &module->functions[func_index]; base::Vector code = wire_bytes.GetFunctionBytes(func); DecodeResult result = ValidateSingleFunction(module, func_index, code, allocator, enabled_features); if (result.failed()) { SetCompileError(thrower, wire_bytes, func, module, result.error()); } } } bool IsLazyModule(const WasmModule* module) { return v8_flags.wasm_lazy_compilation || (v8_flags.asm_wasm_lazy_compilation && is_asmjs_module(module)); } class CompileLazyTimingScope { public: CompileLazyTimingScope(Counters* counters, NativeModule* native_module) : counters_(counters), native_module_(native_module) { timer_.Start(); } ~CompileLazyTimingScope() { base::TimeDelta elapsed = timer_.Elapsed(); native_module_->AddLazyCompilationTimeSample(elapsed.InMicroseconds()); counters_->wasm_lazy_compile_time()->AddTimedSample(elapsed); } private: Counters* counters_; NativeModule* native_module_; base::ElapsedTimer timer_; }; } // namespace bool CompileLazy(Isolate* isolate, Handle instance, int func_index) { Handle module_object(instance->module_object(), isolate); NativeModule* native_module = module_object->native_module(); Counters* counters = isolate->counters(); // Put the timer scope around everything, including the {CodeSpaceWriteScope} // and its destruction, to measure complete overhead (apart from the runtime // function itself, which has constant overhead). base::Optional lazy_compile_time_scope; if (base::TimeTicks::IsHighResolution()) { lazy_compile_time_scope.emplace(counters, native_module); } DCHECK(!native_module->lazy_compile_frozen()); TRACE_LAZY("Compiling wasm-function#%d.\n", func_index); base::ThreadTicks thread_ticks = base::ThreadTicks::IsSupported() ? base::ThreadTicks::Now() : base::ThreadTicks(); CompilationStateImpl* compilation_state = Impl(native_module->compilation_state()); ExecutionTierPair tiers = GetLazyCompilationTiers(native_module, func_index); DCHECK_LE(native_module->num_imported_functions(), func_index); DCHECK_LT(func_index, native_module->num_functions()); WasmCompilationUnit baseline_unit{func_index, tiers.baseline_tier, kNoDebugging}; CompilationEnv env = native_module->CreateCompilationEnv(); // TODO(wasm): Use an assembler buffer cache for lazy compilation. 
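  // Passing {nullptr} here means each lazy compilation allocates its own
  // assembler buffer instead of reusing cached ones.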
AssemblerBufferCache* assembler_buffer_cache = nullptr; WasmFeatures detected_features; WasmCompilationResult result = baseline_unit.ExecuteCompilation( &env, compilation_state->GetWireBytesStorage().get(), counters, assembler_buffer_cache, &detected_features); compilation_state->OnCompilationStopped(detected_features); if (!thread_ticks.IsNull()) { native_module->UpdateCPUDuration( (base::ThreadTicks::Now() - thread_ticks).InMicroseconds(), tiers.baseline_tier); } // During lazy compilation, we can only get compilation errors when // {--wasm-lazy-validation} is enabled. Otherwise, the module was fully // verified before starting its execution. CHECK_IMPLIES(result.failed(), v8_flags.wasm_lazy_validation); if (result.failed()) { return false; } WasmCodeRefScope code_ref_scope; WasmCode* code; { CodeSpaceWriteScope code_space_write_scope(native_module); code = native_module->PublishCode( native_module->AddCompiledCode(std::move(result))); } DCHECK_EQ(func_index, code->index()); if (WasmCode::ShouldBeLogged(isolate)) { DisallowGarbageCollection no_gc; Object url_obj = module_object->script().name(); DCHECK(url_obj.IsString() || url_obj.IsUndefined()); std::unique_ptr url = url_obj.IsString() ? String::cast(url_obj).ToCString() : nullptr; code->LogCode(isolate, url.get(), module_object->script().id()); } counters->wasm_lazily_compiled_functions()->Increment(); const WasmModule* module = native_module->module(); const bool lazy_module = IsLazyModule(module); if (GetCompileStrategy(module, native_module->enabled_features(), func_index, lazy_module) == CompileStrategy::kLazy && tiers.baseline_tier < tiers.top_tier) { WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging}; compilation_state->CommitTopTierCompilationUnit(tiering_unit); } // Allocate feedback vector if needed. 
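  // The vector holds two slots (call target and count) per call site and is
  // only used when --wasm-speculative-inlining is enabled.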
int feedback_vector_slots = NumFeedbackSlots(module, func_index); if (feedback_vector_slots > 0) { DCHECK(v8_flags.wasm_speculative_inlining); Handle vector = isolate->factory()->NewFixedArrayWithZeroes(feedback_vector_slots); instance->feedback_vectors().set( declared_function_index(module, func_index), *vector); } return true; } void ThrowLazyCompilationError(Isolate* isolate, const NativeModule* native_module, int func_index) { const WasmModule* module = native_module->module(); CompilationStateImpl* compilation_state = Impl(native_module->compilation_state()); const WasmFunction* func = &module->functions[func_index]; base::Vector code = compilation_state->GetWireBytesStorage()->GetCode(func->code); WasmEngine* engine = GetWasmEngine(); auto enabled_features = native_module->enabled_features(); DecodeResult decode_result = ValidateSingleFunction( module, func_index, code, engine->allocator(), enabled_features); CHECK(decode_result.failed()); wasm::ErrorThrower thrower(isolate, nullptr); SetCompileError(&thrower, ModuleWireBytes(native_module->wire_bytes()), func, module, decode_result.error()); } class TransitiveTypeFeedbackProcessor { public: static void Process(WasmInstanceObject instance, int func_index) { TransitiveTypeFeedbackProcessor{instance, func_index}.ProcessQueue(); } private: TransitiveTypeFeedbackProcessor(WasmInstanceObject instance, int func_index) : instance_(instance), module_(instance.module()), mutex_guard(&module_->type_feedback.mutex), feedback_for_function_(module_->type_feedback.feedback_for_function) { queue_.insert(func_index); } ~TransitiveTypeFeedbackProcessor() { DCHECK(queue_.empty()); } void ProcessQueue() { while (!queue_.empty()) { auto next = queue_.cbegin(); ProcessFunction(*next); queue_.erase(next); } } void ProcessFunction(int func_index); void EnqueueCallees(const std::vector& feedback) { for (size_t i = 0; i < feedback.size(); i++) { const CallSiteFeedback& csf = feedback[i]; for (int j = 0; j < csf.num_cases(); j++) { int func = csf.function_index(j); // Don't spend time on calls that have never been executed. if (csf.call_count(j) == 0) continue; // Don't recompute feedback that has already been processed. auto existing = feedback_for_function_.find(func); if (existing != feedback_for_function_.end() && existing->second.feedback_vector.size() > 0) { continue; } queue_.insert(func); } } } DisallowGarbageCollection no_gc_scope_; WasmInstanceObject instance_; const WasmModule* const module_; base::MutexGuard mutex_guard; std::unordered_map& feedback_for_function_; std::set queue_; }; class FeedbackMaker { public: FeedbackMaker(WasmInstanceObject instance, int func_index, int num_calls) : instance_(instance), num_imported_functions_( static_cast(instance.module()->num_imported_functions)), func_index_(func_index) { result_.reserve(num_calls); } void AddCandidate(Object maybe_function, int count) { if (!maybe_function.IsWasmInternalFunction()) return; WasmInternalFunction function = WasmInternalFunction::cast(maybe_function); if (!WasmExportedFunction::IsWasmExportedFunction(function.external())) { return; } WasmExportedFunction target = WasmExportedFunction::cast(function.external()); if (target.instance() != instance_) return; if (target.function_index() < num_imported_functions_) return; AddCall(target.function_index(), count); } void AddCall(int target, int count) { // Keep the cache sorted (using insertion-sort), highest count first. 
int insertion_index = 0; while (insertion_index < cache_usage_ && counts_cache_[insertion_index] >= count) { insertion_index++; } for (int shifted_index = cache_usage_ - 1; shifted_index >= insertion_index; shifted_index--) { targets_cache_[shifted_index + 1] = targets_cache_[shifted_index]; counts_cache_[shifted_index + 1] = counts_cache_[shifted_index]; } targets_cache_[insertion_index] = target; counts_cache_[insertion_index] = count; cache_usage_++; } void FinalizeCall() { if (cache_usage_ == 0) { result_.emplace_back(); } else if (cache_usage_ == 1) { if (v8_flags.trace_wasm_speculative_inlining) { PrintF("[Function #%d call_ref #%zu inlineable (monomorphic)]\n", func_index_, result_.size()); } result_.emplace_back(targets_cache_[0], counts_cache_[0]); } else { if (v8_flags.trace_wasm_speculative_inlining) { PrintF("[Function #%d call_ref #%zu inlineable (polymorphic %d)]\n", func_index_, result_.size(), cache_usage_); } CallSiteFeedback::PolymorphicCase* polymorphic = new CallSiteFeedback::PolymorphicCase[cache_usage_]; for (int i = 0; i < cache_usage_; i++) { polymorphic[i].function_index = targets_cache_[i]; polymorphic[i].absolute_call_frequency = counts_cache_[i]; } result_.emplace_back(polymorphic, cache_usage_); } cache_usage_ = 0; } // {GetResult} can only be called on a r-value reference to make it more // obvious at call sites that {this} should not be used after this operation. std::vector&& GetResult() && { return std::move(result_); } private: const WasmInstanceObject instance_; std::vector result_; const int num_imported_functions_; const int func_index_; int cache_usage_{0}; int targets_cache_[kMaxPolymorphism]; int counts_cache_[kMaxPolymorphism]; }; void TransitiveTypeFeedbackProcessor::ProcessFunction(int func_index) { int which_vector = declared_function_index(module_, func_index); Object maybe_feedback = instance_.feedback_vectors().get(which_vector); if (!maybe_feedback.IsFixedArray()) return; FixedArray feedback = FixedArray::cast(maybe_feedback); base::Vector call_direct_targets = module_->type_feedback.feedback_for_function[func_index] .call_targets.as_vector(); DCHECK_EQ(feedback.length(), call_direct_targets.size() * 2); FeedbackMaker fm(instance_, func_index, feedback.length() / 2); for (int i = 0; i < feedback.length(); i += 2) { Object value = feedback.get(i); if (value.IsWasmInternalFunction()) { // Monomorphic. int count = Smi::cast(feedback.get(i + 1)).value(); fm.AddCandidate(value, count); } else if (value.IsFixedArray()) { // Polymorphic. FixedArray polymorphic = FixedArray::cast(value); for (int j = 0; j < polymorphic.length(); j += 2) { Object function = polymorphic.get(j); int count = Smi::cast(polymorphic.get(j + 1)).value(); fm.AddCandidate(function, count); } } else if (value.IsSmi()) { // Uninitialized, or a direct call collecting call count. 
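      // A Smi means the call site has not collected a call_ref target; for
      // direct calls the static callee index comes from {call_direct_targets}.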
uint32_t target = call_direct_targets[i / 2]; if (target != FunctionTypeFeedback::kNonDirectCall) { int count = Smi::cast(value).value(); fm.AddCall(static_cast(target), count); } else if (v8_flags.trace_wasm_speculative_inlining) { PrintF("[Function #%d call #%d: uninitialized]\n", func_index, i / 2); } } else if (v8_flags.trace_wasm_speculative_inlining) { if (value == ReadOnlyRoots(instance_.GetIsolate()).megamorphic_symbol()) { PrintF("[Function #%d call #%d: megamorphic]\n", func_index, i / 2); } } fm.FinalizeCall(); } std::vector result = std::move(fm).GetResult(); EnqueueCallees(result); feedback_for_function_[func_index].feedback_vector = std::move(result); } void TriggerTierUp(WasmInstanceObject instance, int func_index) { NativeModule* native_module = instance.module_object().native_module(); CompilationStateImpl* compilation_state = Impl(native_module->compilation_state()); WasmCompilationUnit tiering_unit{func_index, ExecutionTier::kTurbofan, kNoDebugging}; const WasmModule* module = native_module->module(); int priority; { base::MutexGuard mutex_guard(&module->type_feedback.mutex); int array_index = wasm::declared_function_index(instance.module(), func_index); instance.tiering_budget_array()[array_index] = v8_flags.wasm_tiering_budget; int& stored_priority = module->type_feedback.feedback_for_function[func_index].tierup_priority; if (stored_priority < kMaxInt) ++stored_priority; priority = stored_priority; } // Only create a compilation unit if this is the first time we detect this // function as hot (priority == 1), or if the priority increased // significantly. The latter is assumed to be the case if the priority // increased at least to four, and is a power of two. if (priority == 2 || !base::bits::IsPowerOfTwo(priority)) return; // Before adding the tier-up unit or increasing priority, do process type // feedback for best code generation. if (v8_flags.wasm_speculative_inlining) { // TODO(jkummerow): we could have collisions here if different instances // of the same module have collected different feedback. If that ever // becomes a problem, figure out a solution. 
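    // Feedback is processed transitively so that callees reachable from the
    // hot function also have their feedback available for inlining decisions.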
TransitiveTypeFeedbackProcessor::Process(instance, func_index); } compilation_state->AddTopTierPriorityCompilationUnit(tiering_unit, priority); } void TierUpNowForTesting(Isolate* isolate, WasmInstanceObject instance, int func_index) { if (v8_flags.wasm_speculative_inlining) { TransitiveTypeFeedbackProcessor::Process(instance, func_index); } auto* native_module = instance.module_object().native_module(); wasm::GetWasmEngine()->CompileFunction(isolate, native_module, func_index, wasm::ExecutionTier::kTurbofan); CHECK(!native_module->compilation_state()->failed()); } namespace { void RecordStats(CodeT codet, Counters* counters) { if (codet.is_off_heap_trampoline()) return; Code code = FromCodeT(codet); counters->wasm_generated_code_size()->Increment(code.raw_body_size()); counters->wasm_reloc_size()->Increment(code.relocation_info().length()); } enum CompilationExecutionResult : int8_t { kNoMoreUnits, kYield }; CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits( std::weak_ptr native_module, JobDelegate* delegate) { std::shared_ptr wrapper_unit = nullptr; int num_processed_wrappers = 0; OperationsBarrier::Token wrapper_compilation_token; Isolate* isolate; { BackgroundCompileScope compile_scope(native_module); if (compile_scope.cancelled()) return kYield; wrapper_unit = compile_scope.compilation_state() ->GetNextJSToWasmWrapperCompilationUnit(); if (!wrapper_unit) return kNoMoreUnits; isolate = wrapper_unit->isolate(); wrapper_compilation_token = wasm::GetWasmEngine()->StartWrapperCompilation(isolate); if (!wrapper_compilation_token) return kNoMoreUnits; } TRACE_EVENT0("v8.wasm", "wasm.JSToWasmWrapperCompilation"); while (true) { DCHECK_EQ(isolate, wrapper_unit->isolate()); wrapper_unit->Execute(); ++num_processed_wrappers; bool yield = delegate && delegate->ShouldYield(); BackgroundCompileScope compile_scope(native_module); if (compile_scope.cancelled()) return kYield; if (yield || !(wrapper_unit = compile_scope.compilation_state() ->GetNextJSToWasmWrapperCompilationUnit())) { compile_scope.compilation_state()->OnFinishedJSToWasmWrapperUnits( num_processed_wrappers); return yield ? kYield : kNoMoreUnits; } } } namespace { const char* GetCompilationEventName(const WasmCompilationUnit& unit, const CompilationEnv& env) { ExecutionTier tier = unit.tier(); if (tier == ExecutionTier::kLiftoff) { return "wasm.BaselineCompilation"; } if (tier == ExecutionTier::kTurbofan) { return "wasm.TopTierCompilation"; } if (unit.func_index() < static_cast(env.module->num_imported_functions)) { return "wasm.WasmToJSWrapperCompilation"; } return "wasm.OtherCompilation"; } } // namespace constexpr uint8_t kMainTaskId = 0; // Run by the {BackgroundCompileJob} (on any thread). CompilationExecutionResult ExecuteCompilationUnits( std::weak_ptr native_module, Counters* counters, JobDelegate* delegate, CompileBaselineOnly baseline_only) { TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits"); // Execute JS to Wasm wrapper units first, so that they are ready to be // finalized by the main thread when the kFinishedBaselineCompilation event is // triggered. if (ExecuteJSToWasmWrapperCompilationUnits(native_module, delegate) == kYield) { return kYield; } // These fields are initialized in a {BackgroundCompileScope} before // starting compilation. base::Optional env; std::shared_ptr wire_bytes; std::shared_ptr module; // Task 0 is any main thread (there might be multiple from multiple isolates), // worker threads start at 1 (thus the "+ 1"). static_assert(kMainTaskId == 0); int task_id = delegate ? 
(int{delegate->GetTaskId()} + 1) : kMainTaskId; DCHECK_LE(0, task_id); CompilationUnitQueues::Queue* queue; base::Optional unit; WasmFeatures detected_features = WasmFeatures::None(); base::ThreadTicks thread_ticks = base::ThreadTicks::IsSupported() ? base::ThreadTicks::Now() : base::ThreadTicks(); // Preparation (synchronized): Initialize the fields above and get the first // compilation unit. { BackgroundCompileScope compile_scope(native_module); if (compile_scope.cancelled()) return kYield; env.emplace(compile_scope.native_module()->CreateCompilationEnv()); wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage(); module = compile_scope.native_module()->shared_module(); queue = compile_scope.compilation_state()->GetQueueForCompileTask(task_id); unit = compile_scope.compilation_state()->GetNextCompilationUnit( queue, baseline_only); if (!unit) return kNoMoreUnits; } TRACE_COMPILE("ExecuteCompilationUnits (task id %d)\n", task_id); // If PKU is enabled, use an assembler buffer cache to avoid many expensive // buffer allocations. Otherwise, malloc/free is efficient enough to prefer // that bit of overhead over the memory consumption increase by the cache. base::Optional optional_assembler_buffer_cache; AssemblerBufferCache* assembler_buffer_cache = nullptr; // Also, open a CodeSpaceWriteScope now to have (thread-local) write access to // the assembler buffers. base::Optional write_scope_for_assembler_buffers; if (WasmCodeManager::MemoryProtectionKeysEnabled()) { optional_assembler_buffer_cache.emplace(); assembler_buffer_cache = &*optional_assembler_buffer_cache; write_scope_for_assembler_buffers.emplace(nullptr); } std::vector results_to_publish; while (true) { ExecutionTier current_tier = unit->tier(); const char* event_name = GetCompilationEventName(unit.value(), env.value()); TRACE_EVENT0("v8.wasm", event_name); while (unit->tier() == current_tier) { // (asynchronous): Execute the compilation. WasmCompilationResult result = unit->ExecuteCompilation(&env.value(), wire_bytes.get(), counters, assembler_buffer_cache, &detected_features); results_to_publish.emplace_back(std::move(result)); bool yield = delegate && delegate->ShouldYield(); // (synchronized): Publish the compilation result and get the next unit. BackgroundCompileScope compile_scope(native_module); if (compile_scope.cancelled()) return kYield; if (!results_to_publish.back().succeeded()) { compile_scope.compilation_state()->SetError(); return kNoMoreUnits; } if (!unit->for_debugging() && result.result_tier != current_tier) { compile_scope.native_module()->AddLiftoffBailout(); } // Yield or get next unit. if (yield || !(unit = compile_scope.compilation_state()->GetNextCompilationUnit( queue, baseline_only))) { if (!thread_ticks.IsNull()) { compile_scope.native_module()->UpdateCPUDuration( (base::ThreadTicks::Now() - thread_ticks).InMicroseconds(), current_tier); } std::vector> unpublished_code = compile_scope.native_module()->AddCompiledCode( base::VectorOf(std::move(results_to_publish))); results_to_publish.clear(); compile_scope.compilation_state()->SchedulePublishCompilationResults( std::move(unpublished_code)); compile_scope.compilation_state()->OnCompilationStopped( detected_features); return yield ? kYield : kNoMoreUnits; } // Publish after finishing a certain amount of units, to avoid contention // when all threads publish at the end. 
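      // {ShouldPublish} compares the batch size against the per-queue
      // {publish_limit} configured in {GetQueueForTask}.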
bool batch_full = queue->ShouldPublish(static_cast(results_to_publish.size())); // Also publish each time the compilation tier changes from Liftoff to // TurboFan, such that we immediately publish the baseline compilation // results to start execution, and do not wait for a batch to fill up. bool liftoff_finished = unit->tier() != current_tier && unit->tier() == ExecutionTier::kTurbofan; if (batch_full || liftoff_finished) { if (!thread_ticks.IsNull()) { base::ThreadTicks thread_ticks_now = base::ThreadTicks::Now(); compile_scope.native_module()->UpdateCPUDuration( (thread_ticks_now - thread_ticks).InMicroseconds(), current_tier); thread_ticks = thread_ticks_now; } std::vector> unpublished_code = compile_scope.native_module()->AddCompiledCode( base::VectorOf(std::move(results_to_publish))); results_to_publish.clear(); compile_scope.compilation_state()->SchedulePublishCompilationResults( std::move(unpublished_code)); } } } UNREACHABLE(); } // (function is imported, canonical type index) using JSToWasmWrapperKey = std::pair; // Returns the number of units added. int AddExportWrapperUnits(Isolate* isolate, NativeModule* native_module, CompilationUnitBuilder* builder) { std::unordered_set> keys; for (auto exp : native_module->module()->export_table) { if (exp.kind != kExternalFunction) continue; auto& function = native_module->module()->functions[exp.index]; uint32_t canonical_type_index = native_module->module() ->isorecursive_canonical_type_ids[function.sig_index]; JSToWasmWrapperKey key(function.imported, canonical_type_index); if (keys.insert(key).second) { auto unit = std::make_shared( isolate, function.sig, canonical_type_index, native_module->module(), function.imported, native_module->enabled_features(), JSToWasmWrapperCompilationUnit::kAllowGeneric); builder->AddJSToWasmWrapperUnit(std::move(unit)); } } return static_cast(keys.size()); } // Returns the number of units added. int AddImportWrapperUnits(NativeModule* native_module, CompilationUnitBuilder* builder) { std::unordered_set keys; int num_imported_functions = native_module->num_imported_functions(); for (int func_index = 0; func_index < num_imported_functions; func_index++) { const WasmFunction& function = native_module->module()->functions[func_index]; if (!IsJSCompatibleSignature(function.sig, native_module->module(), native_module->enabled_features())) { continue; } uint32_t canonical_type_index = native_module->module() ->isorecursive_canonical_type_ids[function.sig_index]; WasmImportWrapperCache::CacheKey key( compiler::kDefaultImportCallKind, canonical_type_index, static_cast(function.sig->parameter_count()), kNoSuspend); auto it = keys.insert(key); if (it.second) { // Ensure that all keys exist in the cache, so that we can populate the // cache later without locking. 
      (*native_module->import_wrapper_cache())[key] = nullptr;
      builder->AddImportUnit(func_index);
    }
  }
  return static_cast<int>(keys.size());
}

void InitializeLazyCompilation(NativeModule* native_module) {
  const bool lazy_module = IsLazyModule(native_module->module());
  auto* module = native_module->module();
  uint32_t start = module->num_imported_functions;
  uint32_t end = start + module->num_declared_functions;
  base::Optional<CodeSpaceWriteScope> lazy_code_space_write_scope;
  for (uint32_t func_index = start; func_index < end; func_index++) {
    CompileStrategy strategy = GetCompileStrategy(
        module, native_module->enabled_features(), func_index, lazy_module);
    if (strategy == CompileStrategy::kLazy ||
        strategy == CompileStrategy::kLazyBaselineEagerTopTier) {
      // Open a single scope for all following calls to {UseLazyStub()},
      // instead of flipping page permissions for each {func_index}
      // individually.
      if (!lazy_code_space_write_scope.has_value()) {
        lazy_code_space_write_scope.emplace(native_module);
      }
      native_module->UseLazyStub(func_index);
    }
  }
}

std::unique_ptr<CompilationUnitBuilder> InitializeCompilation(
    Isolate* isolate, NativeModule* native_module) {
  InitializeLazyCompilation(native_module);
  CompilationStateImpl* compilation_state =
      Impl(native_module->compilation_state());
  auto builder = std::make_unique<CompilationUnitBuilder>(native_module);
  int num_import_wrappers = AddImportWrapperUnits(native_module, builder.get());
  int num_export_wrappers =
      AddExportWrapperUnits(isolate, native_module, builder.get());
  compilation_state->InitializeCompilationProgress(num_import_wrappers,
                                                   num_export_wrappers);
  return builder;
}

bool MayCompriseLazyFunctions(const WasmModule* module,
                              const WasmFeatures& enabled_features,
                              bool lazy_module) {
  if (lazy_module || enabled_features.has_compilation_hints()) return true;
#ifdef ENABLE_SLOW_DCHECKS
  int start = module->num_imported_functions;
  int end = start + module->num_declared_functions;
  for (int func_index = start; func_index < end; func_index++) {
    SLOW_DCHECK(GetCompileStrategy(module, enabled_features, func_index,
                                   lazy_module) != CompileStrategy::kLazy);
  }
#endif
  return false;
}

class CompilationTimeCallback : public CompilationEventCallback {
 public:
  enum CompileMode { kSynchronous, kAsync, kStreaming };
  explicit CompilationTimeCallback(
      std::shared_ptr<Counters> async_counters,
      std::shared_ptr<metrics::Recorder> metrics_recorder,
      v8::metrics::Recorder::ContextId context_id,
      std::weak_ptr<NativeModule> native_module, CompileMode compile_mode)
      : start_time_(base::TimeTicks::Now()),
        async_counters_(std::move(async_counters)),
        metrics_recorder_(std::move(metrics_recorder)),
        context_id_(context_id),
        native_module_(std::move(native_module)),
        compile_mode_(compile_mode) {}

  void call(CompilationEvent compilation_event) override {
    DCHECK(base::TimeTicks::IsHighResolution());
    std::shared_ptr<NativeModule> native_module = native_module_.lock();
    if (!native_module) return;
    auto now = base::TimeTicks::Now();
    auto duration = now - start_time_;
    if (compilation_event == CompilationEvent::kFinishedBaselineCompilation) {
      // Reset {start_time_} to measure tier-up time.
      start_time_ = now;
      if (compile_mode_ != kSynchronous) {
        TimedHistogram* histogram =
            compile_mode_ == kAsync
                ? async_counters_->wasm_async_compile_wasm_module_time()
                : async_counters_->wasm_streaming_compile_wasm_module_time();
        histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
      }

      v8::metrics::WasmModuleCompiled event{
          (compile_mode_ != kSynchronous),         // async
          (compile_mode_ == kStreaming),           // streamed
          false,                                   // cached
          false,                                   // deserialized
          v8_flags.wasm_lazy_compilation,          // lazy
          true,                                    // success
          native_module->liftoff_code_size(),      // code_size_in_bytes
          native_module->liftoff_bailout_count(),  // liftoff_bailout_count
          duration.InMicroseconds(),               // wall_clock_duration_in_us
          static_cast<int64_t>(                    // cpu_time_duration_in_us
              native_module->baseline_compilation_cpu_duration())};
      metrics_recorder_->DelayMainThreadEvent(event, context_id_);
    }
    if (compilation_event == CompilationEvent::kFailedCompilation) {
      v8::metrics::WasmModuleCompiled event{
          (compile_mode_ != kSynchronous),         // async
          (compile_mode_ == kStreaming),           // streamed
          false,                                   // cached
          false,                                   // deserialized
          v8_flags.wasm_lazy_compilation,          // lazy
          false,                                   // success
          native_module->liftoff_code_size(),      // code_size_in_bytes
          native_module->liftoff_bailout_count(),  // liftoff_bailout_count
          duration.InMicroseconds(),               // wall_clock_duration_in_us
          static_cast<int64_t>(                    // cpu_time_duration_in_us
              native_module->baseline_compilation_cpu_duration())};
      metrics_recorder_->DelayMainThreadEvent(event, context_id_);
    }
  }

 private:
  base::TimeTicks start_time_;
  const std::shared_ptr<Counters> async_counters_;
  std::shared_ptr<metrics::Recorder> metrics_recorder_;
  v8::metrics::Recorder::ContextId context_id_;
  std::weak_ptr<NativeModule> native_module_;
  const CompileMode compile_mode_;
};

void CompileNativeModule(Isolate* isolate,
                         v8::metrics::Recorder::ContextId context_id,
                         ErrorThrower* thrower, const WasmModule* wasm_module,
                         std::shared_ptr<NativeModule> native_module) {
  CHECK(!v8_flags.jitless);
  ModuleWireBytes wire_bytes(native_module->wire_bytes());
  const bool lazy_module = IsLazyModule(wasm_module);
  if (!v8_flags.wasm_lazy_validation && wasm_module->origin == kWasmOrigin &&
      MayCompriseLazyFunctions(wasm_module, native_module->enabled_features(),
                               lazy_module)) {
    // Validate wasm modules for lazy compilation if requested. Never validate
    // asm.js modules as these are valid by construction (additionally a CHECK
    // will catch this during lazy compilation).
    ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
                         isolate->allocator(), thrower, lazy_module,
                         kOnlyLazyFunctions);
    // On error: Return and leave the module in an unexecutable state.
    if (thrower->error()) return;
  }

  DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);

  // The callback captures a shared ptr to the semaphore.
  auto* compilation_state = Impl(native_module->compilation_state());
  if (base::TimeTicks::IsHighResolution()) {
    compilation_state->AddCallback(std::make_unique<CompilationTimeCallback>(
        isolate->async_counters(), isolate->metrics_recorder(), context_id,
        native_module, CompilationTimeCallback::kSynchronous));
  }

  // Initialize the compilation units and kick off background compile tasks.
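  // The calls below block the current thread: first wait for the export
  // wrappers and finalize them, then wait for baseline compilation of all
  // functions. On failure, validation is re-run sequentially so that the
  // concrete error is reported through {thrower}.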
  std::unique_ptr<CompilationUnitBuilder> builder =
      InitializeCompilation(isolate, native_module.get());
  compilation_state->InitializeCompilationUnits(std::move(builder));

  compilation_state->WaitForCompilationEvent(
      CompilationEvent::kFinishedExportWrappers);

  if (compilation_state->failed()) {
    DCHECK_IMPLIES(lazy_module, !v8_flags.wasm_lazy_validation);
    ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
                         isolate->allocator(), thrower, lazy_module);
    CHECK(thrower->error());
    return;
  }

  compilation_state->FinalizeJSToWasmWrappers(isolate,
                                              native_module->module());

  compilation_state->WaitForCompilationEvent(
      CompilationEvent::kFinishedBaselineCompilation);

  compilation_state->PublishDetectedFeatures(isolate);

  if (compilation_state->failed()) {
    DCHECK_IMPLIES(lazy_module, !v8_flags.wasm_lazy_validation);
    ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
                         isolate->allocator(), thrower, lazy_module);
    CHECK(thrower->error());
  }
}

class BackgroundCompileJob final : public JobTask {
 public:
  explicit BackgroundCompileJob(std::weak_ptr<NativeModule> native_module,
                                std::shared_ptr<Counters> async_counters)
      : native_module_(std::move(native_module)),
        engine_barrier_(GetWasmEngine()->GetBarrierForBackgroundCompile()),
        async_counters_(std::move(async_counters)) {}

  void Run(JobDelegate* delegate) override {
    auto engine_scope = engine_barrier_->TryLock();
    if (!engine_scope) return;
    ExecuteCompilationUnits(native_module_, async_counters_.get(), delegate,
                            kBaselineOrTopTier);
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    BackgroundCompileScope compile_scope(native_module_);
    if (compile_scope.cancelled()) return 0;
    // NumOutstandingCompilations() does not reflect the units that running
    // workers are processing, thus add the current worker count to that
    // number.
    return std::min(
        static_cast<size_t>(v8_flags.wasm_num_compilation_tasks),
        worker_count +
            compile_scope.compilation_state()->NumOutstandingCompilations());
  }

 private:
  std::weak_ptr<NativeModule> native_module_;
  std::shared_ptr<OperationsBarrier> engine_barrier_;
  const std::shared_ptr<Counters> async_counters_;
};

}  // namespace

std::shared_ptr<NativeModule> CompileToNativeModule(
    Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
    std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
    int compilation_id, v8::metrics::Recorder::ContextId context_id) {
  const WasmModule* wasm_module = module.get();
  WasmEngine* engine = GetWasmEngine();
  base::OwnedVector<uint8_t> wire_bytes_copy =
      base::OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
  // Prefer {wire_bytes_copy} to {wire_bytes.module_bytes()} for the temporary
  // cache key. When we eventually install the module in the cache, the wire
  // bytes of the temporary key and the new key have the same base pointer and
  // we can skip the full bytes comparison.
  std::shared_ptr<NativeModule> native_module = engine->MaybeGetNativeModule(
      wasm_module->origin, wire_bytes_copy.as_vector(), isolate);
  if (native_module) {
    CompileJsToWasmWrappers(isolate, wasm_module);
    return native_module;
  }

  base::Optional<TimedHistogramScope> wasm_compile_module_time_scope;
  if (base::TimeTicks::IsHighResolution()) {
    wasm_compile_module_time_scope.emplace(SELECT_WASM_COUNTER(
        isolate->counters(), wasm_module->origin, wasm_compile, module_time));
  }

  // Embedder usage count for declared shared memories.
  if (wasm_module->has_shared_memory) {
    isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
  }

  // Create a new {NativeModule} first.
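  // The code size estimate passed to {NewNativeModule} below sizes the initial
  // code space reservation; it is derived from whether Liftoff code is
  // expected for this origin and from the dynamic tiering configuration.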
  const bool include_liftoff =
      module->origin == kWasmOrigin && v8_flags.liftoff;
  size_t code_size_estimate =
      wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
          module.get(), include_liftoff,
          DynamicTiering{v8_flags.wasm_dynamic_tiering.value()});
  native_module =
      engine->NewNativeModule(isolate, enabled, module, code_size_estimate);
  native_module->SetWireBytes(std::move(wire_bytes_copy));
  native_module->compilation_state()->set_compilation_id(compilation_id);
  // Sync compilation is user blocking, so we increase the priority.
  native_module->compilation_state()->SetHighPriority();

  CompileNativeModule(isolate, context_id, thrower, wasm_module, native_module);
  bool cache_hit = !engine->UpdateNativeModuleCache(thrower->error(),
                                                    &native_module, isolate);
  if (thrower->error()) return {};

  if (cache_hit) {
    CompileJsToWasmWrappers(isolate, wasm_module);
    return native_module;
  }

  // Ensure that the code objects are logged before returning.
  engine->LogOutstandingCodesForIsolate(isolate);

  return native_module;
}

void RecompileNativeModule(NativeModule* native_module,
                           TieringState tiering_state) {
  // Install a callback to notify us once background recompilation finished.
  auto recompilation_finished_semaphore = std::make_shared<base::Semaphore>(0);
  auto* compilation_state = Impl(native_module->compilation_state());

  class RecompilationFinishedCallback : public CompilationEventCallback {
   public:
    explicit RecompilationFinishedCallback(
        std::shared_ptr<base::Semaphore> recompilation_finished_semaphore)
        : recompilation_finished_semaphore_(
              std::move(recompilation_finished_semaphore)) {}

    void call(CompilationEvent event) override {
      DCHECK_NE(CompilationEvent::kFailedCompilation, event);
      if (event == CompilationEvent::kFinishedRecompilation) {
        recompilation_finished_semaphore_->Signal();
      }
    }

   private:
    std::shared_ptr<base::Semaphore> recompilation_finished_semaphore_;
  };

  // The callback captures a shared ptr to the semaphore.
  // Initialize the compilation units and kick off background compile tasks.
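  // The recompilation units are executed synchronously on the current thread
  // (note the {kNoDelegate}), and {Wait()} blocks until the callback above
  // signals {kFinishedRecompilation}.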
  compilation_state->InitializeRecompilation(
      tiering_state, std::make_unique<RecompilationFinishedCallback>(
                         recompilation_finished_semaphore));

  constexpr JobDelegate* kNoDelegate = nullptr;
  ExecuteCompilationUnits(compilation_state->native_module_weak(),
                          compilation_state->counters(), kNoDelegate,
                          kBaselineOnly);
  recompilation_finished_semaphore->Wait();
  DCHECK(!compilation_state->failed());
}

AsyncCompileJob::AsyncCompileJob(
    Isolate* isolate, const WasmFeatures& enabled,
    std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
    Handle<Context> incumbent_context, const char* api_method_name,
    std::shared_ptr<CompilationResultResolver> resolver, int compilation_id)
    : isolate_(isolate),
      api_method_name_(api_method_name),
      enabled_features_(enabled),
      dynamic_tiering_(DynamicTiering{v8_flags.wasm_dynamic_tiering.value()}),
      wasm_lazy_compilation_(v8_flags.wasm_lazy_compilation),
      start_time_(base::TimeTicks::Now()),
      bytes_copy_(std::move(bytes_copy)),
      wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
      resolver_(std::move(resolver)),
      compilation_id_(compilation_id) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.AsyncCompileJob");
  CHECK(v8_flags.wasm_async_compilation);
  CHECK(!v8_flags.jitless);
  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
  v8::Platform* platform = V8::GetCurrentPlatform();
  foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
  native_context_ =
      isolate->global_handles()->Create(context->native_context());
  incumbent_context_ = isolate->global_handles()->Create(*incumbent_context);
  DCHECK(native_context_->IsNativeContext());
  context_id_ = isolate->GetOrRegisterRecorderContextId(native_context_);
  metrics_event_.async = true;
}

void AsyncCompileJob::Start() {
  DoAsync<DecodeModule>(isolate_->counters(),
                        isolate_->metrics_recorder());  // --
}

void AsyncCompileJob::Abort() {
  // Removing this job will trigger the destructor, which will cancel all
  // compilation.
  GetWasmEngine()->RemoveCompileJob(this);
}

class AsyncStreamingProcessor final : public StreamingProcessor {
 public:
  explicit AsyncStreamingProcessor(AsyncCompileJob* job,
                                   std::shared_ptr<Counters> counters,
                                   AccountingAllocator* allocator);

  ~AsyncStreamingProcessor() override;

  bool ProcessModuleHeader(base::Vector<const uint8_t> bytes,
                           uint32_t offset) override;

  bool ProcessSection(SectionCode section_code,
                      base::Vector<const uint8_t> bytes,
                      uint32_t offset) override;

  bool ProcessCodeSectionHeader(int num_functions,
                                uint32_t functions_mismatch_error_offset,
                                std::shared_ptr<WireBytesStorage>,
                                int code_section_start,
                                int code_section_length) override;

  void ProcessFunctionBody(base::Vector<const uint8_t> bytes,
                           uint32_t offset) override;

  void OnFinishedChunk() override;

  void OnFinishedStream(base::OwnedVector<uint8_t> bytes) override;

  void OnError(const WasmError&) override;

  void OnAbort() override;

  bool Deserialize(base::Vector<const uint8_t> wire_bytes,
                   base::Vector<const uint8_t> module_bytes) override;

 private:
  enum ErrorLocation { kErrorInFunction, kErrorInSection };
  // Finishes the AsyncCompileJob with an error.
  void FinishAsyncCompileJobWithError(
      const WasmError&, ErrorLocation error_location = kErrorInSection);

  void CommitCompilationUnits();

  ModuleDecoder decoder_;
  AsyncCompileJob* job_;
  std::unique_ptr<CompilationUnitBuilder> compilation_unit_builder_;
  int num_functions_ = 0;
  bool prefix_cache_hit_ = false;
  bool before_code_section_ = true;
  std::shared_ptr<Counters> async_counters_;
  AccountingAllocator* allocator_;

  // Running hash of the wire bytes up to code section size, but excluding the
  // code section itself. Used by the {NativeModuleCache} to detect potential
  // duplicate modules.
  size_t prefix_hash_;
};

std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
  DCHECK_NULL(stream_);
  stream_ = StreamingDecoder::CreateAsyncStreamingDecoder(
      std::make_unique<AsyncStreamingProcessor>(
          this, isolate_->async_counters(), isolate_->allocator()));
  return stream_;
}

AsyncCompileJob::~AsyncCompileJob() {
  // Note: This destructor always runs on the foreground thread of the isolate.
  background_task_manager_.CancelAndWait();
  // If initial compilation did not finish yet we can abort it.
  if (native_module_) {
    Impl(native_module_->compilation_state())
        ->CancelCompilation(CompilationStateImpl::kCancelInitialCompilation);
  }
  // Tell the streaming decoder that the AsyncCompileJob is not available
  // anymore.
  // TODO(ahaas): Is this notification really necessary? Check
  // https://crbug.com/888170.
  if (stream_) stream_->NotifyCompilationEnded();
  CancelPendingForegroundTask();
  isolate_->global_handles()->Destroy(native_context_.location());
  isolate_->global_handles()->Destroy(incumbent_context_.location());
  if (!module_object_.is_null()) {
    isolate_->global_handles()->Destroy(module_object_.location());
  }
}

void AsyncCompileJob::CreateNativeModule(
    std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
  // Embedder usage count for declared shared memories.
  if (module->has_shared_memory) {
    isolate_->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
  }

  // TODO(wasm): Improve efficiency of storing module wire bytes. Only store
  // relevant sections, not function bodies

  // Create the module object and populate with compiled functions and
  // information needed at instantiation time.

  native_module_ = GetWasmEngine()->NewNativeModule(
      isolate_, enabled_features_, std::move(module), code_size_estimate);
  native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
  native_module_->compilation_state()->set_compilation_id(compilation_id_);
}

bool AsyncCompileJob::GetOrCreateNativeModule(
    std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
  native_module_ = GetWasmEngine()->MaybeGetNativeModule(
      module->origin, wire_bytes_.module_bytes(), isolate_);
  if (native_module_ == nullptr) {
    CreateNativeModule(std::move(module), code_size_estimate);
    return false;
  }
  return true;
}

void AsyncCompileJob::PrepareRuntimeObjects() {
  // Create heap objects for script and module bytes to be stored in the
  // module object. Asm.js is not compiled asynchronously.
  DCHECK(module_object_.is_null());
  auto source_url =
      stream_ ? base::VectorOf(stream_->url()) : base::Vector<const char>();
  auto script =
      GetWasmEngine()->GetOrCreateScript(isolate_, native_module_, source_url);
  Handle<WasmModuleObject> module_object =
      WasmModuleObject::New(isolate_, native_module_, script);

  module_object_ = isolate_->global_handles()->Create(*module_object);
}

// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.FinishAsyncCompile");
  bool is_after_deserialization = !module_object_.is_null();
  auto compilation_state = Impl(native_module_->compilation_state());
  if (!is_after_deserialization) {
    if (stream_) {
      stream_->NotifyNativeModuleCreated(native_module_);
    }
    PrepareRuntimeObjects();
  }

  // Measure duration of baseline compilation or deserialization from cache.
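  // Samples and events are only recorded when high-resolution timers are
  // available; the {WasmModuleCompiled} event is emitted here only for the
  // cache-hit and deserialization paths.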
  if (base::TimeTicks::IsHighResolution()) {
    base::TimeDelta duration = base::TimeTicks::Now() - start_time_;
    int duration_usecs = static_cast<int>(duration.InMicroseconds());
    isolate_->counters()->wasm_streaming_finish_wasm_module_time()->AddSample(
        duration_usecs);

    if (is_after_cache_hit || is_after_deserialization) {
      v8::metrics::WasmModuleCompiled event{
          true,                                     // async
          true,                                     // streamed
          is_after_cache_hit,                       // cached
          is_after_deserialization,                 // deserialized
          wasm_lazy_compilation_,                   // lazy
          !compilation_state->failed(),             // success
          native_module_->turbofan_code_size(),     // code_size_in_bytes
          native_module_->liftoff_bailout_count(),  // liftoff_bailout_count
          duration.InMicroseconds(),                // wall_clock_duration_in_us
          static_cast<int64_t>(                     // cpu_time_duration_in_us
              native_module_->baseline_compilation_cpu_duration())};
      isolate_->metrics_recorder()->DelayMainThreadEvent(event, context_id_);
    }
  }

  DCHECK(!isolate_->context().is_null());
  // Finish the wasm script now and make it public to the debugger.
  Handle