Diffstat (limited to 'deps/v8/src/wasm/module-compiler.cc')
-rw-r--r--   deps/v8/src/wasm/module-compiler.cc   1513
1 file changed, 954 insertions, 559 deletions
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index c39ca2406c..568f9eb0f8 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -4,10 +4,14 @@
#include "src/wasm/module-compiler.h"
+#include <algorithm>
+
#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/base/enum-set.h"
#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
@@ -18,12 +22,12 @@
#include "src/task-utils.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/js-to-wasm-wrapper-cache-inl.h"
+#include "src/wasm/js-to-wasm-wrapper-cache.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-import-wrapper-cache-inl.h"
+#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
@@ -61,22 +65,23 @@ enum class CompileMode : uint8_t { kRegular, kTiering };
// on background compile jobs.
class BackgroundCompileToken {
public:
- explicit BackgroundCompileToken(NativeModule* native_module)
+ explicit BackgroundCompileToken(
+ const std::shared_ptr<NativeModule>& native_module)
: native_module_(native_module) {}
void Cancel() {
base::SharedMutexGuard<base::kExclusive> mutex_guard(&mutex_);
- native_module_ = nullptr;
+ native_module_.reset();
}
private:
friend class BackgroundCompileScope;
base::SharedMutex mutex_;
- NativeModule* native_module_;
+ std::weak_ptr<NativeModule> native_module_;
- NativeModule* StartScope() {
+ std::shared_ptr<NativeModule> StartScope() {
mutex_.LockShared();
- return native_module_;
+ return native_module_.lock();
}
void ExitScope() { mutex_.UnlockShared(); }
@@ -99,25 +104,197 @@ class BackgroundCompileScope {
NativeModule* native_module() {
DCHECK(!cancelled());
- return native_module_;
+ return native_module_.get();
}
inline CompilationStateImpl* compilation_state();
private:
BackgroundCompileToken* const token_;
- NativeModule* const native_module_;
+ // Keep the native module alive while in this scope.
+ std::shared_ptr<NativeModule> const native_module_;
+};
+
+enum CompileBaselineOnly : bool {
+ kBaselineOnly = true,
+ kBaselineOrTopTier = false
+};
+
+// A set of work-stealing queues (vectors of units). Each background compile
+// task owns one of the queues and steals from all others once its own queue
+// runs empty.
+class CompilationUnitQueues {
+ public:
+ explicit CompilationUnitQueues(int max_tasks) : queues_(max_tasks) {
+ DCHECK_LT(0, max_tasks);
+ for (int task_id = 0; task_id < max_tasks; ++task_id) {
+ queues_[task_id].next_steal_task_id_ = next_task_id(task_id);
+ }
+ for (auto& atomic_counter : num_units_) {
+ std::atomic_init(&atomic_counter, size_t{0});
+ }
+ }
+
+ std::unique_ptr<WasmCompilationUnit> GetNextUnit(
+ int task_id, CompileBaselineOnly baseline_only) {
+ DCHECK_LE(0, task_id);
+ DCHECK_GT(queues_.size(), task_id);
+
+ // As long as any lower-tier units are outstanding we need to steal them
+ // before executing our own higher-tier units.
+ int max_tier = baseline_only ? kBaseline : kTopTier;
+ for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
+ Queue* queue = &queues_[task_id];
+ // First, check whether our own queue has a unit of the wanted tier. If
+ // so, return it; otherwise get the task id to steal from.
+ int steal_task_id;
+ {
+ base::MutexGuard mutex_guard(&queue->mutex_);
+ if (!queue->units_[tier].empty()) {
+ auto unit = std::move(queue->units_[tier].back());
+ queue->units_[tier].pop_back();
+ DecrementUnitCount(tier);
+ return unit;
+ }
+ steal_task_id = queue->next_steal_task_id_;
+ }
+
+ // Try to steal from all other queues. If no steal succeeds, the outer
+ // loop increases the tier and retries.
+ size_t steal_trials = queues_.size();
+ for (; steal_trials > 0;
+ --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
+ if (steal_task_id == task_id) continue;
+ if (auto unit = StealUnitsAndGetFirst(task_id, steal_task_id, tier)) {
+ DecrementUnitCount(tier);
+ return unit;
+ }
+ }
+ }
+ return {};
+ }
+
+ void AddUnits(Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
+ Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units) {
+ DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
+ // Add to the individual queues in a round-robin fashion. No special care is
+ // taken to balance them; they will be balanced by work stealing.
+ int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
+ while (!next_queue_to_add.compare_exchange_weak(
+ queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
+ // Retry with updated {queue_to_add}.
+ }
+
+ Queue* queue = &queues_[queue_to_add];
+ base::MutexGuard guard(&queue->mutex_);
+ if (!baseline_units.empty()) {
+ queue->units_[kBaseline].insert(
+ queue->units_[kBaseline].end(),
+ std::make_move_iterator(baseline_units.begin()),
+ std::make_move_iterator(baseline_units.end()));
+ num_units_[kBaseline].fetch_add(baseline_units.size(),
+ std::memory_order_relaxed);
+ }
+ if (!top_tier_units.empty()) {
+ queue->units_[kTopTier].insert(
+ queue->units_[kTopTier].end(),
+ std::make_move_iterator(top_tier_units.begin()),
+ std::make_move_iterator(top_tier_units.end()));
+ num_units_[kTopTier].fetch_add(top_tier_units.size(),
+ std::memory_order_relaxed);
+ }
+ }
+
+ // Get the current total number of units in all queues. This is only a
+ // momentary snapshot; it's not guaranteed that {GetNextUnit} returns a unit
+ // if this method returns non-zero.
+ size_t GetTotalSize() const {
+ size_t total = 0;
+ for (auto& atomic_counter : num_units_) {
+ total += atomic_counter.load(std::memory_order_relaxed);
+ }
+ return total;
+ }
+
+ private:
+ // Store tier in int so we can easily loop over it:
+ static constexpr int kBaseline = 0;
+ static constexpr int kTopTier = 1;
+ static constexpr int kNumTiers = kTopTier + 1;
+
+ struct Queue {
+ base::Mutex mutex_;
+
+ // Protected by {mutex_}:
+ std::vector<std::unique_ptr<WasmCompilationUnit>> units_[kNumTiers];
+ int next_steal_task_id_;
+ // End of fields protected by {mutex_}.
+ };
+
+ std::vector<Queue> queues_;
+
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<int> next_queue_to_add{0};
+
+ int next_task_id(int task_id) const {
+ int next = task_id + 1;
+ return next == static_cast<int>(queues_.size()) ? 0 : next;
+ }
+
+ int GetLowestTierWithUnits() const {
+ for (int tier = 0; tier < kNumTiers; ++tier) {
+ if (num_units_[tier].load(std::memory_order_relaxed) > 0) return tier;
+ }
+ return kNumTiers;
+ }
+
+ void DecrementUnitCount(int tier) {
+ size_t old_units_count = num_units_[tier].fetch_sub(1);
+ DCHECK_LE(1, old_units_count);
+ USE(old_units_count);
+ }
+
+ // Steal units of {wanted_tier} from {steal_from_task_id} to {task_id}. Return
+ // first stolen unit (rest put in queue of {task_id}), or {nullptr} if
+ // {steal_from_task_id} had no units of {wanted_tier}.
+ std::unique_ptr<WasmCompilationUnit> StealUnitsAndGetFirst(
+ int task_id, int steal_from_task_id, int wanted_tier) {
+ DCHECK_NE(task_id, steal_from_task_id);
+ std::vector<std::unique_ptr<WasmCompilationUnit>> stolen;
+ {
+ Queue* steal_queue = &queues_[steal_from_task_id];
+ base::MutexGuard guard(&steal_queue->mutex_);
+ if (steal_queue->units_[wanted_tier].empty()) return {};
+ auto* steal_from_vector = &steal_queue->units_[wanted_tier];
+ size_t remaining = steal_from_vector->size() / 2;
+ stolen.assign(
+ std::make_move_iterator(steal_from_vector->begin()) + remaining,
+ std::make_move_iterator(steal_from_vector->end()));
+ steal_from_vector->resize(remaining);
+ }
+ DCHECK(!stolen.empty());
+ auto returned_unit = std::move(stolen.back());
+ stolen.pop_back();
+ Queue* queue = &queues_[task_id];
+ base::MutexGuard guard(&queue->mutex_);
+ auto* target_queue = &queue->units_[wanted_tier];
+ target_queue->insert(target_queue->end(),
+ std::make_move_iterator(stolen.begin()),
+ std::make_move_iterator(stolen.end()));
+ queue->next_steal_task_id_ = next_task_id(steal_from_task_id);
+ return returned_unit;
+ }
};
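For readers skimming the diff, here is a minimal standalone sketch of the work-stealing policy implemented by {CompilationUnitQueues}: drain the task's own queue first, then steal from the other queues in round-robin order. Types and names below are simplified stand-ins for illustration, not part of this change.

#include <cstddef>
#include <deque>
#include <memory>
#include <mutex>
#include <vector>

struct Unit {};  // Stand-in for WasmCompilationUnit.

struct SimpleQueue {
  std::mutex mutex;
  std::deque<std::unique_ptr<Unit>> units;  // Guarded by {mutex}.
};

// Pop from the task's own queue; if it is empty, steal from a neighbour.
// The real implementation additionally moves half of the victim's units
// into the thief's queue and tracks per-tier unit counts.
std::unique_ptr<Unit> GetNext(std::vector<SimpleQueue>& queues,
                              size_t task_id) {
  {
    std::lock_guard<std::mutex> guard(queues[task_id].mutex);
    auto& own = queues[task_id].units;
    if (!own.empty()) {
      auto unit = std::move(own.back());
      own.pop_back();
      return unit;
    }
  }
  for (size_t i = 1; i < queues.size(); ++i) {
    size_t victim = (task_id + i) % queues.size();
    std::lock_guard<std::mutex> guard(queues[victim].mutex);
    auto& other = queues[victim].units;
    if (other.empty()) continue;
    auto unit = std::move(other.back());
    other.pop_back();
    return unit;
  }
  return nullptr;  // No work anywhere, at least momentarily.
}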
// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
// compilation of functions.
-// It's public interface {CompilationState} lives in compilation-environment.h.
+// Its public interface {CompilationState} lives in compilation-environment.h.
class CompilationStateImpl {
public:
- CompilationStateImpl(NativeModule*, std::shared_ptr<Counters> async_counters);
- ~CompilationStateImpl();
+ CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
+ std::shared_ptr<Counters> async_counters);
// Cancel all background compilation and wait for all tasks to finish. Call
// this before destructing this object.
@@ -126,7 +303,7 @@ class CompilationStateImpl {
// Set the number of compilation units expected to be executed. Needs to be
// set before {AddCompilationUnits} is run, which triggers background
// compilation.
- void SetNumberOfFunctionsToCompile(int num_functions);
+ void SetNumberOfFunctionsToCompile(int num_functions, int num_lazy_functions);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@@ -135,55 +312,36 @@ class CompilationStateImpl {
// Inserts new functions to compile and kicks off compilation.
void AddCompilationUnits(
- std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
- std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units);
- std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit();
-
- void OnFinishedUnit(ExecutionTier, WasmCode*);
-
- void ReportDetectedFeatures(const WasmFeatures& detected);
- void OnBackgroundTaskStopped(const WasmFeatures& detected);
- void PublishDetectedFeatures(Isolate* isolate, const WasmFeatures& detected);
- void RestartBackgroundCompileTask();
+ Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
+ Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units);
+ void AddTopTierCompilationUnit(std::unique_ptr<WasmCompilationUnit>);
+ std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit(
+ int task_id, CompileBaselineOnly baseline_only);
+
+ void OnFinishedUnit(WasmCode*);
+ void OnFinishedUnits(Vector<WasmCode*>);
+
+ void OnBackgroundTaskStopped(int task_id, const WasmFeatures& detected);
+ void UpdateDetectedFeatures(const WasmFeatures& detected);
+ void PublishDetectedFeatures(Isolate*);
void RestartBackgroundTasks();
- void SetError(uint32_t func_index, const WasmError& error);
+ void SetError();
bool failed() const {
- return compile_error_.load(std::memory_order_relaxed) != nullptr;
+ return compile_failed_.load(std::memory_order_relaxed);
}
bool baseline_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
- return outstanding_baseline_units_ == 0 ||
- (compile_mode_ == CompileMode::kTiering &&
- outstanding_tiering_units_ == 0);
+ DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+ return outstanding_baseline_functions_ == 0;
}
CompileMode compile_mode() const { return compile_mode_; }
+ Counters* counters() const { return async_counters_.get(); }
WasmFeatures* detected_features() { return &detected_features_; }
- // Call {GetCompileError} from foreground threads only, since we access
- // NativeModule::wire_bytes, which is set from the foreground thread once the
- // stream has finished.
- WasmError GetCompileError() {
- CompilationError* error = compile_error_.load(std::memory_order_acquire);
- DCHECK_NOT_NULL(error);
- std::ostringstream error_msg;
- error_msg << "Compiling wasm function \"";
- wasm::ModuleWireBytes wire_bytes(native_module_->wire_bytes());
- wasm::WireBytesRef name_ref = native_module_->module()->LookupFunctionName(
- wire_bytes, error->func_index);
- if (name_ref.is_set()) {
- wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
- error_msg.write(name.start(), name.length());
- } else {
- error_msg << "wasm-function[" << error->func_index << "]";
- }
- error_msg << "\" failed: " << error->error.message();
- return WasmError{error->error.offset(), error_msg.str()};
- }
-
void SetWireBytesStorage(
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
base::MutexGuard guard(&mutex_);
@@ -196,23 +354,24 @@ class CompilationStateImpl {
return wire_bytes_storage_;
}
- private:
- struct CompilationError {
- uint32_t const func_index;
- WasmError const error;
- CompilationError(uint32_t func_index, WasmError error)
- : func_index(func_index), error(std::move(error)) {}
- };
+ const std::shared_ptr<BackgroundCompileToken>& background_compile_token()
+ const {
+ return background_compile_token_;
+ }
+ private:
NativeModule* const native_module_;
const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
const CompileMode compile_mode_;
const std::shared_ptr<Counters> async_counters_;
- // Compilation error, atomically updated, but at most once (nullptr -> error).
- // Uses acquire-release semantics (acquire on load, release on update).
- // For checking whether an error is set, relaxed semantics can be used.
- std::atomic<CompilationError*> compile_error_{nullptr};
+ // Compilation error, atomically updated. This flag can be updated and read
+ // using relaxed semantics.
+ std::atomic<bool> compile_failed_{false};
+
+ const int max_background_tasks_ = 0;
+
+ CompilationUnitQueues compilation_unit_queues_;
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
@@ -221,10 +380,8 @@ class CompilationStateImpl {
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
- std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_compilation_units_;
- std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_compilation_units_;
-
- int num_background_tasks_ = 0;
+ // Set of unused task ids; <= {max_background_tasks_} many.
+ std::vector<int> available_task_ids_;
// Features detected to be used in this module. Features can be detected
// as a module is being compiled.
@@ -249,13 +406,12 @@ class CompilationStateImpl {
// Callback functions to be called on compilation events.
std::vector<CompilationState::callback_t> callbacks_;
- int outstanding_baseline_units_ = 0;
- int outstanding_tiering_units_ = 0;
+ int outstanding_baseline_functions_ = 0;
+ int outstanding_top_tier_functions_ = 0;
+ std::vector<ExecutionTier> highest_execution_tier_;
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
-
- const int max_background_tasks_ = 0;
};
CompilationStateImpl* Impl(CompilationState* compilation_state) {
@@ -284,9 +440,7 @@ CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
void CompilationState::AbortCompilation() { Impl(this)->AbortCompilation(); }
-void CompilationState::SetError(uint32_t func_index, const WasmError& error) {
- Impl(this)->SetError(func_index, error);
-}
+void CompilationState::SetError() { Impl(this)->SetError(); }
void CompilationState::SetWireBytesStorage(
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
@@ -304,13 +458,18 @@ void CompilationState::AddCallback(CompilationState::callback_t callback) {
bool CompilationState::failed() const { return Impl(this)->failed(); }
-void CompilationState::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
- Impl(this)->OnFinishedUnit(tier, code);
+void CompilationState::OnFinishedUnit(WasmCode* code) {
+ Impl(this)->OnFinishedUnit(code);
+}
+
+void CompilationState::OnFinishedUnits(Vector<WasmCode*> code_vector) {
+ Impl(this)->OnFinishedUnits(code_vector);
}
// static
std::unique_ptr<CompilationState> CompilationState::New(
- NativeModule* native_module, std::shared_ptr<Counters> async_counters) {
+ const std::shared_ptr<NativeModule>& native_module,
+ std::shared_ptr<Counters> async_counters) {
return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
new CompilationStateImpl(native_module, std::move(async_counters))));
}
@@ -318,103 +477,123 @@ std::unique_ptr<CompilationState> CompilationState::New(
// End of PIMPL implementation of {CompilationState}.
//////////////////////////////////////////////////////
-WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
- int func_index) {
- base::ElapsedTimer compilation_timer;
- DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
-
- compilation_timer.Start();
-
- TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
-
- const uint8_t* module_start = native_module->wire_bytes().start();
-
- const WasmFunction* func = &native_module->module()->functions[func_index];
- FunctionBody func_body{func->sig, func->code.offset(),
- module_start + func->code.offset(),
- module_start + func->code.end_offset()};
-
- ExecutionTier tier =
- WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
- WasmCompilationUnit unit(isolate->wasm_engine(), func_index, tier);
- CompilationEnv env = native_module->CreateCompilationEnv();
- WasmCompilationResult result = unit.ExecuteCompilation(
- &env, native_module->compilation_state()->GetWireBytesStorage(),
- isolate->counters(),
- Impl(native_module->compilation_state())->detected_features());
- WasmCode* code = unit.Publish(std::move(result), native_module);
-
- // During lazy compilation, we should never get compilation errors. The module
- // was verified before starting execution with lazy compilation.
- // This might be OOM, but then we cannot continue execution anyway.
- // TODO(clemensh): According to the spec, we can actually skip validation at
- // module creation time, and return a function that always traps here.
- CHECK(!native_module->compilation_state()->failed());
-
- if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
-
- int64_t func_size =
- static_cast<int64_t>(func->code.end_offset() - func->code.offset());
- int64_t compilation_time = compilation_timer.Elapsed().InMicroseconds();
-
- auto counters = isolate->counters();
- counters->wasm_lazily_compiled_functions()->Increment();
+namespace {
- counters->wasm_lazy_compilation_throughput()->AddSample(
- compilation_time != 0 ? static_cast<int>(func_size / compilation_time)
- : 0);
+ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint,
+ ExecutionTier default_tier) {
+ switch (hint) {
+ case WasmCompilationHintTier::kDefault:
+ return default_tier;
+ case WasmCompilationHintTier::kInterpreter:
+ return ExecutionTier::kInterpreter;
+ case WasmCompilationHintTier::kBaseline:
+ return ExecutionTier::kLiftoff;
+ case WasmCompilationHintTier::kOptimized:
+ return ExecutionTier::kTurbofan;
+ }
+ UNREACHABLE();
+}
- return code;
+const WasmCompilationHint* GetCompilationHint(const WasmModule* module,
+ uint32_t func_index) {
+ DCHECK_LE(module->num_imported_functions, func_index);
+ uint32_t hint_index = func_index - module->num_imported_functions;
+ const std::vector<WasmCompilationHint>& compilation_hints =
+ module->compilation_hints;
+ if (hint_index < compilation_hints.size()) {
+ return &compilation_hints[hint_index];
+ }
+ return nullptr;
}
-Address CompileLazy(Isolate* isolate, NativeModule* native_module,
- uint32_t func_index) {
- HistogramTimerScope lazy_time_scope(
- isolate->counters()->wasm_lazy_compilation_time());
+bool IsLazyCompilation(const WasmModule* module,
+ const WasmFeatures& enabled_features,
+ uint32_t func_index) {
+ if (enabled_features.compilation_hints) {
+ const WasmCompilationHint* hint = GetCompilationHint(module, func_index);
+ return hint != nullptr &&
+ hint->strategy == WasmCompilationHintStrategy::kLazy;
+ }
+ return false;
+}
- DCHECK(!native_module->lazy_compile_frozen());
+bool IsLazyCompilation(const WasmModule* module,
+ const NativeModule* native_module,
+ const WasmFeatures& enabled_features,
+ uint32_t func_index) {
+ if (native_module->lazy_compilation()) return true;
+ return IsLazyCompilation(module, enabled_features, func_index);
+}
- NativeModuleModificationScope native_module_modification_scope(native_module);
+struct ExecutionTierPair {
+ ExecutionTier baseline_tier;
+ ExecutionTier top_tier;
+};
- WasmCode* result = LazyCompileFunction(isolate, native_module, func_index);
- DCHECK_NOT_NULL(result);
- DCHECK_EQ(func_index, result->index());
+ExecutionTierPair GetRequestedExecutionTiers(
+ const WasmModule* module, CompileMode compile_mode,
+ const WasmFeatures& enabled_features, uint32_t func_index) {
+ ExecutionTierPair result;
+ switch (compile_mode) {
+ case CompileMode::kRegular:
+ result.baseline_tier =
+ WasmCompilationUnit::GetDefaultExecutionTier(module);
+ result.top_tier = result.baseline_tier;
+ return result;
+ case CompileMode::kTiering:
+
+ // Default tiering behaviour.
+ result.baseline_tier = ExecutionTier::kLiftoff;
+ result.top_tier = ExecutionTier::kTurbofan;
+
+ // Check if compilation hints override default tiering behaviour.
+ if (enabled_features.compilation_hints) {
+ const WasmCompilationHint* hint =
+ GetCompilationHint(module, func_index);
+ if (hint != nullptr) {
+ result.baseline_tier = ApplyHintToExecutionTier(hint->baseline_tier,
+ result.baseline_tier);
+ result.top_tier =
+ ApplyHintToExecutionTier(hint->top_tier, result.top_tier);
+ }
+ }
- return result->instruction_start();
+ // Correct top tier if necessary.
+ static_assert(ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+ if (result.baseline_tier > result.top_tier) {
+ result.top_tier = result.baseline_tier;
+ }
+ return result;
+ }
+ UNREACHABLE();
}
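As a worked illustration of the hint handling above (hypothetical, simplified enums rather than the V8 types): under CompileMode::kTiering the default pair is {Liftoff, TurboFan}; a hint may lower or raise either tier, and the final step lifts the top tier so it is never below the baseline tier.

#include <algorithm>
#include <cassert>

enum Tier { kInterpreter = 0, kLiftoff = 1, kTurbofan = 2 };
struct TierPair { Tier baseline, top; };

TierPair ApplyHint(Tier hinted_baseline, Tier hinted_top) {
  TierPair result{hinted_baseline, hinted_top};
  // Mirrors the "correct top tier if necessary" step: never tier down.
  result.top = std::max(result.baseline, result.top);
  return result;
}

int main() {
  // A hint asking for an optimized baseline but a lower top tier ends up as
  // {TurboFan, TurboFan}: only one unit would be created for that function.
  TierPair p = ApplyHint(kTurbofan, kLiftoff);
  assert(p.baseline == kTurbofan && p.top == kTurbofan);
  return 0;
}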
-namespace {
-
// The {CompilationUnitBuilder} builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
// {CompilationStateImpl} when {Commit} is called.
class CompilationUnitBuilder {
public:
- explicit CompilationUnitBuilder(NativeModule* native_module,
- WasmEngine* wasm_engine)
+ explicit CompilationUnitBuilder(NativeModule* native_module)
: native_module_(native_module),
- wasm_engine_(wasm_engine),
default_tier_(WasmCompilationUnit::GetDefaultExecutionTier(
native_module->module())) {}
- void AddUnit(uint32_t func_index) {
- switch (compilation_state()->compile_mode()) {
- case CompileMode::kTiering:
- tiering_units_.emplace_back(
- CreateUnit(func_index, ExecutionTier::kOptimized));
- baseline_units_.emplace_back(
- CreateUnit(func_index, ExecutionTier::kBaseline));
- return;
- case CompileMode::kRegular:
- baseline_units_.emplace_back(CreateUnit(func_index, default_tier_));
- return;
+ void AddUnits(uint32_t func_index) {
+ ExecutionTierPair tiers = GetRequestedExecutionTiers(
+ native_module_->module(), compilation_state()->compile_mode(),
+ native_module_->enabled_features(), func_index);
+ baseline_units_.emplace_back(CreateUnit(func_index, tiers.baseline_tier));
+ if (tiers.baseline_tier != tiers.top_tier) {
+ tiering_units_.emplace_back(CreateUnit(func_index, tiers.top_tier));
}
- UNREACHABLE();
}
bool Commit() {
if (baseline_units_.empty() && tiering_units_.empty()) return false;
- compilation_state()->AddCompilationUnits(baseline_units_, tiering_units_);
+ compilation_state()->AddCompilationUnits(VectorOf(baseline_units_),
+ VectorOf(tiering_units_));
Clear();
return true;
}
@@ -427,8 +606,7 @@ class CompilationUnitBuilder {
private:
std::unique_ptr<WasmCompilationUnit> CreateUnit(uint32_t func_index,
ExecutionTier tier) {
- return base::make_unique<WasmCompilationUnit>(wasm_engine_, func_index,
- tier);
+ return base::make_unique<WasmCompilationUnit>(func_index, tier);
}
CompilationStateImpl* compilation_state() const {
@@ -436,170 +614,293 @@ class CompilationUnitBuilder {
}
NativeModule* const native_module_;
- WasmEngine* const wasm_engine_;
const ExecutionTier default_tier_;
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
};
-bool compile_lazy(const WasmModule* module) {
- return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
+} // namespace
+
+void CompileLazy(Isolate* isolate, NativeModule* native_module,
+ uint32_t func_index) {
+ Counters* counters = isolate->counters();
+ HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
+
+ DCHECK(!native_module->lazy_compile_frozen());
+
+ base::ElapsedTimer compilation_timer;
+
+ NativeModuleModificationScope native_module_modification_scope(native_module);
+
+ DCHECK(!native_module->HasCode(static_cast<uint32_t>(func_index)));
+
+ compilation_timer.Start();
+
+ TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
+
+ const uint8_t* module_start = native_module->wire_bytes().start();
+
+ const WasmFunction* func = &native_module->module()->functions[func_index];
+ FunctionBody func_body{func->sig, func->code.offset(),
+ module_start + func->code.offset(),
+ module_start + func->code.end_offset()};
+
+ CompilationStateImpl* compilation_state =
+ Impl(native_module->compilation_state());
+ ExecutionTierPair tiers = GetRequestedExecutionTiers(
+ native_module->module(), compilation_state->compile_mode(),
+ native_module->enabled_features(), func_index);
+
+ WasmCompilationUnit baseline_unit(func_index, tiers.baseline_tier);
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmCompilationResult result = baseline_unit.ExecuteCompilation(
+ isolate->wasm_engine(), &env, compilation_state->GetWireBytesStorage(),
+ isolate->counters(), compilation_state->detected_features());
+ WasmCodeRefScope code_ref_scope;
+ WasmCode* code = native_module->AddCompiledCode(std::move(result));
+
+ if (tiers.baseline_tier < tiers.top_tier) {
+ auto tiering_unit =
+ base::make_unique<WasmCompilationUnit>(func_index, tiers.top_tier);
+ compilation_state->AddTopTierCompilationUnit(std::move(tiering_unit));
+ }
+
+ // During lazy compilation, we should never get compilation errors. The module
+ // was verified before starting execution with lazy compilation.
+ // This might be OOM, but then we cannot continue execution anyway.
+ // TODO(clemensh): According to the spec, we can actually skip validation at
+ // module creation time, and return a function that always traps here.
+ CHECK(!compilation_state->failed());
+
+ // The code we just produced should be the one that was requested.
+ DCHECK_EQ(func_index, code->index());
+
+ if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
+
+ double func_kb = 1e-3 * func->code.length();
+ double compilation_seconds = compilation_timer.Elapsed().InSecondsF();
+
+ counters->wasm_lazily_compiled_functions()->Increment();
+
+ int throughput_sample = static_cast<int>(func_kb / compilation_seconds);
+ counters->wasm_lazy_compilation_throughput()->AddSample(throughput_sample);
}
+namespace {
+
void RecordStats(const Code code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
-double MonotonicallyIncreasingTimeInMs() {
- return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
- base::Time::kMillisecondsPerSecond;
-}
+constexpr int kMainThreadTaskId = -1;
+
+// Run by the main thread and background tasks to take part in compilation.
+// Returns whether any units were executed.
+bool ExecuteCompilationUnits(
+ const std::shared_ptr<BackgroundCompileToken>& token, Counters* counters,
+ int task_id, CompileBaselineOnly baseline_only) {
+ TRACE_COMPILE("Compiling (task %d)...\n", task_id);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ExecuteCompilationUnits");
+
+ const bool is_foreground = task_id == kMainThreadTaskId;
+ // The main thread uses task id 0, which might collide with one of the
+ // background tasks. This is fine, as it will only cause some contention on
+ // the one queue, but works otherwise.
+ if (is_foreground) task_id = 0;
+
+ Platform* platform = V8::GetCurrentPlatform();
+ // Deadline is 50ms from now.
+ static constexpr double kBackgroundCompileTimeLimit =
+ 50.0 / base::Time::kMillisecondsPerSecond;
+ const double deadline =
+ platform->MonotonicallyIncreasingTime() + kBackgroundCompileTimeLimit;
+
+ // These fields are initialized in a {BackgroundCompileScope} before
+ // starting compilation.
+ base::Optional<CompilationEnv> env;
+ std::shared_ptr<WireBytesStorage> wire_bytes;
+ std::shared_ptr<const WasmModule> module;
+ WasmEngine* wasm_engine = nullptr;
+ std::unique_ptr<WasmCompilationUnit> unit;
+ WasmFeatures detected_features = kNoWasmFeatures;
+
+ auto stop = [is_foreground, task_id,
+ &detected_features](BackgroundCompileScope& compile_scope) {
+ if (is_foreground) {
+ compile_scope.compilation_state()->UpdateDetectedFeatures(
+ detected_features);
+ } else {
+ compile_scope.compilation_state()->OnBackgroundTaskStopped(
+ task_id, detected_features);
+ }
+ };
-// Run by each compilation task and by the main thread (i.e. in both
-// foreground and background threads).
-bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
- NativeModule* native_module,
- CompilationStateImpl* compilation_state,
- WasmFeatures* detected,
- Counters* counters) {
- DisallowHeapAccess no_heap_access;
+ // Preparation (synchronized): Initialize the fields above and get the first
+ // compilation unit.
+ {
+ BackgroundCompileScope compile_scope(token);
+ if (compile_scope.cancelled()) return false;
+ env.emplace(compile_scope.native_module()->CreateCompilationEnv());
+ wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
+ module = compile_scope.native_module()->shared_module();
+ wasm_engine = compile_scope.native_module()->engine();
+ unit = compile_scope.compilation_state()->GetNextCompilationUnit(
+ task_id, baseline_only);
+ if (unit == nullptr) {
+ stop(compile_scope);
+ return false;
+ }
+ }
- std::unique_ptr<WasmCompilationUnit> unit =
- compilation_state->GetNextCompilationUnit();
- if (unit == nullptr) return false;
+ std::vector<WasmCompilationResult> results_to_publish;
+
+ auto publish_results = [&results_to_publish](
+ BackgroundCompileScope* compile_scope) {
+ if (results_to_publish.empty()) return;
+ WasmCodeRefScope code_ref_scope;
+ std::vector<WasmCode*> code_vector =
+ compile_scope->native_module()->AddCompiledCode(
+ VectorOf(results_to_publish));
+ compile_scope->compilation_state()->OnFinishedUnits(VectorOf(code_vector));
+ results_to_publish.clear();
+ };
- WasmCompilationResult result = unit->ExecuteCompilation(
- env, compilation_state->GetWireBytesStorage(), counters, detected);
+ bool compilation_failed = false;
+ while (true) {
+ // (asynchronous): Execute the compilation.
+ WasmCompilationResult result = unit->ExecuteCompilation(
+ wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
+ results_to_publish.emplace_back(std::move(result));
- WasmCode* code = unit->Publish(std::move(result), native_module);
- compilation_state->OnFinishedUnit(unit->requested_tier(), code);
+ // (synchronized): Publish the compilation result and get the next unit.
+ {
+ BackgroundCompileScope compile_scope(token);
+ if (compile_scope.cancelled()) return true;
+ if (!results_to_publish.back().succeeded()) {
+ // Compile error.
+ compile_scope.compilation_state()->SetError();
+ stop(compile_scope);
+ compilation_failed = true;
+ break;
+ }
+ // Publish TurboFan units immediately to reduce peak memory consumption.
+ if (result.requested_tier == ExecutionTier::kTurbofan) {
+ publish_results(&compile_scope);
+ }
- return true;
-}
+ // Get next unit.
+ if (deadline < platform->MonotonicallyIncreasingTime()) {
+ unit = nullptr;
+ } else {
+ unit = compile_scope.compilation_state()->GetNextCompilationUnit(
+ task_id, baseline_only);
+ }
-void InitializeCompilationUnits(NativeModule* native_module,
- WasmEngine* wasm_engine) {
- ModuleWireBytes wire_bytes(native_module->wire_bytes());
- const WasmModule* module = native_module->module();
- CompilationUnitBuilder builder(native_module, wasm_engine);
- uint32_t start = module->num_imported_functions;
- uint32_t end = start + module->num_declared_functions;
- for (uint32_t i = start; i < end; ++i) {
- builder.AddUnit(i);
+ if (unit == nullptr) {
+ publish_results(&compile_scope);
+ stop(compile_scope);
+ return true;
+ }
+ }
}
- builder.Commit();
+ // We only get here if compilation failed. Other exits return directly.
+ DCHECK(compilation_failed);
+ USE(compilation_failed);
+ token->Cancel();
+ return true;
}
-void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
- // Data structures for the parallel compilation.
-
- //-----------------------------------------------------------------------
- // For parallel compilation:
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units} within the
- // {compilation_state}. By adding units to the {compilation_state}, new
- // {BackgroundCompileTasks} instances are spawned which run on
- // the background threads.
- // 2) The background threads and the main thread pick one compilation unit at
- // a time and execute the parallel phase of the compilation unit.
-
- // Turn on the {CanonicalHandleScope} so that the background threads can
- // use the node cache.
- CanonicalHandleScope canonical(isolate);
-
- CompilationStateImpl* compilation_state =
- Impl(native_module->compilation_state());
- DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
- int num_wasm_functions =
- static_cast<int>(native_module->module()->num_declared_functions);
- compilation_state->SetNumberOfFunctionsToCompile(num_wasm_functions);
-
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units} within the
- // {compilation_state}. By adding units to the {compilation_state}, new
- // {BackgroundCompileTask} instances are spawned which run on
- // background threads.
- InitializeCompilationUnits(native_module, isolate->wasm_engine());
-
- // 2) The background threads and the main thread pick one compilation unit at
- // a time and execute the parallel phase of the compilation unit.
- WasmFeatures detected_features;
- CompilationEnv env = native_module->CreateCompilationEnv();
- // TODO(wasm): This might already execute TurboFan units on the main thread,
- // while waiting for baseline compilation to finish. This can introduce
- // additional delay.
- // TODO(wasm): This is a busy-wait loop once all units have started executing
- // in background threads. Replace by a semaphore / barrier.
- while (!compilation_state->failed() &&
- !compilation_state->baseline_compilation_finished()) {
- FetchAndExecuteCompilationUnit(&env, native_module, compilation_state,
- &detected_features, isolate->counters());
+DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index,
+ Vector<const uint8_t> code,
+ Counters* counters,
+ AccountingAllocator* allocator,
+ WasmFeatures enabled_features) {
+ const WasmFunction* func = &module->functions[func_index];
+ FunctionBody body{func->sig, func->code.offset(), code.start(), code.end()};
+ DecodeResult result;
+ {
+ auto time_counter = SELECT_WASM_COUNTER(counters, module->origin,
+ wasm_decode, function_time);
+ TimedHistogramScope wasm_decode_function_time_scope(time_counter);
+ WasmFeatures detected;
+ result =
+ VerifyWasmCode(allocator, enabled_features, module, &detected, body);
}
-
- // Publish features from the foreground and background tasks.
- compilation_state->PublishDetectedFeatures(isolate, detected_features);
+ return result;
}
-void CompileSequentially(Isolate* isolate, NativeModule* native_module,
- ErrorThrower* thrower) {
+enum class OnlyLazyFunctions : bool { kNo = false, kYes = true };
+
+void ValidateSequentially(
+ const WasmModule* module, NativeModule* native_module, Counters* counters,
+ AccountingAllocator* allocator, ErrorThrower* thrower,
+ OnlyLazyFunctions only_lazy_functions = OnlyLazyFunctions::kNo) {
DCHECK(!thrower->error());
+ uint32_t start = module->num_imported_functions;
+ uint32_t end = start + module->num_declared_functions;
+ auto enabled_features = native_module->enabled_features();
+ for (uint32_t func_index = start; func_index < end; func_index++) {
+ // Skip non-lazy functions if requested.
+ if (only_lazy_functions == OnlyLazyFunctions::kYes &&
+ !IsLazyCompilation(module, native_module, enabled_features,
+ func_index)) {
+ continue;
+ }
+ ModuleWireBytes wire_bytes{native_module->wire_bytes()};
+ const WasmFunction* func = &module->functions[func_index];
+ Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
+ DecodeResult result = ValidateSingleFunction(
+ module, func_index, code, counters, allocator, enabled_features);
- ModuleWireBytes wire_bytes(native_module->wire_bytes());
- const WasmModule* module = native_module->module();
- WasmFeatures detected = kNoWasmFeatures;
- auto* comp_state = Impl(native_module->compilation_state());
- ExecutionTier tier =
- WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
- for (const WasmFunction& func : module->functions) {
- if (func.imported) continue; // Imports are compiled at instantiation time.
-
- // Compile the function.
- WasmCompilationUnit::CompileWasmFunction(isolate, native_module, &detected,
- &func, tier);
- if (comp_state->failed()) {
- thrower->CompileFailed(comp_state->GetCompileError());
- break;
+ if (result.failed()) {
+ WasmName name = wire_bytes.GetNameOrNull(func, module);
+ if (name.start() == nullptr) {
+ thrower->CompileError(
+ "Compiling function #%d failed: %s @+%u", func->func_index,
+ result.error().message().c_str(), result.error().offset());
+ } else {
+ TruncatedUserString<> name(wire_bytes.GetNameOrNull(func, module));
+ thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u",
+ func->func_index, name.length(), name.start(),
+ result.error().message().c_str(),
+ result.error().offset());
+ }
}
}
- UpdateFeatureUseCounts(isolate, detected);
}
-void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
- ErrorThrower* thrower) {
- DCHECK(!thrower->error());
+void InitializeCompilationUnits(NativeModule* native_module) {
+ // Set number of functions that must be compiled to consider the module fully
+ // compiled.
+ auto wasm_module = native_module->module();
+ int num_functions = wasm_module->num_declared_functions;
+ DCHECK_IMPLIES(!native_module->enabled_features().compilation_hints,
+ wasm_module->num_lazy_compilation_hints == 0);
+ int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
+ CompilationStateImpl* compilation_state =
+ Impl(native_module->compilation_state());
+ compilation_state->SetNumberOfFunctionsToCompile(num_functions,
+ num_lazy_functions);
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const WasmModule* module = native_module->module();
+ CompilationUnitBuilder builder(native_module);
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
- for (uint32_t i = start; i < end; ++i) {
- const WasmFunction& func = module->functions[i];
-
- const byte* base = wire_bytes.start();
- FunctionBody body{func.sig, func.code.offset(), base + func.code.offset(),
- base + func.code.end_offset()};
- DecodeResult result;
- {
- auto time_counter = SELECT_WASM_COUNTER(
- isolate->counters(), module->origin, wasm_decode, function_time);
-
- TimedHistogramScope wasm_decode_function_time_scope(time_counter);
- WasmFeatures detected;
- result = VerifyWasmCode(isolate->allocator(),
- native_module->enabled_features(), module,
- &detected, body);
- }
- if (result.failed()) {
- TruncatedUserString<> name(wire_bytes.GetNameOrNull(&func, module));
- thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i,
- name.length(), name.start(),
- result.error().message().c_str(),
- result.error().offset());
- break;
+ for (uint32_t func_index = start; func_index < end; func_index++) {
+ if (IsLazyCompilation(module, native_module,
+ native_module->enabled_features(), func_index)) {
+ native_module->UseLazyStub(func_index);
+ } else {
+ builder.AddUnits(func_index);
}
}
+ builder.Commit();
+}
+
+bool NeedsDeterministicCompile() {
+ return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1;
}
void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
@@ -607,7 +908,8 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
- if (compile_lazy(wasm_module)) {
+ if (FLAG_wasm_lazy_compilation ||
+ (FLAG_asm_wasm_lazy_compilation && wasm_module->origin == kAsmJsOrigin)) {
if (wasm_module->origin == kWasmOrigin) {
// Validate wasm modules for lazy compilation. Don't validate asm.js
// modules, they are valid by construction (otherwise a CHECK will fail
@@ -615,29 +917,71 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
// TODO(clemensh): According to the spec, we can actually skip validation
// at module creation time, and return a function that always traps at
// (lazy) compilation time.
- ValidateSequentially(isolate, native_module, thrower);
+ ValidateSequentially(wasm_module, native_module, isolate->counters(),
+ isolate->allocator(), thrower);
+ // On error: Return and leave the module in an unexecutable state.
if (thrower->error()) return;
}
+ native_module->set_lazy_compilation(true);
+ native_module->UseLazyStubs();
+ return;
+ }
- native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
- } else {
- size_t funcs_to_compile =
- wasm_module->functions.size() - wasm_module->num_imported_functions;
- bool compile_parallel =
- !FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks > 0 &&
- funcs_to_compile > 1 &&
- V8::GetCurrentPlatform()->NumberOfWorkerThreads() > 0;
-
- if (compile_parallel) {
- CompileInParallel(isolate, native_module);
- } else {
- CompileSequentially(isolate, native_module, thrower);
- }
- auto* compilation_state = Impl(native_module->compilation_state());
- if (compilation_state->failed()) {
- thrower->CompileFailed(compilation_state->GetCompileError());
+ if (native_module->enabled_features().compilation_hints) {
+ ValidateSequentially(wasm_module, native_module, isolate->counters(),
+ isolate->allocator(), thrower,
+ OnlyLazyFunctions::kYes);
+ // On error: Return and leave the module in an unexecutable state.
+ if (thrower->error()) return;
+ }
+
+ // Turn on the {CanonicalHandleScope} so that the background threads can
+ // use the node cache.
+ CanonicalHandleScope canonical(isolate);
+
+ auto* compilation_state = Impl(native_module->compilation_state());
+ DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
+
+ // Install a callback to notify us once background compilation has finished
+ // or failed.
+ auto baseline_finished_semaphore = std::make_shared<base::Semaphore>(0);
+ // The callback captures a shared ptr to the semaphore.
+ compilation_state->AddCallback(
+ [baseline_finished_semaphore](CompilationEvent event) {
+ if (event == CompilationEvent::kFinishedBaselineCompilation ||
+ event == CompilationEvent::kFailedCompilation) {
+ baseline_finished_semaphore->Signal();
+ }
+ });
+
+ // Initialize the compilation units and kick off background compile tasks.
+ InitializeCompilationUnits(native_module);
+
+ // If tiering is disabled, the main thread can execute any unit (all of them
+ // are part of initial compilation). Otherwise, just execute baseline units.
+ bool is_tiering = compilation_state->compile_mode() == CompileMode::kTiering;
+ auto baseline_only = is_tiering ? kBaselineOnly : kBaselineOrTopTier;
+ // The main thread contributes to the compilation, except if we need
+ // deterministic compilation; in that case, the single background task will
+ // execute all compilation.
+ if (!NeedsDeterministicCompile()) {
+ while (ExecuteCompilationUnits(
+ compilation_state->background_compile_token(), isolate->counters(),
+ kMainThreadTaskId, baseline_only)) {
+ // Continue executing compilation units.
}
}
+
+ // Now wait until baseline compilation finished.
+ baseline_finished_semaphore->Wait();
+
+ compilation_state->PublishDetectedFeatures(isolate);
+
+ if (compilation_state->failed()) {
+ ValidateSequentially(wasm_module, native_module, isolate->counters(),
+ isolate->allocator(), thrower);
+ CHECK(thrower->error());
+ }
}
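The synchronization above follows a common pattern: register a callback that signals a semaphore on {kFinishedBaselineCompilation} or {kFailedCompilation}, contribute work on the main thread, then block until the signal arrives. A minimal sketch of the same hand-off using standard-library primitives (not the v8::base types used in the change):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>

int main() {
  std::mutex mutex;
  std::condition_variable cv;
  bool baseline_finished = false;

  // Stand-in for the callback registered via CompilationState::AddCallback.
  std::function<void()> on_event = [&] {
    std::lock_guard<std::mutex> guard(mutex);
    baseline_finished = true;
    cv.notify_one();
  };

  // Stand-in for the background compile tasks finishing baseline compilation.
  std::thread worker(on_event);

  // Stand-in for baseline_finished_semaphore->Wait(): block until signalled.
  {
    std::unique_lock<std::mutex> lock(mutex);
    cv.wait(lock, [&] { return baseline_finished; });
  }
  worker.join();
  return 0;
}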
// The runnable task that performs compilations in the background.
@@ -645,96 +989,27 @@ class BackgroundCompileTask : public CancelableTask {
public:
explicit BackgroundCompileTask(CancelableTaskManager* manager,
std::shared_ptr<BackgroundCompileToken> token,
- std::shared_ptr<Counters> async_counters)
+ std::shared_ptr<Counters> async_counters,
+ int task_id)
: CancelableTask(manager),
token_(std::move(token)),
- async_counters_(std::move(async_counters)) {}
+ async_counters_(std::move(async_counters)),
+ task_id_(task_id) {}
void RunInternal() override {
- TRACE_COMPILE("(3b) Compiling...\n");
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "BackgroundCompileTask::RunInternal");
-
- double deadline = MonotonicallyIncreasingTimeInMs() + 50.0;
-
- // These fields are initialized in a {BackgroundCompileScope} before
- // starting compilation.
- base::Optional<CompilationEnv> env;
- std::shared_ptr<WireBytesStorage> wire_bytes;
- std::shared_ptr<const WasmModule> module;
- std::unique_ptr<WasmCompilationUnit> unit;
- WasmFeatures detected_features = kNoWasmFeatures;
-
- // Preparation (synchronized): Initialize the fields above and get the first
- // compilation unit.
- {
- BackgroundCompileScope compile_scope(token_);
- if (compile_scope.cancelled()) return;
- env.emplace(compile_scope.native_module()->CreateCompilationEnv());
- wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
- module = compile_scope.native_module()->shared_module();
- unit = compile_scope.compilation_state()->GetNextCompilationUnit();
- if (unit == nullptr) {
- compile_scope.compilation_state()->OnBackgroundTaskStopped(
- detected_features);
- return;
- }
- }
-
- bool compilation_failed = false;
- while (true) {
- // (asynchronous): Execute the compilation.
-
- WasmCompilationResult result = unit->ExecuteCompilation(
- &env.value(), wire_bytes, async_counters_.get(), &detected_features);
-
- // (synchronized): Publish the compilation result and get the next unit.
- {
- BackgroundCompileScope compile_scope(token_);
- if (compile_scope.cancelled()) return;
- WasmCode* code =
- unit->Publish(std::move(result), compile_scope.native_module());
- if (code == nullptr) {
- // Compile error.
- compile_scope.compilation_state()->OnBackgroundTaskStopped(
- detected_features);
- compilation_failed = true;
- break;
- }
-
- // Successfully finished one unit.
- compile_scope.compilation_state()->OnFinishedUnit(
- unit->requested_tier(), code);
- if (deadline < MonotonicallyIncreasingTimeInMs()) {
- compile_scope.compilation_state()->ReportDetectedFeatures(
- detected_features);
- compile_scope.compilation_state()->RestartBackgroundCompileTask();
- return;
- }
-
- // Get next unit.
- unit = compile_scope.compilation_state()->GetNextCompilationUnit();
- if (unit == nullptr) {
- compile_scope.compilation_state()->OnBackgroundTaskStopped(
- detected_features);
- return;
- }
- }
- }
- // We only get here if compilation failed. Other exits return directly.
- DCHECK(compilation_failed);
- USE(compilation_failed);
- token_->Cancel();
+ ExecuteCompilationUnits(token_, async_counters_.get(), task_id_,
+ kBaselineOrTopTier);
}
private:
- std::shared_ptr<BackgroundCompileToken> token_;
- std::shared_ptr<Counters> async_counters_;
+ const std::shared_ptr<BackgroundCompileToken> token_;
+ const std::shared_ptr<Counters> async_counters_;
+ const int task_id_;
};
} // namespace
-std::unique_ptr<NativeModule> CompileToNativeModule(
+std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out) {
@@ -768,8 +1043,8 @@ std::unique_ptr<NativeModule> CompileToNativeModule(
if (thrower->error()) return {};
// Compile JS->wasm wrappers for exported functions.
- *export_wrappers_out =
- isolate->factory()->NewFixedArray(export_wrapper_size, TENURED);
+ *export_wrappers_out = isolate->factory()->NewFixedArray(
+ export_wrapper_size, AllocationType::kOld);
CompileJsToWasmWrappers(isolate, native_module->module(),
*export_wrappers_out);
@@ -779,14 +1054,6 @@ std::unique_ptr<NativeModule> CompileToNativeModule(
return native_module;
}
-void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
- ErrorThrower* thrower,
- const WasmModule* wasm_module,
- NativeModule* native_module) {
- native_module->DisableTrapHandler();
- CompileNativeModule(isolate, thrower, wasm_module, native_module);
-}
-
AsyncCompileJob::AsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
@@ -936,7 +1203,8 @@ void AsyncCompileJob::FinishCompile() {
if (script->type() == Script::TYPE_WASM &&
module_object_->module()->source_map_url.size() != 0) {
MaybeHandle<String> src_map_str = isolate_->factory()->NewStringFromUtf8(
- CStrVector(module_object_->module()->source_map_url.c_str()), TENURED);
+ CStrVector(module_object_->module()->source_map_url.c_str()),
+ AllocationType::kOld);
script->set_source_mapping_url(*src_map_str.ToHandleChecked());
}
isolate_->debug()->OnAfterCompile(script);
@@ -944,8 +1212,7 @@ void AsyncCompileJob::FinishCompile() {
// We can only update the feature counts once the entire compile is done.
auto compilation_state =
Impl(module_object_->native_module()->compilation_state());
- compilation_state->PublishDetectedFeatures(
- isolate_, *compilation_state->detected_features());
+ compilation_state->PublishDetectedFeatures(isolate_);
// TODO(bbudge) Allow deserialization without wrapper compilation, so we can
// just compile wrappers here.
@@ -956,7 +1223,7 @@ void AsyncCompileJob::FinishCompile() {
FinishModule();
}
-void AsyncCompileJob::AsyncCompileFailed(const WasmError& error) {
+void AsyncCompileJob::DecodeFailed(const WasmError& error) {
ErrorThrower thrower(isolate_, "WebAssembly.compile()");
thrower.CompileFailed(error);
// {job} keeps the {this} pointer alive.
@@ -965,6 +1232,17 @@ void AsyncCompileJob::AsyncCompileFailed(const WasmError& error) {
resolver_->OnCompilationFailed(thrower.Reify());
}
+void AsyncCompileJob::AsyncCompileFailed() {
+ ErrorThrower thrower(isolate_, "WebAssembly.compile()");
+ ValidateSequentially(native_module_->module(), native_module_.get(),
+ isolate_->counters(), isolate_->allocator(), &thrower);
+ DCHECK(thrower.error());
+ // {job} keeps the {this} pointer alive.
+ std::shared_ptr<AsyncCompileJob> job =
+ isolate_->wasm_engine()->RemoveCompileJob(this);
+ resolver_->OnCompilationFailed(thrower.Reify());
+}
+
void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
resolver_->OnCompilationSucceeded(result);
}
@@ -984,12 +1262,14 @@ class AsyncCompileJob::CompilationStateCallback {
break;
case CompilationEvent::kFinishedTopTierCompilation:
DCHECK_EQ(CompilationEvent::kFinishedBaselineCompilation, last_event_);
- // This callback should not react to top tier finished callbacks, since
- // the job might already be gone then.
+ // At this point, the job will already be gone, thus do not access it
+ // here.
break;
case CompilationEvent::kFailedCompilation: {
DCHECK(!last_event_.has_value());
- job_->DoSync<CompileFailed>();
+ if (job_->DecrementAndCheckFinisherCount()) {
+ job_->DoSync<CompileFailed>();
+ }
break;
}
default:
@@ -1150,6 +1430,30 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
job->enabled_features_, job->wire_bytes_.start(),
job->wire_bytes_.end(), false, kWasmOrigin, counters_,
job->isolate()->wasm_engine()->allocator());
+
+ // Validate lazy functions here.
+ auto enabled_features = job->enabled_features_;
+ if (enabled_features.compilation_hints && result.ok()) {
+ const WasmModule* module = result.value().get();
+ auto allocator = job->isolate()->wasm_engine()->allocator();
+ int start = module->num_imported_functions;
+ int end = start + module->num_declared_functions;
+
+ for (int func_index = start; func_index < end; func_index++) {
+ const WasmFunction* func = &module->functions[func_index];
+ Vector<const uint8_t> code = job->wire_bytes_.GetFunctionBytes(func);
+
+ if (IsLazyCompilation(module, enabled_features, func_index)) {
+ DecodeResult function_result =
+ ValidateSingleFunction(module, func_index, code, counters_,
+ allocator, enabled_features);
+ if (function_result.failed()) {
+ result = ModuleResult(function_result.error());
+ break;
+ }
+ }
+ }
+ }
}
if (result.failed()) {
// Decoding failure; reject the promise and clean up.
@@ -1176,8 +1480,8 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(1b) Decoding failed.\n");
- // {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job->AsyncCompileFailed(error_);
+ // {job_} is deleted in DecodeFailed, therefore the {return}.
+ return job->DecodeFailed(error_);
}
};
@@ -1203,15 +1507,6 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
job->CreateNativeModule(module_);
- size_t num_functions =
- module_->functions.size() - module_->num_imported_functions;
-
- if (num_functions == 0) {
- // Degenerate case of an empty module.
- job->FinishCompile();
- return;
- }
-
CompilationStateImpl* compilation_state =
Impl(job->native_module_->compilation_state());
compilation_state->AddCallback(CompilationStateCallback{job});
@@ -1221,11 +1516,8 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// InitializeCompilationUnits always returns 0 for streaming compilation,
// then DoAsync would do the same as NextStep already.
- compilation_state->SetNumberOfFunctionsToCompile(
- module_->num_declared_functions);
// Add compilation units and kick off compilation.
- InitializeCompilationUnits(job->native_module_.get(),
- job->isolate()->wasm_engine());
+ InitializeCompilationUnits(job->native_module_.get());
}
}
};
@@ -1237,13 +1529,34 @@ class AsyncCompileJob::CompileFailed : public CompileStep {
private:
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(3a) Compilation failed\n");
+ DCHECK(job->native_module_->compilation_state()->failed());
- WasmError error =
- Impl(job->native_module_->compilation_state())->GetCompileError();
// {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job->AsyncCompileFailed(error);
+ return job->AsyncCompileFailed();
+ }
+};
+
+namespace {
+class SampleTopTierCodeSizeCallback {
+ public:
+ explicit SampleTopTierCodeSizeCallback(
+ std::weak_ptr<NativeModule> native_module)
+ : native_module_(std::move(native_module)) {}
+
+ void operator()(CompilationEvent event) {
+ // This callback is registered after baseline compilation finished, so the
+ // only possible event to follow is {kFinishedTopTierCompilation}.
+ DCHECK_EQ(CompilationEvent::kFinishedTopTierCompilation, event);
+ if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
+ native_module->engine()->SampleTopTierCodeSizeInAllIsolates(
+ native_module);
+ }
}
+
+ private:
+ std::weak_ptr<NativeModule> native_module_;
};
+} // namespace
//==========================================================================
// Step 3b (sync): Compilation finished.
@@ -1252,6 +1565,15 @@ class AsyncCompileJob::CompileFinished : public CompileStep {
private:
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(3b) Compilation finished\n");
+ DCHECK(!job->native_module_->compilation_state()->failed());
+ // Sample the generated code size when baseline compilation finished.
+ job->native_module_->SampleCodeSize(job->isolate_->counters(),
+ NativeModule::kAfterBaseline);
+ // Also, set a callback to sample the code size after top-tier compilation
+ // finished. This callback will *not* keep the NativeModule alive.
+ job->native_module_->compilation_state()->AddCallback(
+ SampleTopTierCodeSizeCallback{job->native_module_});
+ // Then finalize and publish the generated module.
job->FinishCompile();
}
};
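
The {SampleTopTierCodeSizeCallback} above deliberately captures only a
std::weak_ptr, so registering it does not extend the lifetime of the
NativeModule. A minimal standalone sketch of that idiom, using only the
standard library (the Module type and SampleCodeSize name are illustrative,
not the V8 API):

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <vector>

    struct Module {
      void SampleCodeSize() const { std::cout << "sampling\n"; }
    };

    int main() {
      auto module = std::make_shared<Module>();
      std::vector<std::function<void()>> callbacks;

      // The callback holds only a weak reference, so it does not keep the
      // module alive once the last shared_ptr owner is gone.
      callbacks.emplace_back([weak = std::weak_ptr<Module>(module)] {
        if (auto strong = weak.lock()) strong->SampleCodeSize();
      });

      for (auto& cb : callbacks) cb();  // Samples: module is still alive.
      module.reset();                   // Drop the last strong reference.
      for (auto& cb : callbacks) cb();  // No-op: lock() returns nullptr.
      return 0;
    }
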
@@ -1361,17 +1683,24 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
// task.
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false);
- job_->native_module_->compilation_state()->SetWireBytesStorage(
- std::move(wire_bytes_storage));
-
auto* compilation_state = Impl(job_->native_module_->compilation_state());
- compilation_state->SetNumberOfFunctionsToCompile(functions_count);
+ compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
+
+  // Set the number of functions that must be compiled before the module is
+  // considered fully compiled.
+ auto wasm_module = job_->native_module_->module();
+ int num_functions = wasm_module->num_declared_functions;
+ DCHECK_IMPLIES(!job_->native_module_->enabled_features().compilation_hints,
+ wasm_module->num_lazy_compilation_hints == 0);
+ int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
+ compilation_state->SetNumberOfFunctionsToCompile(num_functions,
+ num_lazy_functions);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.store(2);
- compilation_unit_builder_.reset(new CompilationUnitBuilder(
- job_->native_module_.get(), job_->isolate()->wasm_engine()));
+ compilation_unit_builder_.reset(
+ new CompilationUnitBuilder(job_->native_module_.get()));
return true;
}
@@ -1383,11 +1712,33 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
decoder_.DecodeFunctionBody(
num_functions_, static_cast<uint32_t>(bytes.length()), offset, false);
- int index = num_functions_ + decoder_.module()->num_imported_functions;
- compilation_unit_builder_->AddUnit(index);
+ NativeModule* native_module = job_->native_module_.get();
+ const WasmModule* module = native_module->module();
+ auto enabled_features = native_module->enabled_features();
+ uint32_t func_index =
+ num_functions_ + decoder_.module()->num_imported_functions;
+
+ if (IsLazyCompilation(module, native_module, enabled_features, func_index)) {
+ Counters* counters = Impl(native_module->compilation_state())->counters();
+ AccountingAllocator* allocator = native_module->engine()->allocator();
+
+    // The native module does not own the wire bytes until {SetWireBytes} is
+    // called in {OnFinishedStream}. Validation must use the {bytes}
+    // parameter.
+ DecodeResult result = ValidateSingleFunction(
+ module, func_index, bytes, counters, allocator, enabled_features);
+
+ if (result.failed()) {
+ FinishAsyncCompileJobWithError(result.error());
+ return false;
+ }
+
+ native_module->UseLazyStub(func_index);
+ } else {
+ compilation_unit_builder_->AddUnits(func_index);
+ }
+
++num_functions_;
- // This method always succeeds. The return value is necessary to comply with
- // the StreamingProcessor interface.
+
return true;
}
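
The branch added to {ProcessFunctionBody} makes the eager/lazy split during
streaming explicit: a function selected for lazy compilation is only validated
up front (and later gets a lazy stub), every other function is queued for
background compilation. A simplified, self-contained sketch of that control
flow; the names are stand-ins, not the real streaming interfaces:

    #include <cstdint>
    #include <iostream>

    enum class Mode { kLazy, kEager };

    // Stand-in for {ValidateSingleFunction}; pretend function 7 is broken.
    bool Validate(uint32_t func_index) { return func_index != 7; }

    bool ProcessFunction(uint32_t func_index, Mode mode) {
      if (mode == Mode::kLazy) {
        // Lazy functions are validated eagerly; a failure rejects the module.
        if (!Validate(func_index)) return false;
        std::cout << "install lazy stub for function " << func_index << "\n";
      } else {
        std::cout << "enqueue compile unit for function " << func_index << "\n";
      }
      return true;
    }

    int main() {
      ProcessFunction(3, Mode::kEager);
      ProcessFunction(4, Mode::kLazy);
      return 0;
    }
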
@@ -1425,7 +1776,11 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
job_->native_module_->SetWireBytes(std::move(bytes));
if (needs_finish) {
- job_->FinishCompile();
+ if (job_->native_module_->compilation_state()->failed()) {
+ job_->AsyncCompileFailed();
+ } else {
+ job_->FinishCompile();
+ }
}
}
@@ -1461,9 +1816,20 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
return true;
}
+namespace {
+int GetMaxBackgroundTasks() {
+ if (NeedsDeterministicCompile()) return 1;
+ int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
+ int num_compile_tasks =
+ std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
+ return std::max(1, num_compile_tasks);
+}
+} // namespace
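
{GetMaxBackgroundTasks} clamps the task count to at least one and at most the
smaller of the flag value and the number of worker threads, and forces a
single task for deterministic compilation. A sketch of just that clamping,
with the platform and flag lookups replaced by plain parameters (assumed
inputs, not the real API):

    #include <algorithm>
    #include <cassert>

    int MaxBackgroundTasks(bool deterministic, int flag_value,
                           int worker_threads) {
      if (deterministic) return 1;
      return std::max(1, std::min(flag_value, worker_threads));
    }

    int main() {
      assert(MaxBackgroundTasks(true, 8, 16) == 1);  // deterministic mode
      assert(MaxBackgroundTasks(false, 8, 4) == 4);  // limited by threads
      assert(MaxBackgroundTasks(false, 0, 4) == 1);  // never below one task
      return 0;
    }
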
+
CompilationStateImpl::CompilationStateImpl(
- NativeModule* native_module, std::shared_ptr<Counters> async_counters)
- : native_module_(native_module),
+ const std::shared_ptr<NativeModule>& native_module,
+ std::shared_ptr<Counters> async_counters)
+ : native_module_(native_module.get()),
background_compile_token_(
std::make_shared<BackgroundCompileToken>(native_module)),
compile_mode_(FLAG_wasm_tier_up &&
@@ -1471,13 +1837,14 @@ CompilationStateImpl::CompilationStateImpl(
? CompileMode::kTiering
: CompileMode::kRegular),
async_counters_(std::move(async_counters)),
- max_background_tasks_(std::max(
- 1, std::min(FLAG_wasm_num_compilation_tasks,
- V8::GetCurrentPlatform()->NumberOfWorkerThreads()))) {}
-
-CompilationStateImpl::~CompilationStateImpl() {
- CompilationError* error = compile_error_.load(std::memory_order_acquire);
- if (error != nullptr) delete error;
+ max_background_tasks_(GetMaxBackgroundTasks()),
+ compilation_unit_queues_(max_background_tasks_),
+ available_task_ids_(max_background_tasks_) {
+ for (int i = 0; i < max_background_tasks_; ++i) {
+ // Ids are popped on task creation, so reverse this list. This ensures that
+ // the first background task gets id 0.
+ available_task_ids_[i] = max_background_tasks_ - 1 - i;
+ }
}
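
The constructor fills {available_task_ids_} in reverse because ids are later
taken from the back of the vector; popping therefore hands out id 0 first. A
tiny standalone check of that invariant (illustrative names only):

    #include <cassert>
    #include <vector>

    int main() {
      const int max_tasks = 4;
      std::vector<int> available(max_tasks);
      for (int i = 0; i < max_tasks; ++i) available[i] = max_tasks - 1 - i;
      // available == {3, 2, 1, 0}; ids are handed out from the back.
      int first = available.back();
      available.pop_back();
      assert(first == 0);
      return 0;
    }
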
void CompilationStateImpl::AbortCompilation() {
@@ -1487,13 +1854,26 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
-void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
+void CompilationStateImpl::SetNumberOfFunctionsToCompile(
+ int num_functions, int num_lazy_functions) {
DCHECK(!failed());
base::MutexGuard guard(&callbacks_mutex_);
- outstanding_baseline_units_ = num_functions;
- if (compile_mode_ == CompileMode::kTiering) {
- outstanding_tiering_units_ = num_functions;
+ int num_functions_to_compile = num_functions - num_lazy_functions;
+ outstanding_baseline_functions_ = num_functions_to_compile;
+ outstanding_top_tier_functions_ = num_functions_to_compile;
+ highest_execution_tier_.assign(num_functions, ExecutionTier::kNone);
+
+ // Degenerate case of an empty module. Trigger callbacks immediately.
+ if (num_functions_to_compile == 0) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedBaselineCompilation);
+ }
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ // Clear the callbacks because no more events will be delivered.
+ callbacks_.clear();
}
}
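
With lazily compiled functions excluded, both outstanding counters start at
num_functions - num_lazy_functions, and an empty (or entirely lazy) module
fires its completion callbacks immediately. A hedged sketch of that
bookkeeping with simplified stand-in names, not the CompilationStateImpl
members:

    #include <cassert>
    #include <functional>
    #include <vector>

    struct Progress {
      int outstanding_baseline = 0;
      int outstanding_top_tier = 0;
      std::vector<std::function<void()>> callbacks;

      void SetNumberOfFunctionsToCompile(int num_functions,
                                         int num_lazy_functions) {
        int to_compile = num_functions - num_lazy_functions;
        outstanding_baseline = to_compile;
        outstanding_top_tier = to_compile;
        if (to_compile == 0) {
          // Degenerate case: nothing to compile, report completion right away.
          for (auto& callback : callbacks) callback();
          callbacks.clear();
        }
      }
    };

    int main() {
      Progress progress;
      int fired = 0;
      progress.callbacks.push_back([&fired] { ++fired; });
      progress.SetNumberOfFunctionsToCompile(/*num_functions=*/4,
                                             /*num_lazy_functions=*/4);
      assert(fired == 1 && progress.callbacks.empty());
      return 0;
    }
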
@@ -1503,171 +1883,185 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
}
void CompilationStateImpl::AddCompilationUnits(
- std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
- std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units) {
- {
- base::MutexGuard guard(&mutex_);
-
- if (compile_mode_ == CompileMode::kTiering) {
- DCHECK_EQ(baseline_units.size(), tiering_units.size());
- DCHECK_EQ(tiering_units.back()->requested_tier(),
- ExecutionTier::kOptimized);
- tiering_compilation_units_.insert(
- tiering_compilation_units_.end(),
- std::make_move_iterator(tiering_units.begin()),
- std::make_move_iterator(tiering_units.end()));
- } else {
- DCHECK(tiering_compilation_units_.empty());
- }
-
- baseline_compilation_units_.insert(
- baseline_compilation_units_.end(),
- std::make_move_iterator(baseline_units.begin()),
- std::make_move_iterator(baseline_units.end()));
- }
+ Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
+ Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units) {
+ compilation_unit_queues_.AddUnits(baseline_units, top_tier_units);
RestartBackgroundTasks();
}
-std::unique_ptr<WasmCompilationUnit>
-CompilationStateImpl::GetNextCompilationUnit() {
- base::MutexGuard guard(&mutex_);
-
- std::vector<std::unique_ptr<WasmCompilationUnit>>& units =
- baseline_compilation_units_.empty() ? tiering_compilation_units_
- : baseline_compilation_units_;
+void CompilationStateImpl::AddTopTierCompilationUnit(
+ std::unique_ptr<WasmCompilationUnit> unit) {
+ AddCompilationUnits({}, {&unit, 1});
+}
- if (!units.empty()) {
- std::unique_ptr<WasmCompilationUnit> unit = std::move(units.back());
- units.pop_back();
- return unit;
- }
+std::unique_ptr<WasmCompilationUnit>
+CompilationStateImpl::GetNextCompilationUnit(
+ int task_id, CompileBaselineOnly baseline_only) {
+ return compilation_unit_queues_.GetNextUnit(task_id, baseline_only);
+}
- return std::unique_ptr<WasmCompilationUnit>();
+void CompilationStateImpl::OnFinishedUnit(WasmCode* code) {
+ OnFinishedUnits({&code, 1});
}
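
{OnFinishedUnit} wraps a single result into a one-element view ({&code, 1})
and reuses the batched {OnFinishedUnits} path. A sketch of that pointer/length
trick, with a minimal stand-in for V8's Vector<T>:

    #include <cassert>
    #include <cstddef>

    template <typename T>
    struct View {  // stand-in for Vector<T>: pointer plus length
      T* start;
      size_t length;
    };

    size_t CountNonNull(View<int*> codes) {
      size_t non_null = 0;
      for (size_t i = 0; i < codes.length; ++i) {
        if (codes.start[i] != nullptr) ++non_null;
      }
      return non_null;
    }

    int main() {
      int dummy = 42;
      int* code = &dummy;
      // A one-element view over the local pointer, as in {&code, 1}.
      assert(CountNonNull({&code, 1}) == 1);
      return 0;
    }
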
-void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
- // This mutex guarantees that events happen in the right order.
+void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
base::MutexGuard guard(&callbacks_mutex_);
- // If we are *not* compiling in tiering mode, then all units are counted as
- // baseline units.
- bool is_tiering_mode = compile_mode_ == CompileMode::kTiering;
- bool is_tiering_unit = is_tiering_mode && tier == ExecutionTier::kOptimized;
-
- // Sanity check: If we are not in tiering mode, there cannot be outstanding
- // tiering units.
- DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
-
- bool baseline_finished = false;
- bool tiering_finished = false;
- if (is_tiering_unit) {
- DCHECK_LT(0, outstanding_tiering_units_);
- --outstanding_tiering_units_;
- tiering_finished = outstanding_tiering_units_ == 0;
- // If baseline compilation has not finished yet, then also trigger
- // {kFinishedBaselineCompilation}.
- baseline_finished = tiering_finished && outstanding_baseline_units_ > 0;
- } else {
- DCHECK_LT(0, outstanding_baseline_units_);
- --outstanding_baseline_units_;
- // If we are in tiering mode and tiering finished before, then do not
- // trigger baseline finished.
- baseline_finished = outstanding_baseline_units_ == 0 &&
- (!is_tiering_mode || outstanding_tiering_units_ > 0);
- // If we are not tiering, then we also trigger the "top tier finished"
- // event when baseline compilation is finished.
- tiering_finished = baseline_finished && !is_tiering_mode;
- }
-
- if (baseline_finished) {
- for (auto& callback : callbacks_)
- callback(CompilationEvent::kFinishedBaselineCompilation);
- }
- if (tiering_finished) {
- for (auto& callback : callbacks_)
- callback(CompilationEvent::kFinishedTopTierCompilation);
- // Clear the callbacks because no more events will be delivered.
- callbacks_.clear();
- }
+ // Assume an order of execution tiers that represents the quality of their
+ // generated code.
+ static_assert(ExecutionTier::kNone < ExecutionTier::kInterpreter &&
+ ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+
+ auto module = native_module_->module();
+ auto enabled_features = native_module_->enabled_features();
+ for (WasmCode* code : code_vector) {
+ DCHECK_NOT_NULL(code);
+ DCHECK_NE(code->tier(), ExecutionTier::kNone);
+ native_module_->engine()->LogCode(code);
+
+    // Skip lazily compiled code, as it does not count towards the completion
+    // of either baseline or top tier compilation.
+ int func_index = code->index();
+ if (IsLazyCompilation(module, native_module_, enabled_features,
+ func_index)) {
+ continue;
+ }
- if (code != nullptr) native_module_->engine()->LogCode(code);
+ // Determine whether we are reaching baseline or top tier with the given
+ // code.
+ uint32_t slot_index = code->index() - module->num_imported_functions;
+ ExecutionTierPair requested_tiers = GetRequestedExecutionTiers(
+ module, compile_mode(), enabled_features, func_index);
+ DCHECK_EQ(highest_execution_tier_.size(), module->num_declared_functions);
+ ExecutionTier prior_tier = highest_execution_tier_[slot_index];
+ bool had_reached_baseline = prior_tier >= requested_tiers.baseline_tier;
+ bool had_reached_top_tier = prior_tier >= requested_tiers.top_tier;
+ DCHECK_IMPLIES(had_reached_baseline, prior_tier > ExecutionTier::kNone);
+ bool reaches_baseline = !had_reached_baseline;
+ bool reaches_top_tier =
+ !had_reached_top_tier && code->tier() >= requested_tiers.top_tier;
+ DCHECK_IMPLIES(reaches_baseline,
+ code->tier() >= requested_tiers.baseline_tier);
+ DCHECK_IMPLIES(reaches_top_tier, had_reached_baseline || reaches_baseline);
+
+ // Remember compilation state before update.
+ bool had_completed_baseline_compilation =
+ outstanding_baseline_functions_ == 0;
+ bool had_completed_top_tier_compilation =
+ outstanding_top_tier_functions_ == 0;
+
+ // Update compilation state.
+ if (code->tier() > prior_tier) {
+ highest_execution_tier_[slot_index] = code->tier();
+ }
+ if (reaches_baseline) outstanding_baseline_functions_--;
+ if (reaches_top_tier) outstanding_top_tier_functions_--;
+ DCHECK_LE(0, outstanding_baseline_functions_);
+ DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+
+ // Conclude if we are completing baseline or top tier compilation.
+ bool completes_baseline_compilation = !had_completed_baseline_compilation &&
+ outstanding_baseline_functions_ == 0;
+ bool completes_top_tier_compilation = !had_completed_top_tier_compilation &&
+ outstanding_top_tier_functions_ == 0;
+ DCHECK_IMPLIES(
+ completes_top_tier_compilation,
+ had_completed_baseline_compilation || completes_baseline_compilation);
+
+ // Trigger callbacks.
+ if (completes_baseline_compilation) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedBaselineCompilation);
+ }
+ }
+ if (completes_top_tier_compilation) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ // Clear the callbacks because no more events will be delivered.
+ callbacks_.clear();
+ }
+ }
}
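
The loop above decrements the outstanding counters only the first time a
function reaches its requested tier, so tier-up results are never counted
twice. A reduced sketch of that rule; tiers and member names are simplified
stand-ins, not the V8 types:

    #include <cassert>
    #include <vector>

    enum Tier { kNone = 0, kLiftoff = 1, kTurbofan = 2 };

    struct Progress {
      std::vector<Tier> highest;
      int outstanding_baseline;
      explicit Progress(int num_functions)
          : highest(num_functions, kNone),
            outstanding_baseline(num_functions) {}

      void OnFinished(int index, Tier tier, Tier requested_baseline) {
        bool had_reached = highest[index] >= requested_baseline;
        if (tier > highest[index]) highest[index] = tier;
        if (!had_reached && highest[index] >= requested_baseline) {
          --outstanding_baseline;  // count each function at most once
        }
      }
    };

    int main() {
      Progress progress(2);
      progress.OnFinished(0, kLiftoff, kLiftoff);   // first result counts
      progress.OnFinished(0, kTurbofan, kLiftoff);  // tier-up: no extra count
      assert(progress.outstanding_baseline == 1);
      return 0;
    }
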
-void CompilationStateImpl::RestartBackgroundCompileTask() {
- auto task =
- native_module_->engine()->NewBackgroundCompileTask<BackgroundCompileTask>(
- background_compile_token_, async_counters_);
-
- if (baseline_compilation_finished()) {
- V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
- std::move(task));
- } else {
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+void CompilationStateImpl::OnBackgroundTaskStopped(
+ int task_id, const WasmFeatures& detected) {
+ {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(0, std::count(available_task_ids_.begin(),
+ available_task_ids_.end(), task_id));
+ DCHECK_GT(max_background_tasks_, available_task_ids_.size());
+ available_task_ids_.push_back(task_id);
+ UnionFeaturesInto(&detected_features_, detected);
}
-}
-void CompilationStateImpl::ReportDetectedFeatures(
- const WasmFeatures& detected) {
- base::MutexGuard guard(&mutex_);
- UnionFeaturesInto(&detected_features_, detected);
+ // The background task could have stopped while we were adding new units, or
+ // because it reached its deadline. In both cases we need to restart tasks to
+ // avoid a potential deadlock.
+ RestartBackgroundTasks();
}
-void CompilationStateImpl::OnBackgroundTaskStopped(
+void CompilationStateImpl::UpdateDetectedFeatures(
const WasmFeatures& detected) {
base::MutexGuard guard(&mutex_);
- DCHECK_LE(1, num_background_tasks_);
- --num_background_tasks_;
UnionFeaturesInto(&detected_features_, detected);
}
-void CompilationStateImpl::PublishDetectedFeatures(
- Isolate* isolate, const WasmFeatures& detected) {
+void CompilationStateImpl::PublishDetectedFeatures(Isolate* isolate) {
// Notifying the isolate of the feature counts must take place under
// the mutex, because even if we have finished baseline compilation,
// tiering compilations may still occur in the background.
base::MutexGuard guard(&mutex_);
- UnionFeaturesInto(&detected_features_, detected);
UpdateFeatureUseCounts(isolate, detected_features_);
}
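
Detected-feature reporting follows a gather/publish split: each background
task merges what it detected into a shared set under the mutex, and the union
is forwarded to the isolate once. A sketch with bit flags standing in for the
EnumSet-based WasmFeatures:

    #include <cstdint>
    #include <mutex>

    using Features = uint32_t;  // one bit per detected wasm feature
    constexpr Features kThreads = 1u << 0;
    constexpr Features kSimd = 1u << 1;

    std::mutex features_mutex;
    Features detected_features = 0;

    void UpdateDetectedFeatures(Features locally_detected) {
      std::lock_guard<std::mutex> guard(features_mutex);
      detected_features |= locally_detected;  // union of all observations
    }

    int main() {
      UpdateDetectedFeatures(kThreads);
      UpdateDetectedFeatures(kSimd);
      return detected_features == (kThreads | kSimd) ? 0 : 1;
    }
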
void CompilationStateImpl::RestartBackgroundTasks() {
- int num_restart;
+ // Create new tasks, but only spawn them after releasing the mutex, because
+ // some platforms (e.g. the predictable platform) might execute tasks right
+ // away.
+ std::vector<std::unique_ptr<Task>> new_tasks;
{
base::MutexGuard guard(&mutex_);
+ // Explicit fast path (quite common): If no more task ids are available
+ // (i.e. {max_background_tasks_} tasks are already running), spawn nothing.
+ if (available_task_ids_.empty()) return;
// No need to restart tasks if compilation already failed.
if (failed()) return;
- DCHECK_LE(num_background_tasks_, max_background_tasks_);
- if (num_background_tasks_ == max_background_tasks_) return;
- size_t num_compilation_units =
- baseline_compilation_units_.size() + tiering_compilation_units_.size();
- num_restart = max_background_tasks_ - num_background_tasks_;
- DCHECK_LE(0, num_restart);
- if (num_compilation_units < static_cast<size_t>(num_restart)) {
- num_restart = static_cast<int>(num_compilation_units);
+ size_t max_num_restart = compilation_unit_queues_.GetTotalSize();
+
+ while (!available_task_ids_.empty() && max_num_restart-- > 0) {
+ int task_id = available_task_ids_.back();
+ available_task_ids_.pop_back();
+ new_tasks.emplace_back(
+ native_module_->engine()
+ ->NewBackgroundCompileTask<BackgroundCompileTask>(
+ background_compile_token_, async_counters_, task_id));
}
- num_background_tasks_ += num_restart;
}
- for (; num_restart > 0; --num_restart) {
- RestartBackgroundCompileTask();
+ if (baseline_compilation_finished()) {
+ for (auto& task : new_tasks) {
+ V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
+ std::move(task));
+ }
+ } else {
+ for (auto& task : new_tasks) {
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+ }
}
}
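
{RestartBackgroundTasks} constructs the new tasks while holding the mutex but
only schedules them afterwards, because a platform may run a task
synchronously inside the scheduling call. A standalone sketch of that
ordering, with std::thread standing in for the platform's worker-thread API:

    #include <functional>
    #include <mutex>
    #include <thread>
    #include <vector>

    std::mutex tasks_mutex;
    std::vector<int> available_ids = {2, 1, 0};

    void RestartTasks() {
      std::vector<std::function<void()>> new_tasks;
      {
        std::lock_guard<std::mutex> guard(tasks_mutex);
        while (!available_ids.empty()) {
          int id = available_ids.back();
          available_ids.pop_back();
          new_tasks.push_back([id] { (void)id; /* compile units for id */ });
        }
      }
      // Only start the work after the mutex is released, so a task that runs
      // immediately cannot deadlock trying to re-acquire it.
      std::vector<std::thread> threads;
      for (auto& task : new_tasks) threads.emplace_back(std::move(task));
      for (auto& thread : threads) thread.join();
    }

    int main() {
      RestartTasks();
      return 0;
    }
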
-void CompilationStateImpl::SetError(uint32_t func_index,
- const WasmError& error) {
- DCHECK(error.has_error());
- std::unique_ptr<CompilationError> compile_error =
- base::make_unique<CompilationError>(func_index, error);
- CompilationError* expected = nullptr;
- bool set = compile_error_.compare_exchange_strong(
- expected, compile_error.get(), std::memory_order_acq_rel);
- // Ignore all but the first error. If the previous value is not nullptr, just
- // return (and free the allocated error).
- if (!set) return;
- // If set successfully, give up ownership.
- compile_error.release();
+void CompilationStateImpl::SetError() {
+ bool expected = false;
+ if (!compile_failed_.compare_exchange_strong(expected, true,
+ std::memory_order_relaxed)) {
+ return; // Already failed before.
+ }
+
base::MutexGuard callbacks_guard(&callbacks_mutex_);
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFailedCompilation);
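
{SetError} now records failure with a single atomic flag: only the thread that
flips it from false to true goes on to notify the callbacks, so later errors
are ignored. A minimal sketch of that first-writer-wins check:

    #include <atomic>
    #include <cassert>

    std::atomic<bool> compile_failed{false};

    bool SetError() {
      bool expected = false;
      return compile_failed.compare_exchange_strong(expected, true,
                                                    std::memory_order_relaxed);
    }

    int main() {
      assert(SetError());   // first caller wins and would fire the callbacks
      assert(!SetError());  // subsequent errors are ignored
      return 0;
    }
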
@@ -1714,12 +2108,13 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
DCHECK(name_chars >= 0 && name_chars < kBufferSize);
MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars), TENURED);
+ VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ AllocationType::kOld);
script->set_name(*name_str.ToHandleChecked());
if (source_map_url.size() != 0) {
MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
- CStrVector(source_map_url.c_str()), TENURED);
+ CStrVector(source_map_url.c_str()), AllocationType::kOld);
script->set_source_mapping_url(*src_map_str.ToHandleChecked());
}
return script;