Diffstat (limited to 'chromium/v8/src/snapshot')
-rw-r--r-- chromium/v8/src/snapshot/builtin-deserializer-allocator.cc | 137
-rw-r--r-- chromium/v8/src/snapshot/builtin-deserializer-allocator.h | 33
-rw-r--r-- chromium/v8/src/snapshot/builtin-deserializer.cc | 117
-rw-r--r-- chromium/v8/src/snapshot/builtin-deserializer.h | 11
-rw-r--r-- chromium/v8/src/snapshot/builtin-serializer.cc | 62
-rw-r--r-- chromium/v8/src/snapshot/builtin-serializer.h | 16
-rw-r--r-- chromium/v8/src/snapshot/builtin-snapshot-utils.cc | 67
-rw-r--r-- chromium/v8/src/snapshot/builtin-snapshot-utils.h | 56
-rw-r--r-- chromium/v8/src/snapshot/code-serializer.cc | 63
-rw-r--r-- chromium/v8/src/snapshot/code-serializer.h | 15
-rw-r--r-- chromium/v8/src/snapshot/default-deserializer-allocator.cc | 5
-rw-r--r-- chromium/v8/src/snapshot/default-deserializer-allocator.h | 2
-rw-r--r-- chromium/v8/src/snapshot/deserializer.cc | 36
-rw-r--r-- chromium/v8/src/snapshot/deserializer.h | 5
-rw-r--r-- chromium/v8/src/snapshot/mksnapshot.cc | 97
-rw-r--r-- chromium/v8/src/snapshot/object-deserializer.cc | 15
-rw-r--r-- chromium/v8/src/snapshot/partial-serializer.cc | 8
-rw-r--r-- chromium/v8/src/snapshot/serializer-common.h | 42
-rw-r--r-- chromium/v8/src/snapshot/serializer.cc | 47
-rw-r--r-- chromium/v8/src/snapshot/serializer.h | 8
-rw-r--r-- chromium/v8/src/snapshot/snapshot-common.cc | 122
-rw-r--r-- chromium/v8/src/snapshot/snapshot-source-sink.h | 6
-rw-r--r-- chromium/v8/src/snapshot/snapshot.h | 34
-rw-r--r-- chromium/v8/src/snapshot/startup-deserializer.cc | 6
-rw-r--r-- chromium/v8/src/snapshot/startup-serializer.cc | 12
-rw-r--r-- chromium/v8/src/snapshot/startup-serializer.h | 10
26 files changed, 355 insertions, 677 deletions
diff --git a/chromium/v8/src/snapshot/builtin-deserializer-allocator.cc b/chromium/v8/src/snapshot/builtin-deserializer-allocator.cc
index 4e3d546fa0a..80300c9f1df 100644
--- a/chromium/v8/src/snapshot/builtin-deserializer-allocator.cc
+++ b/chromium/v8/src/snapshot/builtin-deserializer-allocator.cc
@@ -19,16 +19,6 @@ BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer)
: deserializer_(deserializer) {}
-BuiltinDeserializerAllocator::~BuiltinDeserializerAllocator() {
- delete handler_allocations_;
-}
-
-namespace {
-int HandlerAllocationIndex(int code_object_id) {
- return code_object_id - BuiltinSnapshotUtils::kFirstHandlerIndex;
-}
-} // namespace
-
Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
int size) {
const int code_object_id = deserializer()->CurrentCodeObjectId();
@@ -39,30 +29,14 @@ Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
RegisterCodeObjectAllocation(code_object_id);
#endif
- if (BSU::IsBuiltinIndex(code_object_id)) {
- Object* obj = isolate()->builtins()->builtin(code_object_id);
- DCHECK(Internals::HasHeapObjectTag(obj));
- return HeapObject::cast(obj)->address();
- } else if (BSU::IsHandlerIndex(code_object_id)) {
- if (handler_allocation_ != kNullAddress) {
- // Lazy deserialization.
- DCHECK_NULL(handler_allocations_);
- return handler_allocation_;
- } else {
- // Eager deserialization.
- DCHECK_EQ(kNullAddress, handler_allocation_);
- DCHECK_NOT_NULL(handler_allocations_);
- int index = HandlerAllocationIndex(code_object_id);
- DCHECK_NE(kNullAddress, handler_allocations_->at(index));
- return handler_allocations_->at(index);
- }
- }
-
- UNREACHABLE();
+ DCHECK(Builtins::IsBuiltinId(code_object_id));
+ Object* obj = isolate()->builtins()->builtin(code_object_id);
+ DCHECK(Internals::HasHeapObjectTag(obj));
+ return HeapObject::cast(obj)->address();
}
Heap::Reservation
-BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
+BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
Heap::Reservation result;
// Reservations for builtins.
@@ -77,7 +51,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
if (i == Builtins::kDeserializeLazy) continue;
// Skip lazy builtins. These will be replaced by the DeserializeLazy code
@@ -91,28 +65,6 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
- // Reservations for bytecode handlers.
-
- BSU::ForEachBytecode(
- [=, &result](Bytecode bytecode, OperandScale operand_scale) {
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
- // Bytecodes without a handler don't require a reservation.
- return;
- } else if (FLAG_lazy_handler_deserialization &&
- deserializer()->IsLazyDeserializationEnabled() &&
- Bytecodes::IsLazy(bytecode)) {
- // Skip lazy handlers. These will be replaced by the DeserializeLazy
- // code object in InitializeFromReservations and thus require no
- // reserved space.
- return;
- }
-
- const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
- uint32_t handler_size = deserializer()->ExtractCodeObjectSize(index);
- DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
- result.push_back({handler_size, kNullAddress, kNullAddress});
- });
-
return result;
}
@@ -130,26 +82,6 @@ void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
#endif
}
-void BuiltinDeserializerAllocator::InitializeHandlerFromReservation(
- const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale) {
- DCHECK_EQ(deserializer()->ExtractCodeObjectSize(
- BSU::BytecodeToIndex(bytecode, operand_scale)),
- chunk.size);
- DCHECK_EQ(chunk.size, chunk.end - chunk.start);
-
- SkipList::Update(chunk.start, chunk.size);
-
- DCHECK_NOT_NULL(handler_allocations_);
- const int index =
- HandlerAllocationIndex(BSU::BytecodeToIndex(bytecode, operand_scale));
- handler_allocations_->at(index) = chunk.start;
-
-#ifdef DEBUG
- RegisterCodeObjectReservation(BSU::BytecodeToIndex(bytecode, operand_scale));
-#endif
-}
-
void BuiltinDeserializerAllocator::InitializeFromReservations(
const Heap::Reservation& reservation) {
DCHECK(!AllowHeapAllocation::IsAllowed());
@@ -168,41 +100,18 @@ void BuiltinDeserializerAllocator::InitializeFromReservations(
reservation_index++;
}
- Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
-
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
if (i == Builtins::kDeserializeLazy) continue;
if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
- builtins->set_builtin(i, deserialize_lazy);
+ builtins->set_builtin(
+ i, builtins->builtin(builtins->LazyDeserializerForBuiltin(i)));
} else {
InitializeBuiltinFromReservation(reservation[reservation_index], i);
reservation_index++;
}
}
- // Initialize interpreter bytecode handler reservations.
-
- DCHECK_NULL(handler_allocations_);
- handler_allocations_ = new std::vector<Address>(BSU::kNumberOfHandlers);
-
- BSU::ForEachBytecode(
- [=, &reservation_index](Bytecode bytecode, OperandScale operand_scale) {
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
- // Bytecodes without a handler don't have a reservation.
- return;
- } else if (FLAG_lazy_handler_deserialization &&
- deserializer()->IsLazyDeserializationEnabled() &&
- Bytecodes::IsLazy(bytecode)) {
- // Likewise, bytecodes with lazy handlers don't either.
- return;
- }
-
- InitializeHandlerFromReservation(reservation[reservation_index],
- bytecode, operand_scale);
- reservation_index++;
- });
-
DCHECK_EQ(reservation.size(), reservation_index);
}
@@ -211,9 +120,9 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(isolate()->builtins()->is_initialized());
DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
- DCHECK_EQ(Builtins::kDeserializeLazy,
- isolate()->builtins()->builtin(builtin_id)->builtin_index());
+ DCHECK(!Builtins::IsLazyDeserializer(builtin_id));
+ DCHECK(Builtins::IsLazyDeserializer(
+ isolate()->builtins()->builtin(builtin_id)->builtin_index()));
const uint32_t builtin_size =
deserializer()->ExtractCodeObjectSize(builtin_id);
@@ -236,28 +145,6 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
#endif
}
-void BuiltinDeserializerAllocator::ReserveForHandler(
- Bytecode bytecode, OperandScale operand_scale) {
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
-
- const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
- const uint32_t handler_size =
- deserializer()->ExtractCodeObjectSize(code_object_id);
- DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
-
- handler_allocation_ =
- isolate()->factory()->NewCodeForDeserialization(handler_size)->address();
-
-// Note: After this point and until deserialization finishes, heap allocation
-// is disallowed. We currently can't safely assert this since we'd need to
-// pass the DisallowHeapAllocation scope out of this function.
-
-#ifdef DEBUG
- RegisterCodeObjectReservation(code_object_id);
-#endif
-}
-
#ifdef DEBUG
void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
int code_object_id) {
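
Note on the hunks above: with handler reservations gone, allocation is strictly two-phase. CreateReservationsForEagerBuiltins() collects sizes for eager builtins only, and InitializeFromReservations() later walks the builtins table, aliasing each lazy entry to its shared lazy-deserializer stub and handing each eager entry its pre-reserved chunk in order. A minimal self-contained sketch of that pattern follows; all names and types here are illustrative stand-ins, not the V8 API (and the sketch leaks its toy chunks for brevity):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Chunk { uint32_t size; uint8_t* start; };

    // Phase 1: reserve space for eager entries only; lazy entries get none.
    std::vector<Chunk> CreateReservations(const std::vector<uint32_t>& sizes,
                                          const std::vector<bool>& is_lazy) {
      std::vector<Chunk> result;
      for (size_t i = 0; i < sizes.size(); i++) {
        if (is_lazy[i]) continue;  // Lazy entries need no reserved space.
        result.push_back({sizes[i], new uint8_t[sizes[i]]});
      }
      return result;
    }

    // Phase 2: eager entries consume reservations in order; lazy entries all
    // alias one shared stub (cf. LazyDeserializerForBuiltin above).
    void InitializeFromReservations(const std::vector<Chunk>& reservation,
                                    const std::vector<bool>& is_lazy,
                                    uint8_t* lazy_stub,
                                    std::vector<uint8_t*>* table) {
      size_t reservation_index = 0;
      for (size_t i = 0; i < table->size(); i++) {
        (*table)[i] = is_lazy[i] ? lazy_stub
                                 : reservation[reservation_index++].start;
      }
    }

    int main() {
      std::vector<uint32_t> sizes = {32, 16, 64};
      std::vector<bool> is_lazy = {false, true, false};
      uint8_t stub = 0;
      std::vector<uint8_t*> table(3);
      InitializeFromReservations(CreateReservations(sizes, is_lazy), is_lazy,
                                 &stub, &table);
    }
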
diff --git a/chromium/v8/src/snapshot/builtin-deserializer-allocator.h b/chromium/v8/src/snapshot/builtin-deserializer-allocator.h
index 65c5872d7a1..b606eb27490 100644
--- a/chromium/v8/src/snapshot/builtin-deserializer-allocator.h
+++ b/chromium/v8/src/snapshot/builtin-deserializer-allocator.h
@@ -30,8 +30,6 @@ class BuiltinDeserializerAllocator final {
BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer);
- ~BuiltinDeserializerAllocator();
-
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
@@ -42,13 +40,10 @@ class BuiltinDeserializerAllocator final {
// deserialization) in order to avoid having to patch builtin references
// later on. See also the kBuiltin case in deserializer.cc.
//
- // There are three ways that we use to reserve / allocate space. In all
- // cases, required objects are requested from the GC prior to
- // deserialization. 1. pre-allocated builtin code objects are written into
- // the builtins table (this is to make deserialization of builtin references
- // easier). Pre-allocated handler code objects are 2. stored in the
- // {handler_allocations_} vector (at eager-deserialization time) and 3.
- // stored in {handler_allocation_} (at lazy-deserialization time).
+ // There is one way that we use to reserve / allocate space. Required objects
+ // are requested from the GC prior to deserialization. Pre-allocated builtin
+ // code objects are written into the builtins table (this is to make
+ // deserialization of builtin references easier).
//
// Allocate simply returns the pre-allocated object prepared by
// InitializeFromReservations.
@@ -83,23 +78,19 @@ class BuiltinDeserializerAllocator final {
// Builtin deserialization does not bake reservations into the snapshot, hence
// this is a nop.
- void DecodeReservation(std::vector<SerializedData::Reservation> res) {}
+ void DecodeReservation(const std::vector<SerializedData::Reservation>& res) {}
// These methods are used to pre-allocate builtin objects prior to
// deserialization.
// TODO(jgruber): Refactor reservation/allocation logic in deserializers to
// make this less messy.
- Heap::Reservation CreateReservationsForEagerBuiltinsAndHandlers();
+ Heap::Reservation CreateReservationsForEagerBuiltins();
void InitializeFromReservations(const Heap::Reservation& reservation);
// Creates reservations and initializes the builtins table in preparation for
// lazily deserializing a single builtin.
void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
- // Pre-allocates a code object preparation for lazily deserializing a single
- // handler.
- void ReserveForHandler(Bytecode bytecode, OperandScale operand_scale);
-
#ifdef DEBUG
bool ReservationsAreFullyUsed() const;
#endif
@@ -113,11 +104,6 @@ class BuiltinDeserializerAllocator final {
void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
int builtin_id);
- // As above, but for interpreter bytecode handlers.
- void InitializeHandlerFromReservation(
- const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale);
-
#ifdef DEBUG
void RegisterCodeObjectReservation(int code_object_id);
void RegisterCodeObjectAllocation(int code_object_id);
@@ -130,13 +116,6 @@ class BuiltinDeserializerAllocator final {
// construction since that makes vtable-based checks fail.
Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
- // Stores allocated space for bytecode handlers during eager deserialization.
- std::vector<Address>* handler_allocations_ = nullptr;
-
- // Stores the allocated space for a single handler during lazy
- // deserialization.
- Address handler_allocation_ = kNullAddress;
-
bool next_reference_is_weak_ = false;
DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator);
diff --git a/chromium/v8/src/snapshot/builtin-deserializer.cc b/chromium/v8/src/snapshot/builtin-deserializer.cc
index 0e32844ba01..136b74b26e2 100644
--- a/chromium/v8/src/snapshot/builtin-deserializer.cc
+++ b/chromium/v8/src/snapshot/builtin-deserializer.cc
@@ -42,24 +42,24 @@ BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
const BuiltinSnapshotData* data)
: Deserializer(data, false) {
code_offsets_ = data->BuiltinOffsets();
- DCHECK_EQ(BSU::kNumberOfCodeObjects, code_offsets_.length());
+ DCHECK_EQ(Builtins::builtin_count, code_offsets_.length());
DCHECK(std::is_sorted(code_offsets_.begin(), code_offsets_.end()));
Initialize(isolate);
}
-void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
+void BuiltinDeserializer::DeserializeEagerBuiltins() {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK_EQ(0, source()->position());
// Deserialize builtins.
Builtins* builtins = isolate()->builtins();
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
// Do nothing. These builtins have been replaced by DeserializeLazy in
// InitializeFromReservations.
- DCHECK_EQ(builtins->builtin(Builtins::kDeserializeLazy),
+ DCHECK_EQ(builtins->builtin(builtins->LazyDeserializerForBuiltin(i)),
builtins->builtin(i));
} else {
builtins->set_builtin(i, DeserializeBuiltinRaw(i));
@@ -67,7 +67,7 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
}
#ifdef DEBUG
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
Object* o = builtins->builtin(i);
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
}
@@ -77,7 +77,7 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
if (FLAG_print_builtin_code) {
// We can't print builtins during deserialization because they may refer
// to not yet deserialized builtins.
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
if (!IsLazyDeserializationEnabled() || !Builtins::IsLazy(i)) {
Code* code = builtins->builtin(i);
const char* name = Builtins::name(i);
@@ -86,38 +86,6 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
}
}
#endif
-
- // Deserialize bytecode handlers.
-
- Interpreter* interpreter = isolate()->interpreter();
- DCHECK(!isolate()->interpreter()->IsDispatchTableInitialized());
-
- BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
- // Bytecodes without a dedicated handler are patched up in a second pass.
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
-
- // If lazy-deserialization is enabled and the current bytecode is lazy,
- // we write the generic LazyDeserialization handler into the dispatch table
- // and deserialize later upon first use.
- Code* code = (FLAG_lazy_handler_deserialization &&
- IsLazyDeserializationEnabled() && Bytecodes::IsLazy(bytecode))
- ? GetDeserializeLazyHandler(operand_scale)
- : DeserializeHandlerRaw(bytecode, operand_scale);
-
- interpreter->SetBytecodeHandler(bytecode, operand_scale, code);
- });
-
- // Patch up holes in the dispatch table.
-
- Code* illegal_handler = interpreter->GetBytecodeHandler(
- Bytecode::kIllegal, OperandScale::kSingle);
-
- BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
- if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
- interpreter->SetBytecodeHandler(bytecode, operand_scale, illegal_handler);
- });
-
- DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
}
Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
@@ -135,13 +103,6 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
return code;
}
-Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
- OperandScale operand_scale) {
- allocator()->ReserveForHandler(bytecode, operand_scale);
- DisallowHeapAllocation no_gc;
- return DeserializeHandlerRaw(bytecode, operand_scale);
-}
-
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
@@ -162,8 +123,19 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
Assembler::FlushICache(code->raw_instruction_start(),
code->raw_instruction_size());
- PROFILE(isolate(), CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
- AbstractCode::cast(code),
+ CodeEventListener::LogEventsAndTags code_tag;
+ switch (code->kind()) {
+ case AbstractCode::BUILTIN:
+ code_tag = CodeEventListener::BUILTIN_TAG;
+ break;
+ case AbstractCode::BYTECODE_HANDLER:
+ code_tag = CodeEventListener::BYTECODE_HANDLER_TAG;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ PROFILE(isolate(), CodeCreateEvent(code_tag, AbstractCode::cast(code),
Builtins::name(builtin_id)));
LOG_CODE_EVENT(isolate(),
CodeLinePosInfoRecordEvent(
@@ -172,42 +144,8 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
return code;
}
-Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
- OperandScale operand_scale) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
-
- const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
- DeserializingCodeObjectScope scope(this, code_object_id);
-
- const int initial_position = source()->position();
- source()->set_position(code_offsets_[code_object_id]);
-
- Object* o = ReadDataSingle();
- DCHECK(o->IsCode() && Code::cast(o)->kind() == Code::BYTECODE_HANDLER);
-
- // Rewind.
- source()->set_position(initial_position);
-
- // Flush the instruction cache.
- Code* code = Code::cast(o);
- Assembler::FlushICache(code->raw_instruction_start(),
- code->raw_instruction_size());
-
- std::string name = Bytecodes::ToString(bytecode, operand_scale);
- PROFILE(isolate(), CodeCreateEvent(CodeEventListener::HANDLER_TAG,
- AbstractCode::cast(code), name.c_str()));
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- code->PrintBuiltinCode(isolate(), name.c_str());
- }
-#endif // ENABLE_DISASSEMBLER
-
- return code;
-}
-
uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
- DCHECK_LT(code_object_id, BSU::kNumberOfCodeObjects);
+ DCHECK_LT(code_object_id, Builtins::builtin_count);
const int initial_position = source()->position();
@@ -225,20 +163,5 @@ uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
return result;
}
-Code* BuiltinDeserializer::GetDeserializeLazyHandler(
- interpreter::OperandScale operand_scale) const {
- STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
- switch (operand_scale) {
- case OperandScale::kSingle:
- return Code::cast(isolate()->heap()->deserialize_lazy_handler());
- case OperandScale::kDouble:
- return Code::cast(isolate()->heap()->deserialize_lazy_handler_wide());
- case OperandScale::kQuadruple:
- return Code::cast(
- isolate()->heap()->deserialize_lazy_handler_extra_wide());
- }
- UNREACHABLE();
-}
-
} // namespace internal
} // namespace v8
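
Note on DeserializeBuiltinRaw() above: it relies on the offset table baked into the builtin snapshot, saving the stream cursor, seeking to code_offsets_[builtin_id], reading exactly one object, then rewinding so an enclosing eager pass can continue. A self-contained sketch of that seek/read/rewind pattern over a flat byte stream; the class and function names are illustrative, not the V8 ones:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    class ByteSource {
     public:
      explicit ByteSource(std::vector<uint8_t> data) : data_(std::move(data)) {}
      int position() const { return pos_; }
      void set_position(int p) { pos_ = p; }
      uint8_t Get() { return data_[pos_++]; }
     private:
      std::vector<uint8_t> data_;
      int pos_ = 0;
    };

    // Reads the single record starting at offsets[id], then restores the
    // cursor so an outer pass can continue where it left off.
    uint8_t ReadSingleRecord(ByteSource* source, const std::vector<int>& offsets,
                             int id) {
      const int initial_position = source->position();
      source->set_position(offsets[id]);
      uint8_t record = source->Get();          // Stand-in for ReadDataSingle().
      source->set_position(initial_position);  // Rewind.
      return record;
    }

    int main() {
      ByteSource source({10, 20, 30});
      std::vector<int> offsets = {0, 1, 2};
      assert(ReadSingleRecord(&source, offsets, 2) == 30);
      assert(source.position() == 0);  // Cursor restored for the eager pass.
      printf("ok\n");
    }
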
diff --git a/chromium/v8/src/snapshot/builtin-deserializer.h b/chromium/v8/src/snapshot/builtin-deserializer.h
index 1ae49686b8b..e77598db685 100644
--- a/chromium/v8/src/snapshot/builtin-deserializer.h
+++ b/chromium/v8/src/snapshot/builtin-deserializer.h
@@ -7,7 +7,6 @@
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-deserializer-allocator.h"
-#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
@@ -32,25 +31,17 @@ class BuiltinDeserializer final
//
// After this, the instruction cache must be flushed by the caller (we don't
// do it ourselves since the startup serializer batch-flushes all code pages).
- void DeserializeEagerBuiltinsAndHandlers();
+ void DeserializeEagerBuiltins();
// Deserializes the single given builtin. This is used whenever a builtin is
// lazily deserialized at runtime.
Code* DeserializeBuiltin(int builtin_id);
- // Deserializes the single given handler. This is used whenever a handler is
- // lazily deserialized at runtime.
- Code* DeserializeHandler(Bytecode bytecode, OperandScale operand_scale);
-
private:
// Deserializes the single given builtin. Assumes that reservations have
// already been allocated.
Code* DeserializeBuiltinRaw(int builtin_id);
- // Deserializes the single given bytecode handler. Assumes that reservations
- // have already been allocated.
- Code* DeserializeHandlerRaw(Bytecode bytecode, OperandScale operand_scale);
-
// Extracts the size of builtin Code objects (baked into the snapshot).
uint32_t ExtractCodeObjectSize(int builtin_id);
diff --git a/chromium/v8/src/snapshot/builtin-serializer.cc b/chromium/v8/src/snapshot/builtin-serializer.cc
index 0109a85b6b0..6c71606b2ed 100644
--- a/chromium/v8/src/snapshot/builtin-serializer.cc
+++ b/chromium/v8/src/snapshot/builtin-serializer.cc
@@ -26,42 +26,24 @@ BuiltinSerializer::~BuiltinSerializer() {
void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
// Serialize builtins.
- STATIC_ASSERT(0 == BSU::kFirstBuiltinIndex);
-
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code* code = isolate()->builtins()->builtin(i);
+ DCHECK_IMPLIES(Builtins::IsLazyDeserializer(code),
+ Builtins::IsLazyDeserializer(i));
SetBuiltinOffset(i, sink_.Position());
- SerializeBuiltin(isolate()->builtins()->builtin(i));
+ SerializeBuiltin(code);
}
- // Serialize bytecode handlers.
-
- STATIC_ASSERT(BSU::kNumberOfBuiltins == BSU::kFirstHandlerIndex);
-
- BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
- SetHandlerOffset(bytecode, operand_scale, sink_.Position());
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
-
- SerializeHandler(
- isolate()->interpreter()->GetBytecodeHandler(bytecode, operand_scale));
- });
-
- STATIC_ASSERT(BSU::kFirstHandlerIndex + BSU::kNumberOfHandlers ==
- BSU::kNumberOfCodeObjects);
-
- // The DeserializeLazy handlers are serialized by the StartupSerializer
- // during strong root iteration.
-
- DCHECK(isolate()->heap()->deserialize_lazy_handler()->IsCode());
- DCHECK(isolate()->heap()->deserialize_lazy_handler_wide()->IsCode());
- DCHECK(isolate()->heap()->deserialize_lazy_handler_extra_wide()->IsCode());
+ // Append the offset table. During deserialization, the offset table is
+ // extracted by BuiltinSnapshotData.
+ const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
+ int data_length = static_cast<int>(sizeof(code_offsets_));
// Pad with kNop since GetInt() might read too far.
- Pad();
+ Pad(data_length);
// Append the offset table. During deserialization, the offset table is
// extracted by BuiltinSnapshotData.
- const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
- int data_length = static_cast<int>(sizeof(code_offsets_));
sink_.PutRaw(data, data_length, "BuiltinOffsets");
}
@@ -83,20 +65,13 @@ void BuiltinSerializer::SerializeBuiltin(Code* code) {
object_serializer.Serialize();
}
-void BuiltinSerializer::SerializeHandler(Code* code) {
- DCHECK(ObjectIsBytecodeHandler(code));
- ObjectSerializer object_serializer(this, code, &sink_, kPlain,
- kStartOfObject);
- object_serializer.Serialize();
-}
-
void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!o->IsSmi());
// Roots can simply be serialized as root references.
- int root_index = root_index_map()->Lookup(o);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
+ RootIndex root_index;
+ if (root_index_map()->Lookup(o, &root_index)) {
DCHECK(startup_serializer_->root_has_been_serialized(root_index));
PutRoot(root_index, o, how_to_code, where_to_point, skip);
return;
@@ -115,8 +90,8 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
// * Strings: CSA_ASSERTs in debug builds, various other string constants.
// * HeapNumbers: Embedded constants.
// TODO(6624): Jump targets should never trigger content serialization, it
- // should always result in a reference instead. Reloc infos and handler
- // tables should not end up in the partial snapshot cache.
+ // should always result in a reference instead. Reloc infos and handler tables
+ // should not end up in the partial snapshot cache.
FlushSkip(skip);
@@ -128,17 +103,8 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
void BuiltinSerializer::SetBuiltinOffset(int builtin_id, uint32_t offset) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK(BSU::IsBuiltinIndex(builtin_id));
code_offsets_[builtin_id] = offset;
}
-void BuiltinSerializer::SetHandlerOffset(Bytecode bytecode,
- OperandScale operand_scale,
- uint32_t offset) {
- const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
- DCHECK(BSU::IsHandlerIndex(index));
- code_offsets_[index] = offset;
-}
-
} // namespace internal
} // namespace v8
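
Note on the reordered hunk above: the offset-table bytes are now computed first so that Pad(data_length) can align the stream such that payload plus appended table ends up pointer-aligned (Pad() also always emits sizeof(int32_t) - 1 kNop bytes because GetInt() may read up to three bytes past the end). A sketch of that layout under those assumptions, with fake record bytes and a zero byte standing in for kNop:

    #include <cstdint>
    #include <vector>

    int main() {
      const int kCount = 3;
      uint32_t code_offsets[kCount];
      std::vector<uint8_t> sink;

      // 1. Serialize each record, remembering where it starts.
      for (int i = 0; i < kCount; i++) {
        code_offsets[i] = static_cast<uint32_t>(sink.size());
        sink.push_back(static_cast<uint8_t>(0xC0 + i));  // Fake record data.
      }

      // 2. Pad so that (payload size + offset table) stays pointer-aligned,
      //    mirroring Pad(data_length) above.
      const size_t table_size = sizeof(code_offsets);
      while ((sink.size() + table_size) % sizeof(void*) != 0) {
        sink.push_back(0);  // Stand-in for the kNop padding byte.
      }

      // 3. Append the raw offset table; the deserializer peels it off the end
      //    by its known size (cf. BuiltinSnapshotData).
      const uint8_t* table = reinterpret_cast<const uint8_t*>(code_offsets);
      sink.insert(sink.end(), table, table + table_size);
      return 0;
    }
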
diff --git a/chromium/v8/src/snapshot/builtin-serializer.h b/chromium/v8/src/snapshot/builtin-serializer.h
index abc8be74e5a..132aa0894b8 100644
--- a/chromium/v8/src/snapshot/builtin-serializer.h
+++ b/chromium/v8/src/snapshot/builtin-serializer.h
@@ -5,9 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
+#include "src/builtins/builtins.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-serializer-allocator.h"
-#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/serializer.h"
namespace v8 {
@@ -15,12 +15,10 @@ namespace internal {
class StartupSerializer;
-// Responsible for serializing builtin and bytecode handler objects during
-// startup snapshot creation into a dedicated area of the snapshot.
+// Responsible for serializing builtin objects during startup snapshot creation
+// into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
- using BSU = BuiltinSnapshotUtils;
-
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;
@@ -32,7 +30,6 @@ class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
Object** end) override;
void SerializeBuiltin(Code* code);
- void SerializeHandler(Code* code);
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
@@ -47,14 +44,11 @@ class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
// Stores the starting offset, within the serialized data, of each code
// object. This is later packed into the builtin snapshot, and used by the
- // builtin deserializer to deserialize individual builtins and bytecode
- // handlers.
+ // builtin deserializer to deserialize individual builtins.
//
// Indices [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins[:
// Builtin offsets.
- // Indices [kFirstHandlerIndex, kFirstHandlerIndex + kNumberOfHandlers[:
- // Bytecode handler offsets.
- uint32_t code_offsets_[BuiltinSnapshotUtils::kNumberOfCodeObjects];
+ uint32_t code_offsets_[Builtins::builtin_count];
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
};
diff --git a/chromium/v8/src/snapshot/builtin-snapshot-utils.cc b/chromium/v8/src/snapshot/builtin-snapshot-utils.cc
deleted file mode 100644
index e32a857c0b4..00000000000
--- a/chromium/v8/src/snapshot/builtin-snapshot-utils.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/builtin-snapshot-utils.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-bool BuiltinSnapshotUtils::IsBuiltinIndex(int maybe_index) {
- return (kFirstBuiltinIndex <= maybe_index &&
- maybe_index < kFirstBuiltinIndex + kNumberOfBuiltins);
-}
-
-// static
-bool BuiltinSnapshotUtils::IsHandlerIndex(int maybe_index) {
- return (kFirstHandlerIndex <= maybe_index &&
- maybe_index < kFirstHandlerIndex + kNumberOfHandlers);
-}
-
-// static
-int BuiltinSnapshotUtils::BytecodeToIndex(Bytecode bytecode,
- OperandScale operand_scale) {
- int index =
- BuiltinSnapshotUtils::kNumberOfBuiltins + static_cast<int>(bytecode);
- switch (operand_scale) { // clang-format off
- case OperandScale::kSingle: return index;
- case OperandScale::kDouble: return index + Bytecodes::kBytecodeCount;
- case OperandScale::kQuadruple: return index + 2 * Bytecodes::kBytecodeCount;
- } // clang-format on
- UNREACHABLE();
-}
-
-// static
-std::pair<interpreter::Bytecode, interpreter::OperandScale>
-BuiltinSnapshotUtils::BytecodeFromIndex(int index) {
- DCHECK(IsHandlerIndex(index));
-
- const int x = index - BuiltinSnapshotUtils::kNumberOfBuiltins;
- Bytecode bytecode = Bytecodes::FromByte(x % Bytecodes::kBytecodeCount);
- switch (x / Bytecodes::kBytecodeCount) { // clang-format off
- case 0: return {bytecode, OperandScale::kSingle};
- case 1: return {bytecode, OperandScale::kDouble};
- case 2: return {bytecode, OperandScale::kQuadruple};
- default: UNREACHABLE();
- } // clang-format on
-}
-
-// static
-void BuiltinSnapshotUtils::ForEachBytecode(
- std::function<void(Bytecode, OperandScale)> f) {
- static const OperandScale kOperandScales[] = {
-#define VALUE(Name, _) OperandScale::k##Name,
- OPERAND_SCALE_LIST(VALUE)
-#undef VALUE
- };
-
- for (OperandScale operand_scale : kOperandScales) {
- for (int i = 0; i < Bytecodes::kBytecodeCount; i++) {
- f(Bytecodes::FromByte(i), operand_scale);
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/snapshot/builtin-snapshot-utils.h b/chromium/v8/src/snapshot/builtin-snapshot-utils.h
deleted file mode 100644
index 587b4a35b02..00000000000
--- a/chromium/v8/src/snapshot/builtin-snapshot-utils.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
-#define V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
-
-#include <functional>
-
-#include "src/interpreter/interpreter.h"
-
-namespace v8 {
-namespace internal {
-
-// Constants and utility methods used by builtin and bytecode handler
-// (de)serialization.
-class BuiltinSnapshotUtils : public AllStatic {
- using Bytecode = interpreter::Bytecode;
- using BytecodeOperands = interpreter::BytecodeOperands;
- using Bytecodes = interpreter::Bytecodes;
- using Interpreter = interpreter::Interpreter;
- using OperandScale = interpreter::OperandScale;
-
- public:
- static const int kFirstBuiltinIndex = 0;
- static const int kNumberOfBuiltins = Builtins::builtin_count;
-
- static const int kFirstHandlerIndex = kFirstBuiltinIndex + kNumberOfBuiltins;
- static const int kNumberOfHandlers =
- Bytecodes::kBytecodeCount * BytecodeOperands::kOperandScaleCount;
-
- // The number of code objects in the builtin snapshot.
- // TODO(jgruber): This could be reduced by a bit since not every
- // {bytecode, operand_scale} combination has an associated handler
- // (see Bytecodes::BytecodeHasHandler).
- static const int kNumberOfCodeObjects = kNumberOfBuiltins + kNumberOfHandlers;
-
- // Indexes into the offsets vector contained in snapshot.
- // See e.g. BuiltinSerializer::code_offsets_.
- static bool IsBuiltinIndex(int maybe_index);
- static bool IsHandlerIndex(int maybe_index);
- static int BytecodeToIndex(Bytecode bytecode, OperandScale operand_scale);
-
- // Converts an index back into the {bytecode,operand_scale} tuple. This is the
- // inverse operation of BytecodeToIndex().
- static std::pair<Bytecode, OperandScale> BytecodeFromIndex(int index);
-
- // Iteration over all {bytecode,operand_scale} pairs. Implemented here since
- // (de)serialization depends on the iteration order.
- static void ForEachBytecode(std::function<void(Bytecode, OperandScale)> f);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
diff --git a/chromium/v8/src/snapshot/code-serializer.cc b/chromium/v8/src/snapshot/code-serializer.cc
index 5db7cae94b2..b463ca2047e 100644
--- a/chromium/v8/src/snapshot/code-serializer.cc
+++ b/chromium/v8/src/snapshot/code-serializer.cc
@@ -4,8 +4,6 @@
#include "src/snapshot/code-serializer.h"
-#include <memory>
-
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -126,8 +124,8 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map()->Lookup(obj);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
+ RootIndex root_index;
+ if (root_index_map()->Lookup(obj, &root_index)) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
}
@@ -336,44 +334,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
return scope.CloseAndEscape(result);
}
-class Checksum {
- public:
- explicit Checksum(Vector<const byte> payload) {
-#ifdef MEMORY_SANITIZER
- // Computing the checksum includes padding bytes for objects like strings.
- // Mark every object as initialized in the code serializer.
- MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
-#endif // MEMORY_SANITIZER
- // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
- uintptr_t a = 1;
- uintptr_t b = 0;
- const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
- DCHECK(IsAligned(payload.length(), kIntptrSize));
- const uintptr_t* end = cur + payload.length() / kIntptrSize;
- while (cur < end) {
- // Unsigned overflow expected and intended.
- a += *cur++;
- b += a;
- }
-#if V8_HOST_ARCH_64_BIT
- a ^= a >> 32;
- b ^= b >> 32;
-#endif // V8_HOST_ARCH_64_BIT
- a_ = static_cast<uint32_t>(a);
- b_ = static_cast<uint32_t>(b);
- }
-
- bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
-
- uint32_t a() const { return a_; }
- uint32_t b() const { return b_; }
-
- private:
- uint32_t a_;
- uint32_t b_;
-
- DISALLOW_COPY_AND_ASSIGN(Checksum);
-};
SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
@@ -390,10 +350,14 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
uint32_t size =
padded_payload_offset + static_cast<uint32_t>(payload->size());
+ DCHECK(IsAligned(size, kPointerAlignment));
// Allocate backing store and create result data.
AllocateData(size);
+ // Zero out pre-payload data. Part of that is only used for padding.
+ memset(data_, 0, padded_payload_offset);
+
// Set header values.
SetMagicNumber(cs->isolate());
SetHeaderValue(kVersionHashOffset, Version::Hash());
@@ -418,16 +382,13 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
CopyBytes(data_ + kHeaderSize + reservation_size,
reinterpret_cast<const byte*>(stub_keys->data()), stub_keys_size);
- // Zero out any padding before the payload.
- memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
-
// Copy serialized data.
CopyBytes(data_ + padded_payload_offset, payload->data(),
static_cast<size_t>(payload->size()));
- Checksum checksum(DataWithoutHeader());
- SetHeaderValue(kChecksum1Offset, checksum.a());
- SetHeaderValue(kChecksum2Offset, checksum.b());
+ Checksum checksum(ChecksummedContent());
+ SetHeaderValue(kChecksumPartAOffset, checksum.a());
+ SetHeaderValue(kChecksumPartBOffset, checksum.b());
}
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
@@ -440,8 +401,8 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
- uint32_t c1 = GetHeaderValue(kChecksum1Offset);
- uint32_t c2 = GetHeaderValue(kChecksum2Offset);
+ uint32_t c1 = GetHeaderValue(kChecksumPartAOffset);
+ uint32_t c2 = GetHeaderValue(kChecksumPartBOffset);
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
if (source_hash != expected_source_hash) return SOURCE_MISMATCH;
if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
@@ -454,7 +415,7 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
GetHeaderValue(kNumReservationsOffset) * kInt32Size +
GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size);
if (payload_length > max_payload_length) return LENGTH_MISMATCH;
- if (!Checksum(DataWithoutHeader()).Check(c1, c2)) return CHECKSUM_MISMATCH;
+ if (!Checksum(ChecksummedContent()).Check(c1, c2)) return CHECKSUM_MISMATCH;
return CHECK_SUCCESS;
}
diff --git a/chromium/v8/src/snapshot/code-serializer.h b/chromium/v8/src/snapshot/code-serializer.h
index d1f19ef081e..d9b4be9a342 100644
--- a/chromium/v8/src/snapshot/code-serializer.h
+++ b/chromium/v8/src/snapshot/code-serializer.h
@@ -110,8 +110,8 @@ class SerializedCodeData : public SerializedData {
// [6] number of code stub keys
// [7] number of reservation size entries
// [8] payload length
- // [9] payload checksum part 1
- // [10] payload checksum part 2
+ // [9] payload checksum part A
+ // [10] payload checksum part B
// ... reservations
// ... code stub keys
// ... serialized payload
@@ -124,9 +124,12 @@ class SerializedCodeData : public SerializedData {
kNumReservationsOffset + kUInt32Size;
static const uint32_t kPayloadLengthOffset =
kNumCodeStubKeysOffset + kUInt32Size;
- static const uint32_t kChecksum1Offset = kPayloadLengthOffset + kUInt32Size;
- static const uint32_t kChecksum2Offset = kChecksum1Offset + kUInt32Size;
- static const uint32_t kUnalignedHeaderSize = kChecksum2Offset + kUInt32Size;
+ static const uint32_t kChecksumPartAOffset =
+ kPayloadLengthOffset + kUInt32Size;
+ static const uint32_t kChecksumPartBOffset =
+ kChecksumPartAOffset + kUInt32Size;
+ static const uint32_t kUnalignedHeaderSize =
+ kChecksumPartBOffset + kUInt32Size;
static const uint32_t kHeaderSize = POINTER_SIZE_ALIGN(kUnalignedHeaderSize);
// Used when consuming.
@@ -155,7 +158,7 @@ class SerializedCodeData : public SerializedData {
SerializedCodeData(const byte* data, int size)
: SerializedData(const_cast<byte*>(data), size) {}
- Vector<const byte> DataWithoutHeader() const {
+ Vector<const byte> ChecksummedContent() const {
return Vector<const byte>(data_ + kHeaderSize, size_ - kHeaderSize);
}
diff --git a/chromium/v8/src/snapshot/default-deserializer-allocator.cc b/chromium/v8/src/snapshot/default-deserializer-allocator.cc
index 610b87c771b..f3afc4d498f 100644
--- a/chromium/v8/src/snapshot/default-deserializer-allocator.cc
+++ b/chromium/v8/src/snapshot/default-deserializer-allocator.cc
@@ -121,7 +121,7 @@ HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
}
void DefaultDeserializerAllocator::DecodeReservation(
- std::vector<SerializedData::Reservation> res) {
+ const std::vector<SerializedData::Reservation>& res) {
DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
int current_space = FIRST_SPACE;
for (auto& r : res) {
@@ -167,8 +167,7 @@ bool DefaultDeserializerAllocator::ReserveSpace(
}
Heap::Reservation builtin_reservations =
- builtin_deserializer->allocator()
- ->CreateReservationsForEagerBuiltinsAndHandlers();
+ builtin_deserializer->allocator()->CreateReservationsForEagerBuiltins();
DCHECK(!builtin_reservations.empty());
for (const auto& c : builtin_reservations) {
diff --git a/chromium/v8/src/snapshot/default-deserializer-allocator.h b/chromium/v8/src/snapshot/default-deserializer-allocator.h
index e6a5ba3fdcc..4a5758cc5ad 100644
--- a/chromium/v8/src/snapshot/default-deserializer-allocator.h
+++ b/chromium/v8/src/snapshot/default-deserializer-allocator.h
@@ -58,7 +58,7 @@ class DefaultDeserializerAllocator final {
// ------- Reservation Methods -------
// Methods related to memory reservations (prior to deserialization).
- void DecodeReservation(std::vector<SerializedData::Reservation> res);
+ void DecodeReservation(const std::vector<SerializedData::Reservation>& res);
bool ReserveSpace();
// Atomically reserves space for the two given deserializers. Guarantees
diff --git a/chromium/v8/src/snapshot/deserializer.cc b/chromium/v8/src/snapshot/deserializer.cc
index 3ed360e14a6..bc5805fb52f 100644
--- a/chromium/v8/src/snapshot/deserializer.cc
+++ b/chromium/v8/src/snapshot/deserializer.cc
@@ -69,8 +69,7 @@ template <class AllocatorT>
void Deserializer<AllocatorT>::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
- // Builtins and bytecode handlers are deserialized in a separate pass by the
- // BuiltinDeserializer.
+ // Builtins are deserialized in a separate pass by the BuiltinDeserializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
// The space must be new space. Any other space would cause ReadChunk to try
@@ -179,18 +178,11 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
}
if (obj->IsAllocationSite()) {
- // Allocation sites are present in the snapshot, and must be linked into
- // a list at deserialization time.
- AllocationSite* site = AllocationSite::cast(obj);
- // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
- // as a (weak) root. If this root is relocated correctly, this becomes
- // unnecessary.
- if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
- site->set_weak_next(ReadOnlyRoots(isolate_).undefined_value());
- } else {
- site->set_weak_next(isolate_->heap()->allocation_sites_list());
- }
- isolate_->heap()->set_allocation_sites_list(site);
+ // We should link new allocation sites, but we can't do this immediately
+ // because |AllocationSite::HasWeakNext()| internally accesses
+ // |Heap::roots_| that may not have been initialized yet. So defer this to
+ // |ObjectDeserializer::CommitPostProcessedObjects()|.
+ new_allocation_sites_.push_back(AllocationSite::cast(obj));
} else if (obj->IsCode()) {
// We flush all code pages after deserializing the startup snapshot. In that
// case, we only need to remember code objects in the large object space.
@@ -209,7 +201,7 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
} else if (obj->IsExternalString()) {
if (obj->map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
ExternalOneByteString* string = ExternalOneByteString::cast(obj);
- DCHECK(string->is_short());
+ DCHECK(string->is_uncached());
string->SetResource(
isolate_, NativesExternalStringResource::DecodeForDeserialization(
string->resource()));
@@ -225,8 +217,8 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
isolate_->heap()->RegisterExternalString(String::cast(obj));
} else if (obj->IsJSTypedArray()) {
JSTypedArray* typed_array = JSTypedArray::cast(obj);
- CHECK(typed_array->byte_offset()->IsSmi());
- int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
+ CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
+ int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
if (byte_offset > 0) {
FixedTypedArrayBase* elements =
FixedTypedArrayBase::cast(typed_array->elements());
@@ -370,11 +362,7 @@ Object* Deserializer<AllocatorT>::ReadDataSingle() {
Address current_object = kNullAddress;
CHECK(ReadData(start, end, source_space, current_object));
- HeapObject* heap_object;
- bool success = o->ToStrongHeapObject(&heap_object);
- DCHECK(success);
- USE(success);
- return heap_object;
+ return o->GetHeapObjectAssumeStrong();
}
static void NoExternalReferencesCallback() {
@@ -684,7 +672,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
SIXTEEN_CASES(kRootArrayConstants)
SIXTEEN_CASES(kRootArrayConstants + 16) {
int id = data & kRootArrayConstantsMask;
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
+ RootIndex root_index = static_cast<RootIndex>(id);
MaybeObject* object =
MaybeObject::FromObject(isolate->heap()->root(root_index));
DCHECK(!Heap::InNewSpace(object));
@@ -818,7 +806,7 @@ MaybeObject** Deserializer<AllocatorT>::ReadDataCase(
new_object = GetBackReferencedObject(data & kSpaceMask);
} else if (where == kRootArray) {
int id = source_.GetInt();
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
+ RootIndex root_index = static_cast<RootIndex>(id);
new_object = isolate->heap()->root(root_index);
emit_write_barrier = Heap::InNewSpace(new_object);
hot_objects_.Add(HeapObject::cast(new_object));
diff --git a/chromium/v8/src/snapshot/deserializer.h b/chromium/v8/src/snapshot/deserializer.h
index f13bc03fd44..8340a935384 100644
--- a/chromium/v8/src/snapshot/deserializer.h
+++ b/chromium/v8/src/snapshot/deserializer.h
@@ -15,6 +15,7 @@
namespace v8 {
namespace internal {
+class AllocationSite;
class HeapObject;
class Object;
@@ -71,6 +72,9 @@ class Deserializer : public SerializerDeserializer {
Isolate* isolate() const { return isolate_; }
SnapshotByteSource* source() { return &source_; }
+ const std::vector<AllocationSite*>& new_allocation_sites() const {
+ return new_allocation_sites_;
+ }
const std::vector<Code*>& new_code_objects() const {
return new_code_objects_;
}
@@ -148,6 +152,7 @@ class Deserializer : public SerializerDeserializer {
ExternalReferenceTable* external_reference_table_;
+ std::vector<AllocationSite*> new_allocation_sites_;
std::vector<Code*> new_code_objects_;
std::vector<AccessorInfo*> accessor_infos_;
std::vector<CallHandlerInfo*> call_handler_infos_;
diff --git a/chromium/v8/src/snapshot/mksnapshot.cc b/chromium/v8/src/snapshot/mksnapshot.cc
index a2303613d64..09db077694d 100644
--- a/chromium/v8/src/snapshot/mksnapshot.cc
+++ b/chromium/v8/src/snapshot/mksnapshot.cc
@@ -165,10 +165,10 @@ class SnapshotWriter {
// present in the binary.
// For now, the straight-forward solution seems to be to just emit a pure
// .byte stream on OSX.
- WriteBinaryContentsAsByteDirective(fp, blob->data(), blob->size());
+ WriteBinaryContentsAsInlineAssembly(fp, blob->data(), blob->size());
#else
- WriteBinaryContentsAsByteDirective(fp, blob->data(),
- i::EmbeddedData::RawDataOffset());
+ WriteBinaryContentsAsInlineAssembly(fp, blob->data(),
+ i::EmbeddedData::RawDataOffset());
WriteBuiltins(fp, blob, embedded_variant);
#endif
fprintf(fp, "extern \"C\" const uint8_t v8_%s_embedded_blob_[];\n",
@@ -197,7 +197,7 @@ class SnapshotWriter {
embedded_variant, i::Builtins::name(i));
}
- WriteBinaryContentsAsByteDirective(
+ WriteBinaryContentsAsInlineAssembly(
fp,
reinterpret_cast<const uint8_t*>(blob->InstructionStartOfBuiltin(i)),
blob->PaddedInstructionSizeOfBuiltin(i));
@@ -205,34 +205,77 @@ class SnapshotWriter {
fprintf(fp, "\n");
}
- static void WriteBinaryContentsAsByteDirective(FILE* fp, const uint8_t* data,
- uint32_t size) {
- static const int kTextWidth = 80;
- int current_line_length = 0;
- int printed_chars;
+ static int WriteOcta(FILE* fp, int current_line_length, const uint8_t* data) {
+ const uint64_t* quad_ptr1 = reinterpret_cast<const uint64_t*>(data);
+ const uint64_t* quad_ptr2 = reinterpret_cast<const uint64_t*>(data + 8);
- fprintf(fp, "__asm__(\n");
- for (uint32_t i = 0; i < size; i++) {
- if (current_line_length == 0) {
- printed_chars = fprintf(fp, "%s", " \".byte ");
- DCHECK_LT(0, printed_chars);
- current_line_length += printed_chars;
- } else {
- printed_chars = fprintf(fp, ",");
- DCHECK_EQ(1, printed_chars);
- current_line_length += printed_chars;
- }
+#ifdef V8_TARGET_BIG_ENDIAN
+ uint64_t part1 = *quad_ptr1;
+ uint64_t part2 = *quad_ptr2;
+#else
+ uint64_t part1 = *quad_ptr2;
+ uint64_t part2 = *quad_ptr1;
+#endif // V8_TARGET_BIG_ENDIAN
+
+ if (part1 != 0) {
+ current_line_length +=
+ fprintf(fp, "0x%" PRIx64 "%016" PRIx64, part1, part2);
+ } else {
+ current_line_length += fprintf(fp, "0x%" PRIx64, part2);
+ }
+ return current_line_length;
+ }
- printed_chars = fprintf(fp, "0x%02x", data[i]);
+ static int WriteDirectiveOrSeparator(FILE* fp, int current_line_length,
+ const char* directive) {
+ int printed_chars;
+ if (current_line_length == 0) {
+ printed_chars = fprintf(fp, " \"%s ", directive);
DCHECK_LT(0, printed_chars);
- current_line_length += printed_chars;
+ } else {
+ printed_chars = fprintf(fp, ",");
+ DCHECK_EQ(1, printed_chars);
+ }
+ return current_line_length + printed_chars;
+ }
- if (current_line_length + strlen(",0xFF\\n\"") > kTextWidth) {
- fprintf(fp, "\\n\"\n");
- current_line_length = 0;
- }
+ static int WriteLineEndIfNeeded(FILE* fp, int current_line_length,
+ int write_size) {
+ static const int kTextWidth = 80;
+ // Check if adding ',0xFF...FF\n"' would force a line wrap. This doesn't use
+ // the actual size of the string to be written to determine this so it's
+ // more conservative than strictly needed.
+ if (current_line_length + strlen(",0x\\n\"") + write_size * 2 >
+ kTextWidth) {
+ fprintf(fp, "\\n\"\n");
+ return 0;
+ } else {
+ return current_line_length;
}
+ }
+
+ static void WriteBinaryContentsAsInlineAssembly(FILE* fp, const uint8_t* data,
+ uint32_t size) {
+ int current_line_length = 0;
+ fprintf(fp, "__asm__(\n");
+ uint32_t i = 0;
+ const uint32_t size_of_octa = 16;
+ for (; i <= size - size_of_octa; i += size_of_octa) {
+ current_line_length =
+ WriteDirectiveOrSeparator(fp, current_line_length, ".octa");
+ current_line_length = WriteOcta(fp, current_line_length, data + i);
+ current_line_length =
+ WriteLineEndIfNeeded(fp, current_line_length, size_of_octa);
+ }
+ if (current_line_length != 0) fprintf(fp, "\\n\"\n");
+ current_line_length = 0;
+ for (; i < size; i++) {
+ current_line_length =
+ WriteDirectiveOrSeparator(fp, current_line_length, ".byte");
+ current_line_length += fprintf(fp, "0x%x", data[i]);
+ current_line_length = WriteLineEndIfNeeded(fp, current_line_length, 1);
+ }
if (current_line_length != 0) fprintf(fp, "\\n\"\n");
fprintf(fp, ");\n");
}
@@ -307,7 +350,7 @@ bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
}
v8::StartupData CreateSnapshotDataBlob(v8::SnapshotCreator* snapshot_creator,
- const char* script_source = NULL) {
+ const char* script_source = nullptr) {
// Create a new isolate and a new context from scratch, optionally run
// a script to embed, and serialize to create a snapshot blob.
v8::StartupData result = {nullptr, 0};
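
Note on WriteBinaryContentsAsInlineAssembly() above: emitting 16-byte .octa constants instead of one .byte per byte shrinks the generated embedded-blob source considerably. The two 64-bit halves are swapped on little-endian targets because .octa takes the whole 128-bit literal in high-to-low digit order, so the first 8 bytes of the blob must become the literal's low half. A self-contained sketch of the core emission (V8_TARGET_BIG_ENDIAN is the real V8 define, merely passed through here; unlike the real code, this prints fixed-width halves and does no 80-column line wrapping):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Emits one ".octa" directive covering 16 bytes of the blob.
    static void WriteOcta(FILE* fp, const uint8_t* data) {
      uint64_t first, second;   // Memory order: data[0..7], data[8..15].
      memcpy(&first, data, 8);  // memcpy avoids alignment/aliasing issues.
      memcpy(&second, data + 8, 8);
    #ifdef V8_TARGET_BIG_ENDIAN
      uint64_t high = first, low = second;
    #else
      uint64_t high = second, low = first;
    #endif
      fprintf(fp, "  \".octa 0x%016" PRIx64 "%016" PRIx64 "\\n\"\n", high, low);
    }

    int main() {
      const uint8_t blob[16] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                                0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10};
      printf("__asm__(\n");
      WriteOcta(stdout, blob);  // Low half 0x0807060504030201 on LE hosts.
      printf(");\n");
    }
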
diff --git a/chromium/v8/src/snapshot/object-deserializer.cc b/chromium/v8/src/snapshot/object-deserializer.cc
index aabc5bf1e0f..8935c0ef892 100644
--- a/chromium/v8/src/snapshot/object-deserializer.cc
+++ b/chromium/v8/src/snapshot/object-deserializer.cc
@@ -90,6 +90,21 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
MaybeObjectHandle::Weak(script));
heap->SetRootScriptList(*list);
}
+
+ // Allocation sites are present in the snapshot, and must be linked into
+ // a list at deserialization time.
+ for (AllocationSite* site : new_allocation_sites()) {
+ if (!site->HasWeakNext()) continue;
+ // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+ // as a (weak) root. If this root is relocated correctly, this becomes
+ // unnecessary.
+ if (heap->allocation_sites_list() == Smi::kZero) {
+ site->set_weak_next(ReadOnlyRoots(heap).undefined_value());
+ } else {
+ site->set_weak_next(heap->allocation_sites_list());
+ }
+ heap->set_allocation_sites_list(site);
+ }
}
} // namespace internal
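
Note on the linking moved here: it is a plain list prepend, deferred until heap roots are usable. Each new site becomes the head of allocation_sites_list(), with the previous head stored in the site's weak_next field (undefined terminates the list). A minimal sketch with illustrative types, using nullptr in place of the undefined sentinel:

    #include <cstdio>
    #include <vector>

    struct Site {
      int id;
      Site* weak_next = nullptr;  // nullptr stands in for undefined_value().
    };

    int main() {
      // Collected during deserialization (cf. new_allocation_sites()).
      std::vector<Site> sites = {{1}, {2}, {3}};
      Site* list_head = nullptr;  // Heap::allocation_sites_list() analogue.

      // Deferred commit: prepend each site once the heap roots are ready.
      for (Site& site : sites) {
        site.weak_next = list_head;
        list_head = &site;
      }

      for (Site* s = list_head; s != nullptr; s = s->weak_next)
        printf("site %d\n", s->id);  // Prints 3, 2, 1: newest first.
    }
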
diff --git a/chromium/v8/src/snapshot/partial-serializer.cc b/chromium/v8/src/snapshot/partial-serializer.cc
index d127aa5f0a5..1f3cbc55216 100644
--- a/chromium/v8/src/snapshot/partial-serializer.cc
+++ b/chromium/v8/src/snapshot/partial-serializer.cc
@@ -6,6 +6,7 @@
#include "src/snapshot/startup-serializer.h"
#include "src/api-inl.h"
+#include "src/math-random.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -40,8 +41,7 @@ void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
ReadOnlyRoots(isolate()).undefined_value());
DCHECK(!context_->global_object()->IsUndefined());
// Reset math random cache to get fresh random numbers.
- context_->set_math_random_index(Smi::kZero);
- context_->set_math_random_cache(ReadOnlyRoots(isolate()).undefined_value());
+ MathRandom::ResetContext(context_);
VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
reinterpret_cast<Object**>(o));
@@ -59,8 +59,8 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map()->Lookup(obj);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
+ RootIndex root_index;
+ if (root_index_map()->Lookup(obj, &root_index)) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
}
diff --git a/chromium/v8/src/snapshot/serializer-common.h b/chromium/v8/src/snapshot/serializer-common.h
index 34a6b646769..8f547243d63 100644
--- a/chromium/v8/src/snapshot/serializer-common.h
+++ b/chromium/v8/src/snapshot/serializer-common.h
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/external-reference-table.h"
#include "src/globals.h"
+#include "src/msan.h"
#include "src/snapshot/references.h"
#include "src/v8memory.h"
#include "src/visitors.h"
@@ -39,7 +40,7 @@ class ExternalReferenceEncoder {
};
explicit ExternalReferenceEncoder(Isolate* isolate);
- ~ExternalReferenceEncoder();
+ ~ExternalReferenceEncoder(); // NOLINT (modernize-use-equals-default)
Value Encode(Address key);
Maybe<Value> TryEncode(Address key);
@@ -350,6 +351,45 @@ class SerializedData {
DISALLOW_COPY_AND_ASSIGN(SerializedData);
};
+class Checksum {
+ public:
+ explicit Checksum(Vector<const byte> payload) {
+#ifdef MEMORY_SANITIZER
+ // Computing the checksum includes padding bytes for objects like strings.
+ // Mark every object as initialized in the code serializer.
+ MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
+#endif // MEMORY_SANITIZER
+ // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
+ uintptr_t a = 1;
+ uintptr_t b = 0;
+ const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
+ DCHECK(IsAligned(payload.length(), kIntptrSize));
+ const uintptr_t* end = cur + payload.length() / kIntptrSize;
+ while (cur < end) {
+ // Unsigned overflow expected and intended.
+ a += *cur++;
+ b += a;
+ }
+#if V8_HOST_ARCH_64_BIT
+ a ^= a >> 32;
+ b ^= b >> 32;
+#endif // V8_HOST_ARCH_64_BIT
+ a_ = static_cast<uint32_t>(a);
+ b_ = static_cast<uint32_t>(b);
+ }
+
+ bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
+
+ uint32_t a() const { return a_; }
+ uint32_t b() const { return b_; }
+
+ private:
+ uint32_t a_;
+ uint32_t b_;
+
+ DISALLOW_COPY_AND_ASSIGN(Checksum);
+};
+
} // namespace internal
} // namespace v8
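
Note on the Checksum class moved here from code-serializer.cc: it is Fletcher's checksum over word-sized chunks, with the two running sums folded down to 32 bits on 64-bit hosts, and it requires a word-aligned payload length (the DCHECK above). A standalone version that round-trips the Check() logic, using no V8 types:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Checksum {
      uint32_t a = 0, b = 0;
    };

    // Fletcher's checksum over uintptr_t-sized words, reduced to 32-bit parts.
    Checksum Compute(const uint8_t* payload, size_t length) {
      assert(length % sizeof(uintptr_t) == 0);  // Word-aligned, per the DCHECK.
      uintptr_t a = 1, b = 0;                   // a starts at 1, as above.
      for (size_t i = 0; i < length; i += sizeof(uintptr_t)) {
        uintptr_t word;
        memcpy(&word, payload + i, sizeof(word));
        a += word;  // Unsigned overflow expected and intended.
        b += a;
      }
    #if UINTPTR_MAX > 0xFFFFFFFFu
      a ^= a >> 32;  // Fold 64-bit sums down to 32 bits.
      b ^= b >> 32;
    #endif
      return {static_cast<uint32_t>(a), static_cast<uint32_t>(b)};
    }

    int main() {
      std::vector<uint8_t> payload(4 * sizeof(uintptr_t), 0xAB);
      Checksum written = Compute(payload.data(), payload.size());
      Checksum read = Compute(payload.data(), payload.size());
      assert(written.a == read.a && written.b == read.b);  // Check() succeeds.
    }
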
diff --git a/chromium/v8/src/snapshot/serializer.cc b/chromium/v8/src/snapshot/serializer.cc
index 56d87b89165..a8b911a1914 100644
--- a/chromium/v8/src/snapshot/serializer.cc
+++ b/chromium/v8/src/snapshot/serializer.cc
@@ -111,8 +111,7 @@ template <class AllocatorT>
void Serializer<AllocatorT>::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
- // Builtins and bytecode handlers are serialized in a separate pass by the
- // BuiltinSerializer.
+ // Builtins are serialized in a separate pass by the BuiltinSerializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
for (Object** current = start; current < end; current++) {
@@ -233,16 +232,15 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
template <class AllocatorT>
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
if (!obj->IsCode()) return false;
- Code* code = Code::cast(obj);
- if (isolate()->heap()->IsDeserializeLazyHandler(code)) return false;
- return (code->kind() == Code::BYTECODE_HANDLER);
+ return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
}
template <class AllocatorT>
void Serializer<AllocatorT>::PutRoot(
- int root_index, HeapObject* object,
+ RootIndex root, HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
SerializerDeserializer::WhereToPoint where_to_point, int skip) {
+ int root_index = static_cast<int>(root);
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
object->ShortPrint();
@@ -251,7 +249,7 @@ void Serializer<AllocatorT>::PutRoot(
// Assert that the first 32 root array items are a conscious choice. They are
// chosen so that the most common ones can be encoded more efficiently.
- STATIC_ASSERT(Heap::kArgumentsMarkerRootIndex ==
+ STATIC_ASSERT(static_cast<int>(RootIndex::kArgumentsMarker) ==
kNumberOfRootArrayConstants - 1);
if (how_to_code == kPlain && where_to_point == kStartOfObject &&
@@ -330,14 +328,14 @@ void Serializer<AllocatorT>::PutNextChunk(int space) {
}
template <class AllocatorT>
-void Serializer<AllocatorT>::Pad() {
+void Serializer<AllocatorT>::Pad(int padding_offset) {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
sink_.Put(kNop, "Padding");
}
// Pad up to pointer size for checksum.
- while (!IsAligned(sink_.Position(), kPointerAlignment)) {
+ while (!IsAligned(sink_.Position() + padding_offset, kPointerAlignment)) {
sink_.Put(kNop, "Padding");
}
}
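
Pad now takes an offset so alignment can be computed relative to the final blob rather than to the sink alone. A sketch of the logic over a plain byte vector (kNop's value is assumed here):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr uint8_t kNop = 0x00;  // assumed encoding of the nop bytecode
    constexpr size_t kPointerAlignment = sizeof(void*);

    // Emit 3 nops so a non-branching 4-byte GetInt read past the end stays
    // in bounds, then pad until position + padding_offset is pointer-aligned.
    void Pad(std::vector<uint8_t>* sink, size_t padding_offset = 0) {
      for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) sink->push_back(kNop);
      while ((sink->size() + padding_offset) % kPointerAlignment != 0) {
        sink->push_back(kNop);
      }
    }

A caller whose payload does not sit flush on a pointer boundary in the final blob passes the extra byte count as padding_offset, so the checksummed region still covers a whole number of words.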
@@ -436,10 +434,10 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
if (!typed_array->is_on_heap()) {
// Explicitly serialize the backing store now.
JSArrayBuffer* buffer = JSArrayBuffer::cast(typed_array->buffer());
- CHECK(buffer->byte_length()->IsSmi());
- CHECK(typed_array->byte_offset()->IsSmi());
- int32_t byte_length = NumberToInt32(buffer->byte_length());
- int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
+ CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
+ CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
+ int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
+ int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
// We need to calculate the backing store from the external pointer
// because the ArrayBuffer may already have been serialized.
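
Since byte_length and byte_offset are now plain length types rather than heap Numbers, the guard becomes a range check plus a cast instead of a NumberToInt32 conversion. A minimal sketch (kSmiMaxValue here stands in for Smi::kMaxValue, whose actual value depends on Smi width, 2^30 - 1 or 2^31 - 1):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kSmiMaxValue = (size_t{1} << 30) - 1;  // assumed Smi width

    int32_t CheckedSmiCast(size_t byte_length) {
      // The snapshot cannot represent lengths outside the Smi range.
      assert(byte_length <= kSmiMaxValue);
      return static_cast<int32_t>(byte_length);
    }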
@@ -469,9 +467,8 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
JSArrayBuffer* buffer = JSArrayBuffer::cast(object_);
void* backing_store = buffer->backing_store();
// We cannot store a byte_length larger than the Smi range in the snapshot.
- // Attempt to make sure that NumberToInt32 produces something sensible.
- CHECK(buffer->byte_length()->IsSmi());
- int32_t byte_length = NumberToInt32(buffer->byte_length());
+ CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
+ int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
// The embedder-allocated backing store only exists for the off-heap case.
if (backing_store != nullptr) {
@@ -505,7 +502,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
}
} else {
ExternalOneByteString* string = ExternalOneByteString::cast(object_);
- DCHECK(string->is_short());
+ DCHECK(string->is_uncached());
const NativesExternalStringResource* resource =
reinterpret_cast<const NativesExternalStringResource*>(
string->resource());
@@ -581,7 +578,8 @@ class UnlinkWeakNextScope {
public:
explicit UnlinkWeakNextScope(Heap* heap, HeapObject* object)
: object_(nullptr) {
- if (object->IsAllocationSite()) {
+ if (object->IsAllocationSite() &&
+ AllocationSite::cast(object)->HasWeakNext()) {
object_ = object;
next_ = AllocationSite::cast(object)->weak_next();
AllocationSite::cast(object)->set_weak_next(
@@ -729,8 +727,7 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
HeapObject* host, MaybeObject** start, MaybeObject** end) {
MaybeObject** current = start;
while (current < end) {
- while (current < end &&
- ((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject())) {
+ while (current < end && ((*current)->IsSmi() || (*current)->IsCleared())) {
current++;
}
if (current < end) {
@@ -738,12 +735,14 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
}
HeapObject* current_contents;
HeapObjectReferenceType reference_type;
- while (current < end && (*current)->ToStrongOrWeakHeapObject(
- &current_contents, &reference_type)) {
- int root_index = serializer_->root_index_map()->Lookup(current_contents);
+ while (current < end &&
+ (*current)->GetHeapObject(&current_contents, &reference_type)) {
+ RootIndex root_index;
// Repeats are not subject to the write barrier so we can only use
// immortal immovable root members. They are never in new space.
- if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
+ if (current != start &&
+ serializer_->root_index_map()->Lookup(current_contents,
+ &root_index) &&
Heap::RootIsImmortalImmovable(root_index) &&
*current == current[-1]) {
DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
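
The repeat optimization in the hunk above survives the Lookup change: a slot is repeat-eligible only if it equals its predecessor and resolves to an immortal immovable root, because repeats skip the write barrier. A hedged sketch of that predicate over stub types (not V8's real ones):

    #include <map>
    #include <set>

    enum class RootIndex { kUndefinedValue, kTheHoleValue, kEmptyFixedArray };

    struct RootsStub {
      std::map<const void*, RootIndex> index_of;  // stand-in for RootIndexMap
      std::set<RootIndex> immortal_immovable;     // roots that never move
      bool Lookup(const void* obj, RootIndex* out) const {
        auto it = index_of.find(obj);
        if (it == index_of.end()) return false;
        *out = it->second;
        return true;
      }
    };

    // A slot may be encoded as a repeat of its predecessor only when both
    // reference the same immortal immovable root: repeats bypass the write
    // barrier, which is safe only for objects that never move and never
    // live in new space.
    bool CanEmitRootRepeat(const RootsStub& roots, const void* prev_slot,
                           const void* slot) {
      RootIndex root;
      return slot == prev_slot && roots.Lookup(slot, &root) &&
             roots.immortal_immovable.count(root) > 0;
    }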
diff --git a/chromium/v8/src/snapshot/serializer.h b/chromium/v8/src/snapshot/serializer.h
index 9427cb6c78c..5a08e4299e1 100644
--- a/chromium/v8/src/snapshot/serializer.h
+++ b/chromium/v8/src/snapshot/serializer.h
@@ -172,8 +172,8 @@ class Serializer : public SerializerDeserializer {
Object** end) override;
void SerializeRootObject(Object* object);
- void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
- int skip);
+ void PutRoot(RootIndex root_index, HeapObject* object, HowToCode how,
+ WhereToPoint where, int skip);
void PutSmi(Smi* smi);
void PutBackReference(HeapObject* object, SerializerReference reference);
void PutAttachedReference(SerializerReference reference,
@@ -210,7 +210,8 @@ class Serializer : public SerializerDeserializer {
}
// GetInt reads 4 bytes at once, requiring padding at the end.
- void Pad();
+ // Use padding_offset to account for the space the caller intends to use
+ // after the padding.
+ void Pad(int padding_offset = 0);
// We may not need the code address map for logging for every instance
// of the serializer. Initialize it on demand.
@@ -284,6 +285,7 @@ class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
serializer_->PushStack(obj);
#endif // DEBUG
}
+ // NOLINTNEXTLINE (modernize-use-equals-default)
~ObjectSerializer() override {
#ifdef DEBUG
serializer_->PopStack();
diff --git a/chromium/v8/src/snapshot/snapshot-common.cc b/chromium/v8/src/snapshot/snapshot-common.cc
index 31f378792b3..95baef0cc0a 100644
--- a/chromium/v8/src/snapshot/snapshot-common.cc
+++ b/chromium/v8/src/snapshot/snapshot-common.cc
@@ -44,6 +44,7 @@ bool Snapshot::Initialize(Isolate* isolate) {
const v8::StartupData* blob = isolate->snapshot_blob();
CheckVersion(blob);
+ CHECK(VerifyChecksum(blob));
Vector<const byte> startup_data = ExtractStartupData(blob);
SnapshotData startup_snapshot_data(startup_data);
Vector<const byte> builtin_data = ExtractBuiltinData(blob);
@@ -136,13 +137,17 @@ void Snapshot::EnsureAllBuiltinsAreDeserialized(Isolate* isolate) {
DCHECK_NE(Builtins::kDeserializeLazy, i);
Code* code = builtins->builtin(i);
- if (code->builtin_index() == Builtins::kDeserializeLazy) {
+ if (code->builtin_index() == Builtins::LazyDeserializerForBuiltin(i)) {
code = Snapshot::DeserializeBuiltin(isolate, i);
}
DCHECK_EQ(i, code->builtin_index());
DCHECK_EQ(code, builtins->builtin(i));
}
+
+ // Re-initialize the dispatch table now that any bytecode handlers have
+ // been deserialized.
+ isolate->interpreter()->InitializeDispatchTable();
}
// static
@@ -168,42 +173,6 @@ Code* Snapshot::EnsureBuiltinIsDeserialized(Isolate* isolate,
return code;
}
-// static
-Code* Snapshot::DeserializeHandler(Isolate* isolate,
- interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale) {
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing handler %s\n",
- interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str());
- }
-
- base::ElapsedTimer timer;
- if (FLAG_profile_deserialization) timer.Start();
-
- const v8::StartupData* blob = isolate->snapshot_blob();
- Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
- BuiltinSnapshotData builtin_snapshot_data(builtin_data);
-
- CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
- BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
- Code* code = builtin_deserializer.DeserializeHandler(bytecode, operand_scale);
-
- if (FLAG_profile_deserialization) {
- double ms = timer.Elapsed().InMillisecondsF();
- int bytes = code->Size();
- PrintF("[Deserializing handler %s (%d bytes) took %0.3f ms]\n",
- interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str(),
- bytes, ms);
- }
-
- if (isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling()) {
- isolate->logger()->LogBytecodeHandler(bytecode, operand_scale, code);
- }
-
- return code;
-}
-
void ProfileDeserialization(
const SnapshotData* startup_snapshot, const SnapshotData* builtin_snapshot,
const std::vector<SnapshotData*>& context_snapshots) {
@@ -234,15 +203,22 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
uint32_t num_contexts = static_cast<uint32_t>(context_snapshots.size());
uint32_t startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
uint32_t total_length = startup_snapshot_offset;
+ DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(startup_snapshot->RawData().length());
+ DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(builtin_snapshot->RawData().length());
+ DCHECK(IsAligned(total_length, kPointerAlignment));
for (const auto context_snapshot : context_snapshots) {
total_length += static_cast<uint32_t>(context_snapshot->RawData().length());
+ DCHECK(IsAligned(total_length, kPointerAlignment));
}
ProfileDeserialization(startup_snapshot, builtin_snapshot, context_snapshots);
char* data = new char[total_length];
+ // Zero out pre-payload data. Part of that is only used for padding.
+ memset(data, 0, StartupSnapshotOffset(num_contexts));
+
SetHeaderValue(data, kNumberOfContextsOffset, num_contexts);
SetHeaderValue(data, kRehashabilityOffset, can_be_rehashed ? 1 : 0);
@@ -292,8 +268,13 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
payload_offset += payload_length;
}
- v8::StartupData result = {data, static_cast<int>(total_length)};
DCHECK_EQ(total_length, payload_offset);
+ v8::StartupData result = {data, static_cast<int>(total_length)};
+
+ Checksum checksum(ChecksummedContent(&result));
+ SetHeaderValue(data, kChecksumPartAOffset, checksum.a());
+ SetHeaderValue(data, kChecksumPartBOffset, checksum.b());
+
return result;
}
@@ -308,9 +289,11 @@ bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code* code) {
case Builtins::TFS:
break;
- // Bytecode handlers will only ever be used by the interpreter and so there
- // will never be a need to use trampolines with them.
+ // Bytecode handlers (and their lazy deserializers) will only ever be used
+ // by the interpreter and so there will never be a need to use trampolines
+ // with them.
case Builtins::BCH:
+ case Builtins::DLH:
case Builtins::API:
case Builtins::ASM:
// TODO(jgruber): Extend checks to remaining kinds.
@@ -511,6 +494,19 @@ uint32_t Snapshot::ExtractNumContexts(const v8::StartupData* data) {
return num_contexts;
}
+bool Snapshot::VerifyChecksum(const v8::StartupData* data) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+ uint32_t expected_a = GetHeaderValue(data, kChecksumPartAOffset);
+ uint32_t expected_b = GetHeaderValue(data, kChecksumPartBOffset);
+ Checksum checksum(ChecksummedContent(data));
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Verifying snapshot checksum took %0.3f ms]\n", ms);
+ }
+ return checksum.Check(expected_a, expected_b);
+}
+
void EmbeddedData::PrintStatistics() const {
DCHECK(FLAG_serialization_statistics);
@@ -644,12 +640,18 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
// Calculate sizes.
uint32_t reservation_size =
static_cast<uint32_t>(reservations.size()) * kUInt32Size;
+ uint32_t payload_offset = kHeaderSize + reservation_size;
+ uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
uint32_t size =
- kHeaderSize + reservation_size + static_cast<uint32_t>(payload->size());
+ padded_payload_offset + static_cast<uint32_t>(payload->size());
+ DCHECK(IsAligned(size, kPointerAlignment));
// Allocate backing store and create result data.
AllocateData(size);
+ // Zero out pre-payload data. Part of that is only used for padding.
+ memset(data_, 0, padded_payload_offset);
+
// Set header values.
SetMagicNumber(serializer->isolate());
SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
@@ -660,7 +662,7 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
reservation_size);
// Copy serialized data.
- CopyBytes(data_ + kHeaderSize + reservation_size, payload->data(),
+ CopyBytes(data_ + padded_payload_offset, payload->data(),
static_cast<size_t>(payload->size()));
}
@@ -679,7 +681,9 @@ std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
Vector<const byte> SnapshotData::Payload() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- const byte* payload = data_ + kHeaderSize + reservations_size;
+ uint32_t padded_payload_offset =
+ POINTER_SIZE_ALIGN(kHeaderSize + reservations_size);
+ const byte* payload = data_ + padded_payload_offset;
uint32_t length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + length);
return Vector<const byte>(payload, length);
@@ -689,30 +693,22 @@ BuiltinSnapshotData::BuiltinSnapshotData(const BuiltinSerializer* serializer)
: SnapshotData(serializer) {}
Vector<const byte> BuiltinSnapshotData::Payload() const {
- uint32_t reservations_size =
- GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- const byte* payload = data_ + kHeaderSize + reservations_size;
- const int builtin_offsets_size =
- BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
- uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
- DCHECK_EQ(data_ + size_, payload + payload_length);
- DCHECK_GT(payload_length, builtin_offsets_size);
- return Vector<const byte>(payload, payload_length - builtin_offsets_size);
+ Vector<const byte> payload = SnapshotData::Payload();
+ const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ DCHECK_EQ(data_ + size_, payload.start() + payload.size());
+ DCHECK_GT(payload.size(), builtin_offsets_size);
+ return Vector<const byte>(payload.start(),
+ payload.size() - builtin_offsets_size);
}
Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
- uint32_t reservations_size =
- GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- const byte* payload = data_ + kHeaderSize + reservations_size;
- const int builtin_offsets_size =
- BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
- uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
- DCHECK_EQ(data_ + size_, payload + payload_length);
- DCHECK_GT(payload_length, builtin_offsets_size);
+ Vector<const byte> payload = SnapshotData::Payload();
+ const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ DCHECK_EQ(data_ + size_, payload.start() + payload.size());
+ DCHECK_GT(payload.size(), builtin_offsets_size);
const uint32_t* data = reinterpret_cast<const uint32_t*>(
- payload + payload_length - builtin_offsets_size);
- return Vector<const uint32_t>(data,
- BuiltinSnapshotUtils::kNumberOfCodeObjects);
+ payload.start() + payload.size() - builtin_offsets_size);
+ return Vector<const uint32_t>(data, Builtins::builtin_count);
}
} // namespace internal
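
Several hunks above pair POINTER_SIZE_ALIGN with a memset over the pre-payload bytes: the payload must start word-aligned for the word-at-a-time checksum, and the padding must be zeroed so the checksum is deterministic (and MSan-clean). A sketch of the rounding helper and the resulting invariant:

    #include <cstdint>

    constexpr uint32_t kPointerSize = sizeof(void*);

    // Round an offset up to the next pointer boundary, as POINTER_SIZE_ALIGN
    // does; the bit trick relies on kPointerSize being a power of two.
    constexpr uint32_t PointerSizeAlign(uint32_t value) {
      return (value + kPointerSize - 1) & ~(kPointerSize - 1);
    }

    // Example: a header plus reservation table of arbitrary size still
    // yields a word-aligned payload offset (values here are illustrative).
    constexpr uint32_t kHeaderSize = 20;
    constexpr uint32_t kReservationBytes = 13;
    static_assert(PointerSizeAlign(kHeaderSize + kReservationBytes) %
                      kPointerSize == 0,
                  "payload starts on a pointer boundary");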
diff --git a/chromium/v8/src/snapshot/snapshot-source-sink.h b/chromium/v8/src/snapshot/snapshot-source-sink.h
index 584f86a7605..8cf86526a36 100644
--- a/chromium/v8/src/snapshot/snapshot-source-sink.h
+++ b/chromium/v8/src/snapshot/snapshot-source-sink.h
@@ -27,7 +27,7 @@ class SnapshotByteSource final {
explicit SnapshotByteSource(Vector<const byte> payload)
: data_(payload.start()), length_(payload.length()), position_(0) {}
- ~SnapshotByteSource() {}
+ ~SnapshotByteSource() = default;
bool HasMore() { return position_ < length_; }
@@ -82,10 +82,10 @@ class SnapshotByteSource final {
*/
class SnapshotByteSink {
public:
- SnapshotByteSink() {}
+ SnapshotByteSink() = default;
explicit SnapshotByteSink(int initial_size) : data_(initial_size) {}
- ~SnapshotByteSink() {}
+ ~SnapshotByteSink() = default;
void Put(byte b, const char* description) { data_.push_back(b); }
diff --git a/chromium/v8/src/snapshot/snapshot.h b/chromium/v8/src/snapshot/snapshot.h
index b973ebb3566..9edc12c1ce6 100644
--- a/chromium/v8/src/snapshot/snapshot.h
+++ b/chromium/v8/src/snapshot/snapshot.h
@@ -175,12 +175,6 @@ class Snapshot : public AllStatic {
static Code* EnsureBuiltinIsDeserialized(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
- // Deserializes a single given handler code object. Intended to be called at
- // runtime after the isolate has been fully initialized.
- static Code* DeserializeHandler(Isolate* isolate,
- interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale);
-
// ---------------- Helper methods ----------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);
@@ -189,6 +183,8 @@ class Snapshot : public AllStatic {
// To be implemented by the snapshot source.
static const v8::StartupData* DefaultSnapshotBlob();
+ static bool VerifyChecksum(const v8::StartupData* data);
+
// ---------------- Serialization ----------------
static v8::StartupData CreateSnapshotBlob(
@@ -224,10 +220,12 @@ class Snapshot : public AllStatic {
// Snapshot blob layout:
// [0] number of contexts N
// [1] rehashability
- // [2] (128 bytes) version string
- // [3] offset to builtins
- // [4] offset to context 0
- // [5] offset to context 1
+ // [2] checksum part A
+ // [3] checksum part B
 + // [4] (64 bytes) version string
+ // [5] offset to builtins
+ // [6] offset to context 0
+ // [7] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
@@ -239,16 +237,28 @@ class Snapshot : public AllStatic {
// TODO(yangguo): generalize rehashing, and remove this flag.
static const uint32_t kRehashabilityOffset =
kNumberOfContextsOffset + kUInt32Size;
- static const uint32_t kVersionStringOffset =
+ static const uint32_t kChecksumPartAOffset =
kRehashabilityOffset + kUInt32Size;
+ static const uint32_t kChecksumPartBOffset =
+ kChecksumPartAOffset + kUInt32Size;
+ static const uint32_t kVersionStringOffset =
+ kChecksumPartBOffset + kUInt32Size;
static const uint32_t kVersionStringLength = 64;
static const uint32_t kBuiltinOffsetOffset =
kVersionStringOffset + kVersionStringLength;
static const uint32_t kFirstContextOffsetOffset =
kBuiltinOffsetOffset + kUInt32Size;
+ static Vector<const byte> ChecksummedContent(const v8::StartupData* data) {
+ const uint32_t kChecksumStart = kVersionStringOffset;
+ return Vector<const byte>(
+ reinterpret_cast<const byte*>(data->data + kChecksumStart),
+ data->raw_size - kChecksumStart);
+ }
+
static uint32_t StartupSnapshotOffset(int num_contexts) {
- return kFirstContextOffsetOffset + num_contexts * kInt32Size;
+ return POINTER_SIZE_ALIGN(kFirstContextOffsetOffset +
+ num_contexts * kInt32Size);
}
static uint32_t ContextSnapshotOffsetOffset(int index) {
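
The header grows by two uint32 checksum fields, and everything from the version string onward is covered by the checksum, so the mutable header words ahead of it are excluded. The offsets compose as in this compile-time sketch mirroring the constants above:

    #include <cstdint>

    constexpr uint32_t kUInt32Size = 4;
    constexpr uint32_t kNumberOfContextsOffset = 0;
    constexpr uint32_t kRehashabilityOffset =
        kNumberOfContextsOffset + kUInt32Size;
    constexpr uint32_t kChecksumPartAOffset =
        kRehashabilityOffset + kUInt32Size;
    constexpr uint32_t kChecksumPartBOffset =
        kChecksumPartAOffset + kUInt32Size;
    constexpr uint32_t kVersionStringOffset =
        kChecksumPartBOffset + kUInt32Size;
    constexpr uint32_t kVersionStringLength = 64;
    constexpr uint32_t kBuiltinOffsetOffset =
        kVersionStringOffset + kVersionStringLength;

    static_assert(kChecksumPartAOffset == 8 && kVersionStringOffset == 16,
                  "checksum parts sit between rehashability and the version");

StartupSnapshotOffset is additionally rounded with POINTER_SIZE_ALIGN, and CreateSnapshotBlob DCHECKs every intermediate length, because the Checksum constructor reads whole words from kVersionStringOffset to the end of the blob.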
diff --git a/chromium/v8/src/snapshot/startup-deserializer.cc b/chromium/v8/src/snapshot/startup-deserializer.cc
index 8fbb0737037..e9c23bb9073 100644
--- a/chromium/v8/src/snapshot/startup-deserializer.cc
+++ b/chromium/v8/src/snapshot/startup-deserializer.cc
@@ -37,7 +37,8 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
isolate->heap()->IterateSmiRoots(this);
- isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate->heap()->IterateStrongRoots(this,
+ VISIT_ONLY_STRONG_FOR_SERIALIZATION);
isolate->heap()->RepairFreeListsAfterDeserialization();
isolate->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
DeserializeDeferredObjects();
@@ -46,7 +47,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// Deserialize eager builtins from the builtin snapshot. Note that deferred
// objects must have been deserialized prior to this.
- builtin_deserializer.DeserializeEagerBuiltinsAndHandlers();
+ builtin_deserializer.DeserializeEagerBuiltins();
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
@@ -64,7 +65,6 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate, LogCodeObjects());
- LOG_CODE_EVENT(isolate, LogBytecodeHandlers());
LOG_CODE_EVENT(isolate, LogCompiledFunctions());
isolate->builtins()->MarkInitialized();
diff --git a/chromium/v8/src/snapshot/startup-serializer.cc b/chromium/v8/src/snapshot/startup-serializer.cc
index 9ad6cda5d10..146d413de84 100644
--- a/chromium/v8/src/snapshot/startup-serializer.cc
+++ b/chromium/v8/src/snapshot/startup-serializer.cc
@@ -34,10 +34,10 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map()->Lookup(obj);
+ RootIndex root_index;
// We can only encode a root as such if it has already been serialized.
// That applies to root indices below the wave front.
- if (root_index != RootIndexMap::kInvalidRootIndex) {
+ if (root_index_map()->Lookup(obj, &root_index)) {
if (root_has_been_serialized(root_index)) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
@@ -136,7 +136,7 @@ void StartupSerializer::VisitRootPointers(Root root, const char* description,
// referenced using kRootArray bytecodes.
for (Object** current = start; current < end; current++) {
SerializeRootObject(*current);
- int root_index = static_cast<int>(current - start);
+ size_t root_index = static_cast<size_t>(current - start);
root_has_been_serialized_.set(root_index);
}
} else {
@@ -152,9 +152,9 @@ void StartupSerializer::CheckRehashability(HeapObject* obj) {
}
bool StartupSerializer::MustBeDeferred(HeapObject* object) {
- if (root_has_been_serialized_.test(Heap::kFreeSpaceMapRootIndex) &&
- root_has_been_serialized_.test(Heap::kOnePointerFillerMapRootIndex) &&
- root_has_been_serialized_.test(Heap::kTwoPointerFillerMapRootIndex)) {
+ if (root_has_been_serialized(RootIndex::kFreeSpaceMap) &&
+ root_has_been_serialized(RootIndex::kOnePointerFillerMap) &&
+ root_has_been_serialized(RootIndex::kTwoPointerFillerMap)) {
// All required root objects are serialized, so any aligned objects can
// be saved without problems.
return false;
diff --git a/chromium/v8/src/snapshot/startup-serializer.h b/chromium/v8/src/snapshot/startup-serializer.h
index cf334d10b20..0b2065c3d05 100644
--- a/chromium/v8/src/snapshot/startup-serializer.h
+++ b/chromium/v8/src/snapshot/startup-serializer.h
@@ -28,8 +28,8 @@ class StartupSerializer : public Serializer<> {
int PartialSnapshotCacheIndex(HeapObject* o);
bool can_be_rehashed() const { return can_be_rehashed_; }
- bool root_has_been_serialized(int root_index) const {
- return root_has_been_serialized_.test(root_index);
+ bool root_has_been_serialized(RootIndex root_index) const {
+ return root_has_been_serialized_.test(static_cast<size_t>(root_index));
}
private:
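
The bitset is still indexed by size_t internally, but the accessor now takes the RootIndex enum, confining the cast to one place. A minimal sketch (enumerators hypothetical, kEntriesCount standing in for RootsTable::kEntriesCount):

    #include <bitset>
    #include <cstddef>

    enum class RootIndex : size_t {
      kFreeSpaceMap,
      kOnePointerFillerMap,
      kTwoPointerFillerMap,
      kEntriesCount  // stand-in for RootsTable::kEntriesCount
    };

    class RootSerializationState {
     public:
      bool root_has_been_serialized(RootIndex root) const {
        return serialized_.test(static_cast<size_t>(root));
      }
      void mark_serialized(RootIndex root) {
        serialized_.set(static_cast<size_t>(root));
      }

     private:
      std::bitset<static_cast<size_t>(RootIndex::kEntriesCount)> serialized_;
    };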
@@ -69,7 +69,7 @@ class StartupSerializer : public Serializer<> {
void CheckRehashability(HeapObject* obj);
- std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
+ std::bitset<RootsTable::kEntriesCount> root_has_been_serialized_;
PartialCacheIndexMap partial_cache_index_map_;
std::vector<AccessorInfo*> accessor_infos_;
std::vector<CallHandlerInfo*> call_handler_infos_;
@@ -83,8 +83,8 @@ class StartupSerializer : public Serializer<> {
class SerializedHandleChecker : public RootVisitor {
public:
SerializedHandleChecker(Isolate* isolate, std::vector<Context*>* contexts);
- virtual void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end);
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
bool CheckGlobalAndEternalHandles();
private: