path: root/chromium/v8/src/heap/basic-memory-chunk.h
author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-08-30 10:22:43 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-08-30 12:36:28 +0000
commit    271a6c3487a14599023a9106329505597638d793 (patch)
tree      e040d58ffc86c1480b79ca8528020ca9ec919bf8 /chromium/v8/src/heap/basic-memory-chunk.h
parent    7b2ffa587235a47d4094787d72f38102089f402a (diff)
download  qtwebengine-chromium-271a6c3487a14599023a9106329505597638d793.tar.gz
BASELINE: Update Chromium to 77.0.3865.59
Change-Id: I1e89a5f3b009a9519a6705102ad65c92fe736f21
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/heap/basic-memory-chunk.h')
-rw-r--r--  chromium/v8/src/heap/basic-memory-chunk.h  229
1 file changed, 229 insertions(+), 0 deletions(-)
diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h
new file mode 100644
index 00000000000..65fc072bd24
--- /dev/null
+++ b/chromium/v8/src/heap/basic-memory-chunk.h
@@ -0,0 +1,229 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASIC_MEMORY_CHUNK_H_
+#define V8_HEAP_BASIC_MEMORY_CHUNK_H_
+
+#include <type_traits>
+
+#include "src/base/atomic-utils.h"
+#include "src/common/globals.h"
+#include "src/heap/marking.h"
+
+namespace v8 {
+namespace internal {
+
+class MemoryChunk;
+
+class BasicMemoryChunk {
+ public:
+ enum Flag {
+ NO_FLAGS = 0u,
+ IS_EXECUTABLE = 1u << 0,
+ POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
+ POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
+ // A page in the from-space or a young large page that was not scavenged
+ // yet.
+ FROM_PAGE = 1u << 3,
+ // A page in the to-space or a young large page that was scavenged.
+ TO_PAGE = 1u << 4,
+ LARGE_PAGE = 1u << 5,
+ EVACUATION_CANDIDATE = 1u << 6,
+ NEVER_EVACUATE = 1u << 7,
+
+ // Large objects can have a progress bar in their page header. These objects
+ // are scanned in increments and are kept black while being scanned. Even if
+ // the mutator writes to them, they are kept black and the written value is
+ // transitioned from white to grey.
+ HAS_PROGRESS_BAR = 1u << 8,
+
+ // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
+ // from new to old space during evacuation.
+ PAGE_NEW_OLD_PROMOTION = 1u << 9,
+
+ // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+ // within the new space during evacuation.
+ PAGE_NEW_NEW_PROMOTION = 1u << 10,
+
+ // This flag is intended to be used for testing. It works only when both
+ // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+ // are set. It forces the page to become an evacuation candidate at the
+ // next candidate selection cycle.
+ FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
+
+ // This flag is intended to be used for testing.
+ NEVER_ALLOCATE_ON_PAGE = 1u << 12,
+
+ // The memory chunk is already logically freed; however, the actual freeing
+ // still has to be performed.
+ PRE_FREED = 1u << 13,
+
+ // |POOLED|: When actually freeing this chunk, only uncommit and do not
+ // give up the reservation as we still reuse the chunk at some point.
+ POOLED = 1u << 14,
+
+ // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+ // has been aborted and needs special handling by the sweeper.
+ COMPACTION_WAS_ABORTED = 1u << 15,
+
+ // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing, evacuation
+ // of pages is sometimes aborted. This flag is used to avoid repeatedly
+ // triggering on the same page.
+ COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
+
+ // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
+ // to iterate the page.
+ SWEEP_TO_ITERATE = 1u << 17,
+
+ // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
+ // enabled.
+ INCREMENTAL_MARKING = 1u << 18,
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+
+ // The memory chunk freeing bookkeeping has been performed but the chunk has
+ // not yet been freed.
+ UNREGISTERED = 1u << 20,
+
+ // The memory chunk belongs to the read-only heap and does not participate
+ // in garbage collection. This is used instead of owner for identity
+ // checking since read-only chunks have no owner once they are detached.
+ READ_ONLY_HEAP = 1u << 21,
+ };
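+
+ // Usage sketch (illustrative only; |chunk| is a hypothetical
+ // BasicMemoryChunk*): the flags are plain bits, so related checks can be
+ // composed with bitwise masks, e.g. a young-generation test:
+ //
+ //   const uintptr_t kYoungMask = FROM_PAGE | TO_PAGE;
+ //   bool is_young = (chunk->GetFlags() & kYoungMask) != 0;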
+
+ static const intptr_t kAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static const intptr_t kAlignmentMask = kAlignment - 1;
+
+ BasicMemoryChunk(size_t size, Address area_start, Address area_end);
+
+ static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
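+
+ // Worked example (assuming kPageSizeBits == 18, i.e. 256 KiB alignment;
+ // the actual value is configuration-dependent): kAlignment == 0x40000 and
+ // kAlignmentMask == 0x3ffff, so for any address inside a chunk:
+ //
+ //   BaseAddress(0x40083abc) == 0x40083abc & ~0x3ffff == 0x40080000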
+
+ Address address() const { return reinterpret_cast<Address>(this); }
+
+ size_t size() const { return size_; }
+ void set_size(size_t size) { size_ = size; }
+
+ Address area_start() const { return area_start_; }
+
+ Address area_end() const { return area_end_; }
+ void set_area_end(Address area_end) { area_end_ = area_end; }
+
+ size_t area_size() const {
+ return static_cast<size_t>(area_end() - area_start());
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ void SetFlag(Flag flag) {
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ flags_ |= flag;
+ } else {
+ base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
+ }
+ }
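+
+ // Usage sketch (illustrative only; |chunk| is hypothetical): the owning
+ // thread can use the default non-atomic mode, while concurrent threads must
+ // opt into the atomic mode so the read-modify-write on |flags_| is
+ // race-free:
+ //
+ //   chunk->SetFlag(NEVER_EVACUATE);                           // owner thread
+ //   chunk->SetFlag<AccessMode::ATOMIC>(INCREMENTAL_MARKING);  // concurrent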
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool IsFlagSet(Flag flag) const {
+ return (GetFlags<access_mode>() & flag) != 0;
+ }
+
+ void ClearFlag(Flag flag) { flags_ &= ~flag; }
+
+ // Set or clear multiple flags at a time. The flags in |mask| are set to
+ // the corresponding values in |flags|; the rest retain their current
+ // values in |flags_|.
+ void SetFlags(uintptr_t flags, uintptr_t mask) {
+ flags_ = (flags_ & ~mask) | (flags & mask);
+ }
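+
+ // Worked example (illustrative only): flipping a page from to-space to
+ // from-space in one call, leaving all unrelated bits untouched:
+ //
+ //   chunk->SetFlags(FROM_PAGE, FROM_PAGE | TO_PAGE);
+ //   // FROM_PAGE is now set, TO_PAGE is cleared, other bits are retained.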
+
+ // Return all current flags.
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ uintptr_t GetFlags() const {
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ return flags_;
+ } else {
+ return base::AsAtomicWord::Relaxed_Load(&flags_);
+ }
+ }
+
+ bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
+
+ // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+
+ bool Contains(Address addr) const {
+ return addr >= area_start() && addr < area_end();
+ }
+
+ // Checks whether |addr| can be a limit of addresses in this page. It's a
+ // limit if it's in the page, or if it's just after the last byte of the page.
+ bool ContainsLimit(Address addr) const {
+ return addr >= area_start() && addr <= area_end();
+ }
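+
+ // Example (illustrative only): area_end() is one past the last allocatable
+ // byte, so it is a valid limit but not a contained address:
+ //
+ //   chunk->ContainsLimit(chunk->area_end());  // true
+ //   chunk->Contains(chunk->area_end());       // false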
+
+ V8_EXPORT_PRIVATE static bool HasHeaderSentinel(Address slot_addr);
+
+ void ReleaseMarkingBitmap();
+
+ static const intptr_t kSizeOffset = 0;
+ static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
+ static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
+ static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
+ static const intptr_t kHeaderSentinelOffset =
+ kHeapOffset + kSystemPointerSize;
+
+ static const size_t kHeaderSize =
+ kSizeOffset + kSizetSize // size_t size
+ + kUIntptrSize // uintptr_t flags_
+ + kSystemPointerSize // Bitmap* marking_bitmap_
+ + kSystemPointerSize // Heap* heap_
+ + kSystemPointerSize // Address header_sentinel_
+ + kSystemPointerSize // Address area_start_
+ + kSystemPointerSize; // Address area_end_
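+
+ // Worked layout example, assuming an LP64 target where kSizetSize,
+ // kUIntptrSize and kSystemPointerSize are all 8:
+ //
+ //   offset  0: size_
+ //   offset  8: flags_
+ //   offset 16: marking_bitmap_
+ //   offset 24: heap_
+ //   offset 32: header_sentinel_
+ //   offset 40: area_start_
+ //   offset 48: area_end_
+ //   => kHeaderSize == 56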
+
+ protected:
+ // Overall size of the chunk, including the header and guards.
+ size_t size_;
+
+ uintptr_t flags_ = NO_FLAGS;
+
+ Bitmap* marking_bitmap_ = nullptr;
+
+ // TODO(v8:7464): Find a way to remove this.
+ // This goes against the spirit of BasicMemoryChunk, but until C++14/17 is
+ // the default it needs to live here, because MemoryChunk is not standard
+ // layout under C++11.
+ Heap* heap_;
+
+ // This is used to distinguish the memory chunk header from the interior of
+ // a large page. The memory chunk header stores an impossible tagged pointer
+ // here: the tagged pointer of the page start. A field in a large object is
+ // guaranteed not to contain such a pointer.
+ Address header_sentinel_;
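+
+ // Sketch of the sentinel check (an assumption about how HasHeaderSentinel
+ // behaves, not necessarily the actual implementation):
+ //
+ //   Address base = BaseAddress(slot_addr);
+ //   Address stored = *reinterpret_cast<Address*>(base + kHeaderSentinelOffset);
+ //   return stored == base + kHeapObjectTag;  // tagged pointer of page start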
+
+ // Start and end of allocatable memory on this chunk.
+ Address area_start_;
+ Address area_end_;
+
+ friend class BasicMemoryChunkValidator;
+};
+
+STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
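+
+// The standard-layout guarantee above is what makes the offsetof() checks in
+// BasicMemoryChunkValidator below well-defined under C++11.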
+
+class BasicMemoryChunkValidator {
+ // Computed offsets should match the compiler generated ones.
+ STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
+ offsetof(BasicMemoryChunk, size_));
+ STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+ offsetof(BasicMemoryChunk, flags_));
+ STATIC_ASSERT(BasicMemoryChunk::kMarkBitmapOffset ==
+ offsetof(BasicMemoryChunk, marking_bitmap_));
+ STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+ offsetof(BasicMemoryChunk, heap_));
+ STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset ==
+ offsetof(BasicMemoryChunk, header_sentinel_));
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_BASIC_MEMORY_CHUNK_H_