author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-05-03 13:42:47 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-05-15 10:27:51 +0000
commit     8c5c43c7b138c9b4b0bf56d946e61d3bbc111bec (patch)
tree       d29d987c4d7b173cf853279b79a51598f104b403 /chromium/base/allocator
parent     830c9e163d31a9180fadca926b3e1d7dfffb5021 (diff)
download   qtwebengine-chromium-8c5c43c7b138c9b4b0bf56d946e61d3bbc111bec.tar.gz
BASELINE: Update Chromium to 66.0.3359.156
Change-Id: I0c9831ad39911a086b6377b16f995ad75a51e441
Reviewed-by: Michal Klocek <michal.klocek@qt.io>
Diffstat (limited to 'chromium/base/allocator')
-rw-r--r--  chromium/base/allocator/BUILD.gn | 18
-rw-r--r--  chromium/base/allocator/allocator_check.cc | 2
-rw-r--r--  chromium/base/allocator/allocator_interception_mac.mm | 2
-rw-r--r--  chromium/base/allocator/allocator_shim.cc | 2
-rw-r--r--  chromium/base/allocator/allocator_shim_unittest.cc | 2
-rw-r--r--  chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc | 190
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator.cc | 86
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator.h | 12
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc.cc | 3
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc.h | 7
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc | 38
-rw-r--r--  chromium/base/allocator/winheap_stubs_win.cc | 4
12 files changed, 291 insertions, 75 deletions
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index 3aa14cd32ed..636a3420da1 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -175,6 +175,14 @@ if (use_allocator == "tcmalloc") {
#"win_allocator.cc",
]
+ # Not included on mips64el.
+ if (current_cpu == "mips64el") {
+ sources -= [
+ "$tcmalloc_dir/src/base/linuxthreads.cc",
+ "$tcmalloc_dir/src/base/linuxthreads.h",
+ ]
+ }
+
# Disable the heap checker in tcmalloc.
defines = [ "NO_HEAP_CHECK" ]
@@ -195,8 +203,10 @@ if (use_allocator == "tcmalloc") {
configs -= [ "//build/config/compiler:compiler_arm_thumb" ]
configs += [ "//build/config/compiler:compiler_arm" ]
- # TODO(crbug.com/633719) Make tcmalloc work with AFDO if possible.
- configs -= [ "//build/config/compiler:afdo" ]
+ # TODO(crbug.com/633719) Make tcmalloc work with AFDO on GCC if possible.
+ if (!is_clang) {
+ configs -= [ "//build/config/compiler:afdo" ]
+ }
deps = []
@@ -254,8 +264,8 @@ if (use_allocator == "tcmalloc") {
}
} # use_allocator == "tcmalloc"
-buildflag_header("features") {
- header = "features.h"
+buildflag_header("buildflags") {
+ header = "buildflags.h"
flags = [ "USE_ALLOCATOR_SHIM=$use_allocator_shim" ]
}
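
The features -> buildflags rename recurs in every file below: each consumer switches its include from base/allocator/features.h to base/allocator/buildflags.h. As a minimal sketch of the consumer side (the BUILDFLAG() macro comes from Chromium's build/buildflag.h; MaybeInitializeShim() is a hypothetical caller):

    #include "base/allocator/buildflags.h"  // Previously base/allocator/features.h.
    #include "build/buildflag.h"

    void MaybeInitializeShim() {
    #if BUILDFLAG(USE_ALLOCATOR_SHIM)
      // Shim-only setup would go here.
    #endif
    }
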
diff --git a/chromium/base/allocator/allocator_check.cc b/chromium/base/allocator/allocator_check.cc
index fba38d151d3..5fb86467dde 100644
--- a/chromium/base/allocator/allocator_check.cc
+++ b/chromium/base/allocator/allocator_check.cc
@@ -4,7 +4,7 @@
#include "base/allocator/allocator_check.h"
-#include "base/allocator/features.h"
+#include "base/allocator/buildflags.h"
#include "build/build_config.h"
#if defined(OS_WIN)
diff --git a/chromium/base/allocator/allocator_interception_mac.mm b/chromium/base/allocator/allocator_interception_mac.mm
index 68fa5468b59..50202870860 100644
--- a/chromium/base/allocator/allocator_interception_mac.mm
+++ b/chromium/base/allocator/allocator_interception_mac.mm
@@ -27,7 +27,7 @@
#include <new>
-#include "base/allocator/features.h"
+#include "base/allocator/buildflags.h"
#include "base/allocator/malloc_zone_functions_mac.h"
#include "base/bind.h"
#include "base/logging.h"
diff --git a/chromium/base/allocator/allocator_shim.cc b/chromium/base/allocator/allocator_shim.cc
index 4b7de5e1a32..e919f094c37 100644
--- a/chromium/base/allocator/allocator_shim.cc
+++ b/chromium/base/allocator/allocator_shim.cc
@@ -331,6 +331,6 @@ void InitializeAllocatorShim() {
#endif
#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
- (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS)
+ (defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif
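
Both arms of the revised guard now key off compiler-defined macros: GCC and Clang define __EXCEPTIONS when -fexceptions is in effect, and MSVC defines _CPPUNWIND when C++ unwinding is enabled (/EHs or /EHa). The old _HAS_EXCEPTIONS is an MSVC STL configuration macro that projects can set by hand, so it was a less reliable signal. A hypothetical standalone probe, not part of the patch:

    #include <cstdio>

    int main() {
    #if defined(__GNUC__) && defined(__EXCEPTIONS)
      std::puts("GCC/Clang, exceptions enabled (-fexceptions)");
    #elif defined(_MSC_VER) && defined(_CPPUNWIND)
      std::puts("MSVC, exceptions enabled (/EHs or /EHa)");
    #else
      std::puts("exceptions disabled");
    #endif
    }
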
diff --git a/chromium/base/allocator/allocator_shim_unittest.cc b/chromium/base/allocator/allocator_shim_unittest.cc
index 2bafed559a5..3be8f2cabd5 100644
--- a/chromium/base/allocator/allocator_shim_unittest.cc
+++ b/chromium/base/allocator/allocator_shim_unittest.cc
@@ -11,7 +11,7 @@
#include <new>
#include <vector>
-#include "base/allocator/features.h"
+#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/atomicops.h"
#include "base/process/process_metrics.h"
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
index c8c1da4c3b5..40f494db992 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
@@ -30,7 +30,7 @@ uintptr_t GetMask() {
if (!IsWindows8Point1OrGreater()) {
mask = internal::kASLRMaskBefore8_10;
}
-#endif // defined(OS_WIN)
+#endif // defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#elif defined(ARCH_CPU_32_BITS)
#if defined(OS_WIN)
BOOL is_wow64 = FALSE;
@@ -46,8 +46,17 @@ uintptr_t GetMask() {
const size_t kSamples = 100;
+uintptr_t GetAddressBits() {
+ return reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
+}
+
+uintptr_t GetRandomBits() {
+ return GetAddressBits() - internal::kASLROffset;
+}
+
} // namespace
+// Configurations without ASLR are tested here.
TEST(AddressSpaceRandomizationTest, DisabledASLR) {
uintptr_t mask = GetMask();
if (!mask) {
@@ -61,72 +70,175 @@ TEST(AddressSpaceRandomizationTest, DisabledASLR) {
}
}
-TEST(AddressSpaceRandomizationTest, Unpredictable) {
+TEST(AddressSpaceRandomizationTest, Alignment) {
uintptr_t mask = GetMask();
- // Configurations without ASLR are tested above, in DisabledASLR.
if (!mask)
return;
- std::set<uintptr_t> addresses;
- uintptr_t address_logical_sum = 0;
- uintptr_t address_logical_product = static_cast<uintptr_t>(-1);
for (size_t i = 0; i < kSamples; ++i) {
- uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
- // Test that address is in range.
- EXPECT_LE(internal::kASLROffset, address);
- EXPECT_GE(internal::kASLROffset + mask, address);
- // Test that address is page aligned.
+ uintptr_t address = GetAddressBits();
EXPECT_EQ(0ULL, (address & kPageAllocationGranularityOffsetMask));
- // Test that address is unique (no collisions in kSamples tries)
- CHECK_EQ(0ULL, addresses.count(address));
- addresses.insert(address);
- // Sum and product to test randomness at each bit position, below.
- address -= internal::kASLROffset;
- address_logical_sum |= address;
- address_logical_product &= address;
}
- // All randomized bits in address_logical_sum should be set, since the
- // likelihood of never setting any of the bits is 1 / (2 ^ kSamples) with a
- // good RNG. Likewise, all bits in address_logical_product should be cleared.
- // Note that we don't test unmasked high bits. These may be set if kASLROffset
- // is larger than kASLRMask, or if adding kASLROffset generated a carry.
- EXPECT_EQ(mask, address_logical_sum & mask);
- EXPECT_EQ(0ULL, address_logical_product & mask);
+}
+
+TEST(AddressSpaceRandomizationTest, Range) {
+ uintptr_t mask = GetMask();
+ if (!mask)
+ return;
+
+ uintptr_t min = internal::kASLROffset;
+ uintptr_t max = internal::kASLROffset + internal::kASLRMask;
+ for (size_t i = 0; i < kSamples; ++i) {
+ uintptr_t address = GetAddressBits();
+ EXPECT_LE(min, address);
+ EXPECT_GE(max, address);
+ }
}
TEST(AddressSpaceRandomizationTest, Predictable) {
uintptr_t mask = GetMask();
- // Configurations without ASLR are tested above, in DisabledASLR.
if (!mask)
return;
const uintptr_t kInitialSeed = 0xfeed5eedULL;
base::SetRandomPageBaseSeed(kInitialSeed);
- // Make sure the addresses look random but are predictable.
- std::set<uintptr_t> addresses;
std::vector<uintptr_t> sequence;
for (size_t i = 0; i < kSamples; ++i) {
uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
sequence.push_back(address);
- // Test that address is in range.
- EXPECT_LE(internal::kASLROffset, address);
- EXPECT_GE(internal::kASLROffset + mask, address);
- // Test that address is page aligned.
- EXPECT_EQ(0ULL, (address & kPageAllocationGranularityOffsetMask));
- // Test that address is unique (no collisions in kSamples tries)
- CHECK_EQ(0ULL, addresses.count(address));
- addresses.insert(address);
- // Test that (address - offset) == (predicted & mask).
- address -= internal::kASLROffset;
}
- // Make sure sequence is repeatable.
base::SetRandomPageBaseSeed(kInitialSeed);
+
for (size_t i = 0; i < kSamples; ++i) {
uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
EXPECT_EQ(address, sequence[i]);
}
}
+// This randomness test is adapted from V8's PRNG tests.
+
+// Chi squared for getting m 0s out of n bits.
+double ChiSquared(int m, int n) {
+ double ys_minus_np1 = (m - n / 2.0);
+ double chi_squared_1 = ys_minus_np1 * ys_minus_np1 * 2.0 / n;
+ double ys_minus_np2 = ((n - m) - n / 2.0);
+ double chi_squared_2 = ys_minus_np2 * ys_minus_np2 * 2.0 / n;
+ return chi_squared_1 + chi_squared_2;
+}
+
+// Test for correlations between recent bits from the PRNG, or bits that are
+// biased.
+void RandomBitCorrelation(int random_bit) {
+ uintptr_t mask = GetMask();
+ if ((mask & (1ULL << random_bit)) == 0)
+ return; // bit is always 0.
+
+#ifdef DEBUG
+ constexpr int kHistory = 2;
+ constexpr int kRepeats = 1000;
+#else
+ constexpr int kHistory = 8;
+ constexpr int kRepeats = 10000;
+#endif
+ constexpr int kPointerBits = 8 * sizeof(void*);
+ uintptr_t history[kHistory];
+ // The predictor bit is either constant 0 or 1, or one of the bits from the
+ // history.
+ for (int predictor_bit = -2; predictor_bit < kPointerBits; predictor_bit++) {
+ // The predicted bit is one of the bits from the PRNG.
+ for (int ago = 0; ago < kHistory; ago++) {
+ // We don't want to check whether each bit predicts itself.
+ if (ago == 0 && predictor_bit == random_bit)
+ continue;
+
+ // Enter the new random value into the history.
+ for (int i = ago; i >= 0; i--) {
+ history[i] = GetRandomBits();
+ }
+
+ // Find out how many of the bits are the same as the prediction bit.
+ int m = 0;
+ for (int i = 0; i < kRepeats; i++) {
+ uintptr_t random = GetRandomBits();
+ for (int j = ago - 1; j >= 0; j--)
+ history[j + 1] = history[j];
+ history[0] = random;
+
+ int predicted;
+ if (predictor_bit >= 0) {
+ predicted = (history[ago] >> predictor_bit) & 1;
+ } else {
+ predicted = predictor_bit == -2 ? 0 : 1;
+ }
+ int bit = (random >> random_bit) & 1;
+ if (bit == predicted)
+ m++;
+ }
+
+ // Chi squared analysis for k = 2 (2 states: same/not-same) and one
+ // degree of freedom (k - 1).
+ double chi_squared = ChiSquared(m, kRepeats);
+ // For 1 degree of freedom this corresponds to 1 in a million. We are
+ // running ~8000 tests, so that would be surprising.
+ CHECK_GE(24, chi_squared);
+ // If the predictor bit is a fixed 0 or 1 then it makes no sense to
+ // repeat the test with a different age.
+ if (predictor_bit < 0)
+ break;
+ }
+ }
+}
+
+// Tests are fairly slow, so give each random bit its own test.
+#define TEST_RANDOM_BIT(BIT) \
+ TEST(AddressSpaceRandomizationTest, RandomBitCorrelations##BIT) { \
+ RandomBitCorrelation(BIT); \
+ }
+
+// The first 12 bits on all platforms are always 0.
+TEST_RANDOM_BIT(12)
+TEST_RANDOM_BIT(13)
+TEST_RANDOM_BIT(14)
+TEST_RANDOM_BIT(15)
+TEST_RANDOM_BIT(16)
+TEST_RANDOM_BIT(17)
+TEST_RANDOM_BIT(18)
+TEST_RANDOM_BIT(19)
+TEST_RANDOM_BIT(20)
+TEST_RANDOM_BIT(21)
+TEST_RANDOM_BIT(22)
+TEST_RANDOM_BIT(23)
+TEST_RANDOM_BIT(24)
+TEST_RANDOM_BIT(25)
+TEST_RANDOM_BIT(26)
+TEST_RANDOM_BIT(27)
+TEST_RANDOM_BIT(28)
+TEST_RANDOM_BIT(29)
+TEST_RANDOM_BIT(30)
+TEST_RANDOM_BIT(31)
+#if defined(ARCH_CPU_64_BITS)
+TEST_RANDOM_BIT(32)
+TEST_RANDOM_BIT(33)
+TEST_RANDOM_BIT(34)
+TEST_RANDOM_BIT(35)
+TEST_RANDOM_BIT(36)
+TEST_RANDOM_BIT(37)
+TEST_RANDOM_BIT(38)
+TEST_RANDOM_BIT(39)
+TEST_RANDOM_BIT(40)
+TEST_RANDOM_BIT(41)
+TEST_RANDOM_BIT(42)
+TEST_RANDOM_BIT(43)
+TEST_RANDOM_BIT(44)
+TEST_RANDOM_BIT(45)
+TEST_RANDOM_BIT(46)
+TEST_RANDOM_BIT(47)
+TEST_RANDOM_BIT(48)
+// No platforms have more than 48 address bits.
+#endif // defined(ARCH_CPU_64_BITS)
+
+#undef TEST_RANDOM_BIT
+
} // namespace base
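
As a consistency check on the CHECK_GE(24, chi_squared) threshold above (a derivation, not part of the patch): with two equiprobable outcomes the two terms of ChiSquared(m, n) are equal, so

    \[
      \chi^2 = \frac{2(m - n/2)^2}{n} + \frac{2\bigl((n - m) - n/2\bigr)^2}{n}
             = \frac{4(m - n/2)^2}{n},
      \qquad
      \Pr\bigl[\chi^2 \ge 24 \mid k - 1 = 1\bigr]
             = \operatorname{erfc}\bigl(\sqrt{12}\bigr) \approx 10^{-6},
    \]

which matches the "1 in a million" comment, so the roughly 8000 checks per run are expected to pass comfortably.
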
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index 61cd43b1837..10568300e0e 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -11,19 +11,24 @@
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/base_export.h"
+#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/numerics/checked_math.h"
#include "build/build_config.h"
-#if defined(OS_MACOSX)
-#include <mach/mach.h>
-#endif
-
#if defined(OS_POSIX)
#include <errno.h>
#include <sys/mman.h>
+#if defined(OS_MACOSX)
+#include <mach/mach.h>
+#endif
+#if defined(OS_LINUX)
+#include <sys/resource.h>
+#endif
+
#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif
@@ -46,14 +51,37 @@ int GetAccessFlags(PageAccessibilityConfiguration page_accessibility) {
return PROT_READ | PROT_WRITE;
case PageReadExecute:
return PROT_READ | PROT_EXEC;
+ case PageReadWriteExecute:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
default:
NOTREACHED();
- // Fall through.
+ FALLTHROUGH;
case PageInaccessible:
return PROT_NONE;
}
}
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+// On Linux, multiple guarded memory regions may exceed the process address
+// space limit. This function will raise or lower the limit by |amount|.
+bool AdjustAddressSpaceLimit(int64_t amount) {
+ struct rlimit old_rlimit;
+ if (getrlimit(RLIMIT_AS, &old_rlimit))
+ return false;
+ const rlim_t new_limit =
+ CheckAdd(old_rlimit.rlim_cur, amount).ValueOrDefault(old_rlimit.rlim_max);
+ const struct rlimit new_rlimit = {std::min(new_limit, old_rlimit.rlim_max),
+ old_rlimit.rlim_max};
+ // setrlimit will fail if limit > old_rlimit.rlim_max.
+ return setrlimit(RLIMIT_AS, &new_rlimit) == 0;
+}
+
+// Current WASM guarded memory regions have 8 GiB of address space. There are
+// schemes that reduce that to 4 GiB.
+constexpr size_t kMinimumGuardedMemorySize = 1ULL << 32; // 4 GiB
+
+#endif // defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+
#elif defined(OS_WIN)
#include <windows.h>
@@ -72,9 +100,11 @@ int GetAccessFlags(PageAccessibilityConfiguration page_accessibility) {
return PAGE_READWRITE;
case PageReadExecute:
return PAGE_EXECUTE_READ;
+ case PageReadWriteExecute:
+ return PAGE_EXECUTE_READWRITE;
default:
NOTREACHED();
- // Fall through.
+ FALLTHROUGH;
case PageInaccessible:
return PAGE_NOACCESS;
}
@@ -96,6 +126,7 @@ size_t s_reservation_size = 0;
static void* SystemAllocPages(void* hint,
size_t length,
PageAccessibilityConfiguration page_accessibility,
+ PageTag page_tag,
bool commit) {
DCHECK(!(length & kPageAllocationGranularityOffsetMask));
DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
@@ -113,8 +144,10 @@ static void* SystemAllocPages(void* hint,
#if defined(OS_MACOSX)
// Use a custom tag to make it easier to distinguish partition alloc regions
- // in vmmap.
- int fd = VM_MAKE_TAG(254);
+ // in vmmap. Tags between 240-255 are supported.
+ DCHECK_LE(PageTag::kFirst, page_tag);
+ DCHECK_GE(PageTag::kLast, page_tag);
+ int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
#else
int fd = -1;
#endif
@@ -132,15 +165,18 @@ static void* AllocPagesIncludingReserved(
void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility,
+ PageTag page_tag,
bool commit) {
- void* ret = SystemAllocPages(address, length, page_accessibility, commit);
+ void* ret =
+ SystemAllocPages(address, length, page_accessibility, page_tag, commit);
if (ret == nullptr) {
const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
if (cant_alloc_length) {
// The system cannot allocate |length| bytes. Release any reserved address
// space and try once more.
ReleaseReservation();
- ret = SystemAllocPages(address, length, page_accessibility, commit);
+ ret = SystemAllocPages(address, length, page_accessibility, page_tag,
+ commit);
}
}
return ret;
@@ -182,7 +218,8 @@ static void* TrimMapping(void* base,
// aligned address within the freed range.
ret = reinterpret_cast<char*>(base) + pre_slack;
FreePages(base, base_length);
- ret = SystemAllocPages(ret, trim_length, page_accessibility, commit);
+ ret = SystemAllocPages(ret, trim_length, page_accessibility,
+ PageTag::kChromium, commit);
}
#endif
@@ -195,6 +232,7 @@ void* AllocPages(void* address,
size_t length,
size_t align,
PageAccessibilityConfiguration page_accessibility,
+ PageTag page_tag,
bool commit) {
DCHECK(length >= kPageAllocationGranularity);
DCHECK(!(length & kPageAllocationGranularityOffsetMask));
@@ -207,6 +245,19 @@ void* AllocPages(void* address,
uintptr_t align_base_mask = ~align_offset_mask;
DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+ // On 64 bit Linux, we may need to adjust the address space limit for
+ // guarded allocations.
+ if (length >= kMinimumGuardedMemorySize) {
+ CHECK_EQ(PageInaccessible, page_accessibility);
+ CHECK(!commit);
+ if (!AdjustAddressSpaceLimit(base::checked_cast<int64_t>(length))) {
+ DLOG(WARNING) << "Could not adjust address space by " << length;
+ // Fall through. Try the allocation, since we may have a reserve.
+ }
+ }
+#endif
+
// If the client passed null as the address, choose a good one.
if (address == nullptr) {
address = GetRandomPageBase();
@@ -225,7 +276,7 @@ void* AllocPages(void* address,
#endif
for (int i = 0; i < kExactSizeTries; ++i) {
void* ret = AllocPagesIncludingReserved(address, length, page_accessibility,
- commit);
+ page_tag, commit);
if (ret != nullptr) {
// If the alignment is to our liking, we're done.
if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
@@ -261,7 +312,7 @@ void* AllocPages(void* address,
// Continue randomizing only on POSIX.
address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
ret = AllocPagesIncludingReserved(address, try_length, page_accessibility,
- commit);
+ page_tag, commit);
// The retries are for Windows, where a race can steal our mapping on
// resize.
} while (ret != nullptr &&
@@ -278,6 +329,12 @@ void FreePages(void* address, size_t length) {
#if defined(OS_POSIX)
int ret = munmap(address, length);
CHECK(!ret);
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+ // On 64 bit Linux, restore the address space limit.
+ if (length >= kMinimumGuardedMemorySize) {
+ CHECK(AdjustAddressSpaceLimit(-base::checked_cast<int64_t>(length)));
+ }
+#endif
#else
BOOL ret = VirtualFree(address, 0, MEM_RELEASE);
CHECK(ret);
@@ -381,7 +438,8 @@ bool ReserveAddressSpace(size_t size) {
// To avoid deadlock, call only SystemAllocPages.
subtle::SpinLock::Guard guard(s_reserveLock.Get());
if (s_reservation_address == nullptr) {
- void* mem = SystemAllocPages(nullptr, size, PageInaccessible, false);
+ void* mem = SystemAllocPages(nullptr, size, PageInaccessible,
+ PageTag::kChromium, false);
if (mem != nullptr) {
// We guarantee this alignment when reserving address space.
DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
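
A standalone, Linux-only sketch of the RLIMIT_AS round-trip that the new AdjustAddressSpaceLimit() performs (the saturation logic stands in for base::CheckAdd; main() is illustrative only):

    #include <sys/resource.h>

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    bool AdjustAddressSpaceLimit(int64_t amount) {
      struct rlimit limits;
      if (getrlimit(RLIMIT_AS, &limits) != 0)
        return false;
      // Stand-in for CheckAdd(rlim_cur, amount).ValueOrDefault(rlim_max):
      // fall back to the hard limit when the sum is not representable.
      rlim_t requested = limits.rlim_max;
      if (amount >= 0) {
        rlim_t inc = static_cast<rlim_t>(amount);
        if (inc <= limits.rlim_max && limits.rlim_cur <= limits.rlim_max - inc)
          requested = limits.rlim_cur + inc;
      } else {
        rlim_t dec = static_cast<rlim_t>(-amount);
        requested = limits.rlim_cur > dec ? limits.rlim_cur - dec : 0;
      }
      // setrlimit() fails if the soft limit exceeds the hard limit.
      limits.rlim_cur = std::min(requested, limits.rlim_max);
      return setrlimit(RLIMIT_AS, &limits) == 0;
    }

    int main() {
      constexpr int64_t kFourGiB = int64_t{1} << 32;  // kMinimumGuardedMemorySize.
      std::printf("raise: %d\n", AdjustAddressSpaceLimit(kFourGiB));
      std::printf("lower: %d\n", AdjustAddressSpaceLimit(-kFourGiB));
    }
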
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.h b/chromium/base/allocator/partition_allocator/page_allocator.h
index a39b4e3013e..ff0ab6f8cd1 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator.h
@@ -43,6 +43,17 @@ enum PageAccessibilityConfiguration {
PageInaccessible,
PageReadWrite,
PageReadExecute,
+ // This flag is deprecated and will go away soon.
+ // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
+ PageReadWriteExecute,
+};
+
+// macOS supports tagged memory regions, to help in debugging.
+enum class PageTag {
+ kFirst = 240, // Minimum tag value.
+ kChromium = 254, // Chromium page, including off-heap V8 ArrayBuffers.
+ kV8 = 255, // V8 heap pages.
+ kLast = kV8 // Maximum tag value.
};
// Allocate one or more pages.
@@ -63,6 +74,7 @@ BASE_EXPORT void* AllocPages(void* address,
size_t length,
size_t align,
PageAccessibilityConfiguration page_accessibility,
+ PageTag tag = PageTag::kChromium,
bool commit = true);
// Free one or more pages starting at |address| and continuing for |length|
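
With the defaulted tag parameter, existing AllocPages() callers compile unchanged; a client opts in to labelling explicitly. An illustrative call site (ReserveWasmGuardRegion() and kGuardRegionSize are hypothetical; the 8 GiB figure and the inaccessible/uncommitted requirements come from the page_allocator.cc changes above):

    #include "base/allocator/partition_allocator/page_allocator.h"

    void* ReserveWasmGuardRegion() {
      constexpr size_t kGuardRegionSize = size_t{8} << 30;  // 8 GiB (64-bit only).
      // Guarded regions must be inaccessible and uncommitted, per the CHECKs
      // added to AllocPages().
      return base::AllocPages(nullptr, kGuardRegionSize,
                              base::kPageAllocationGranularity,
                              base::PageInaccessible, base::PageTag::kV8,
                              /* commit */ false);
    }
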
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index 0ca9522bcb0..0c4b91f92fa 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -35,7 +35,8 @@ static_assert(base::kPageMetadataSize * base::kNumPartitionPagesPerSuperPage <=
base::kSystemPageSize,
"page metadata fits in hole");
// Limit to prevent callers accidentally overflowing an int size.
-static_assert(base::kGenericMaxDirectMapped <= 1UL << 31,
+static_assert(base::kGenericMaxDirectMapped <=
+ (1UL << 31) + base::kPageAllocationGranularity,
"maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
static_assert(base::kGenericSmallestBucket == 8, "generic smallest bucket");
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index 6e5143b094a..f4a4f0ac6b6 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -206,7 +206,8 @@ static const size_t kGenericMaxBucketed =
static const size_t kGenericMinDirectMappedDownsize =
kGenericMaxBucketed +
1; // Limit when downsizing a direct mapping using realloc().
-static const size_t kGenericMaxDirectMapped = 1UL << 31; // 2 GiB
+static const size_t kGenericMaxDirectMapped =
+ (1UL << 31) + kPageAllocationGranularity; // 2 GiB plus one more page.
static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
// Constants for the memory reclaim logic.
@@ -619,11 +620,11 @@ class BASE_EXPORT PartitionAllocHooks {
// Chained allocation hooks are not supported. Registering a non-null
// hook when a non-null hook is already registered indicates somebody is
// trying to overwrite a hook.
- DCHECK(!hook || !allocation_hook_) << "Overwriting allocation hook";
+ CHECK(!hook || !allocation_hook_) << "Overwriting allocation hook";
allocation_hook_ = hook;
}
static void SetFreeHook(FreeHook* hook) {
- DCHECK(!hook || !free_hook_) << "Overwriting free hook";
+ CHECK(!hook || !free_hook_) << "Overwriting free hook";
free_hook_ = hook;
}
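
Promoting the hook guards from DCHECK to CHECK makes double registration crash in release builds as well, not just in debug. A sketch of the resulting contract, assuming the AllocationHook signature of this era, void(void* address, size_t size, const char* type_name):

    #include "base/allocator/partition_allocator/partition_alloc.h"

    static void OnAlloc(void* address, size_t size, const char* type_name) {
      // Record the allocation somewhere cheap; hooks run on hot paths.
    }

    void InstallHooks() {
      base::PartitionAllocHooks::SetAllocationHook(&OnAlloc);  // OK: was null.
      // A second non-null registration would now CHECK-fail in all builds:
      //   base::PartitionAllocHooks::SetAllocationHook(&OnAlloc);
      base::PartitionAllocHooks::SetAllocationHook(nullptr);   // Unregister.
    }
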
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index b84db9b7308..38d2a4a95dc 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -19,9 +19,10 @@
#if defined(OS_POSIX)
#include <sys/mman.h>
+#if !defined(OS_FUCHSIA)
#include <sys/resource.h>
+#endif
#include <sys/time.h>
-
#endif // defined(OS_POSIX)
#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
@@ -46,7 +47,7 @@ bool SetAddressSpaceLimit() {
#if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX)
// 32 bits => address space is limited already.
return true;
-#elif defined(OS_POSIX) && !defined(OS_MACOSX)
+#elif defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
// macOS will accept, but not enforce, |RLIMIT_AS| changes. See
// https://crbug.com/435269 and rdar://17576114.
//
@@ -69,7 +70,7 @@ bool SetAddressSpaceLimit() {
}
bool ClearAddressSpaceLimit() {
-#if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX)
+#if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX) || defined(OS_FUCHSIA)
return true;
#elif defined(OS_POSIX)
struct rlimit limit;
@@ -248,13 +249,34 @@ void FreeFullPage(PartitionPage* page) {
}
#if defined(OS_LINUX)
-bool IsPageInCore(void* ptr) {
+bool KernelSupportsMadvFree() {
+ int32_t major_version;
+ int32_t minor_version;
+ int32_t bugfix_version;
+ SysInfo::OperatingSystemVersionNumbers(&major_version, &minor_version,
+ &bugfix_version);
+ return std::vector<int32_t>{major_version, minor_version, bugfix_version} >=
+ std::vector<int32_t>{4, 5};
+}
+
+bool CheckPageInCore(void* ptr, bool in_core) {
+ // If the kernel supports MADV_FREE, then pages may still be in core to be
+ // reclaimed by the OS later. This is a cool optimization that prevents the
+ // kernel from freeing and allocating memory in case the app needs more memory
+ // soon -- it can just reuse the memory already allocated. Unfortunately,
+ // there's no way to test if a page is in core because it needs to be, or if
+ // it just hasn't been reclaimed yet.
+ static bool kernel_supports_madv_free = KernelSupportsMadvFree();
+ if (kernel_supports_madv_free)
+ return true;
+
unsigned char ret = 0;
EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret));
- return (ret & 1) != 0;
+ return in_core == (ret & 1);
}
-#define CHECK_PAGE_IN_CORE(ptr, in_core) EXPECT_EQ(IsPageInCore(ptr), in_core);
+#define CHECK_PAGE_IN_CORE(ptr, in_core) \
+ EXPECT_TRUE(CheckPageInCore(ptr, in_core))
#else
#define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
#endif // defined(OS_LINUX)
@@ -340,7 +362,7 @@ TEST(PageAllocatorTest, AllocFailure) {
return;
void* result = base::AllocPages(nullptr, size, kPageAllocationGranularity,
- PageInaccessible);
+ PageInaccessible, PageTag::kChromium, false);
if (result == nullptr) {
// We triggered allocation failure. Our reservation should have been
// released, and we should be able to make a new reservation.
@@ -1348,7 +1370,7 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
// Disable this test on Android because, due to its allocation-heavy behavior,
// it tends to get OOM-killed rather than pass.
-#if defined(OS_MACOSX) || defined(OS_ANDROID)
+#if defined(OS_MACOSX) || defined(OS_ANDROID) || defined(OS_FUCHSIA)
#define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect
#else
#define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect
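
KernelSupportsMadvFree() relies on std::vector's lexicographic operator>= to compare {major, minor, bugfix} against {4, 5}, the kernel release in which MADV_FREE landed. A self-contained check of that ordering (not part of the patch):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      using Version = std::vector<int32_t>;
      assert(!(Version{4, 4, 100} >= Version{4, 5}));  // 4.4.100 predates 4.5.
      assert(Version{4, 5, 0} >= Version{4, 5});       // 4.5.0 qualifies.
      assert(Version{5, 0, 0} >= Version{4, 5});       // Any 5.x qualifies.
    }
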
diff --git a/chromium/base/allocator/winheap_stubs_win.cc b/chromium/base/allocator/winheap_stubs_win.cc
index 593e386ed9e..8aa52981fa3 100644
--- a/chromium/base/allocator/winheap_stubs_win.cc
+++ b/chromium/base/allocator/winheap_stubs_win.cc
@@ -78,9 +78,9 @@ size_t WinHeapGetSizeEstimate(void* ptr) {
// Call the new handler, if one has been set.
// Returns true on successfully calling the handler, false otherwise.
bool WinCallNewHandler(size_t size) {
-#if !defined(_HAS_EXCEPTIONS) || _HAS_EXCEPTIONS
+#ifdef _CPPUNWIND
#error "Exceptions in allocator shim are not supported!"
-#endif // defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS
+#endif // _CPPUNWIND
// Get the current new handler.
_PNH nh = _query_new_handler();
if (!nh)