Diffstat (limited to 'deps/v8/src/base/platform')
21 files changed, 689 insertions, 1208 deletions
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS new file mode 100644 index 0000000000..5deaa67ce7 --- /dev/null +++ b/deps/v8/src/base/platform/OWNERS @@ -0,0 +1,6 @@ +set noparent + +hpayer@chromium.org +mlippautz@chromium.org + +# COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc index 6df8599def..165651aae1 100644 --- a/deps/v8/src/base/platform/condition-variable.cc +++ b/deps/v8/src/base/platform/condition-variable.cc @@ -28,7 +28,7 @@ ConditionVariable::ConditionVariable() { DCHECK_EQ(0, result); result = pthread_condattr_destroy(&attr); #else - int result = pthread_cond_init(&native_handle_, NULL); + int result = pthread_cond_init(&native_handle_, nullptr); #endif DCHECK_EQ(0, result); USE(result); diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h index f9a9ef4361..3406831cbe 100644 --- a/deps/v8/src/base/platform/elapsed-timer.h +++ b/deps/v8/src/base/platform/elapsed-timer.h @@ -56,7 +56,7 @@ class ElapsedTimer final { DCHECK(IsStarted()); TimeTicks ticks = Now(); TimeDelta elapsed = ticks - start_ticks_; - DCHECK(elapsed.InMicroseconds() >= 0); + DCHECK_GE(elapsed.InMicroseconds(), 0); start_ticks_ = ticks; DCHECK(IsStarted()); return elapsed; @@ -67,7 +67,7 @@ class ElapsedTimer final { TimeDelta Elapsed() const { DCHECK(IsStarted()); TimeDelta elapsed = Now() - start_ticks_; - DCHECK(elapsed.InMicroseconds() >= 0); + DCHECK_GE(elapsed.InMicroseconds(), 0); return elapsed; } diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc index 191f07ffb1..a044075c16 100644 --- a/deps/v8/src/base/platform/mutex.cc +++ b/deps/v8/src/base/platform/mutex.cc @@ -25,7 +25,7 @@ static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) { result = pthread_mutexattr_destroy(&attr); #else // Use a fast mutex (default attributes). - result = pthread_mutex_init(mutex, NULL); + result = pthread_mutex_init(mutex, nullptr); #endif // defined(DEBUG) DCHECK_EQ(0, result); USE(result); diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc index 6c1bde7b85..39559552bb 100644 --- a/deps/v8/src/base/platform/platform-aix.cc +++ b/deps/v8/src/base/platform/platform-aix.cc @@ -49,116 +49,24 @@ const char* AIXTimezoneCache::LocalTimezone(double time) { time_t tv = static_cast<time_t>(floor(time / msPerSecond)); struct tm tm; struct tm* t = localtime_r(&tv, &tm); - if (NULL == t) return ""; + if (nullptr == t) return ""; return tzname[0]; // The location of the timezone string on AIX. } double AIXTimezoneCache::LocalTimeOffset() { // On AIX, struct tm does not contain a tm_gmtoff field. - time_t utc = time(NULL); - DCHECK(utc != -1); + time_t utc = time(nullptr); + DCHECK_NE(utc, -1); struct tm tm; struct tm* loc = localtime_r(&utc, &tm); - DCHECK(loc != NULL); + DCHECK_NOT_NULL(loc); return static_cast<double>((mktime(loc) - utc) * msPerSecond); } TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); } -// Constants used for mmap. 
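For context on the elapsed-timer.h hunk above: the DCHECK_GE change only tightens the assertion that a measured delta is never negative; the timer itself is used by pairing Start() with Elapsed() or Restart(). A minimal usage sketch, assuming the usual Start() entry point from the same header (DoSomeWork is a hypothetical stand-in):

  v8::base::ElapsedTimer timer;
  timer.Start();                                 // record the starting ticks
  DoSomeWork();                                  // hypothetical workload
  v8::base::TimeDelta spent = timer.Elapsed();   // always >= 0, per the DCHECK_GE
  v8::base::TimeDelta lap = timer.Restart();     // returns the delta and re-arms the timer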
-static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - -void* OS::Allocate(const size_t requested, size_t* allocated, - OS::MemoryPermission access, void* hint) { - const size_t msize = RoundUp(requested, getpagesize()); - int prot = GetProtectionFromMemoryPermission(access); - void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd, - kMmapFdOffset); - - if (mbase == MAP_FAILED) return NULL; - *allocated = msize; - return mbase; -} - -// static -void* OS::ReserveRegion(size_t size, void* hint) { - void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, - kMmapFd, kMmapFdOffset); - - if (result == MAP_FAILED) return nullptr; - - return result; -} - -// static -void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint, - size_t* allocated) { - DCHECK((alignment % OS::AllocateAlignment()) == 0); - hint = AlignedAddress(hint, alignment); - size_t request_size = - RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment())); - void* result = ReserveRegion(request_size, hint); - if (result == nullptr) { - *allocated = 0; - return nullptr; - } - - uint8_t* base = static_cast<uint8_t*>(result); - uint8_t* aligned_base = RoundUp(base, alignment); - DCHECK_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. - if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - DCHECK_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - DCHECK(aligned_size == request_size); - - *allocated = aligned_size; - return static_cast<void*>(aligned_base); -} - -// static -bool OS::CommitRegion(void* address, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - - if (mprotect(address, size, prot) == -1) return false; - - return true; -} - -// static -bool OS::UncommitRegion(void* address, size_t size) { - return mprotect(address, size, PROT_NONE) != -1; -} - -// static -bool OS::ReleaseRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::ReleasePartialRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::HasLazyCommits() { return true; } - static unsigned StringToLong(char* buffer) { - return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT + return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT } std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { @@ -193,7 +101,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { if (buffer[3] != 'x') continue; char* start_of_path = index(buffer, '/'); // There may be no filename in this line. Skip to next. 
- if (start_of_path == NULL) continue; + if (start_of_path == nullptr) continue; buffer[bytes_read] = 0; result.push_back(SharedLibraryAddress(start_of_path, start, end)); } @@ -201,7 +109,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { return result; } -void OS::SignalCodeMovingGC(void* hint) {} +void OS::SignalCodeMovingGC() {} } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc index f20c530d67..eabd53570f 100644 --- a/deps/v8/src/base/platform/platform-cygwin.cc +++ b/deps/v8/src/base/platform/platform-cygwin.cc @@ -28,25 +28,37 @@ namespace base { namespace { -// The VirtualMemory implementation is taken from platform-win32.cc. -// The mmap-based virtual memory implementation as it is used on most posix -// platforms does not work well because Cygwin does not support MAP_FIXED. -// This causes VirtualMemory::Commit to not always commit the memory region -// specified. - -static void* RandomizedVirtualAlloc(size_t size, int action, int protection, - void* hint) { - LPVOID base = NULL; - - if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { - // For exectutable pages try and randomize the allocation address - base = VirtualAlloc(hint, size, action, protection); +// The memory allocation implementation is taken from platform-win32.cc. + +DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) { + switch (access) { + case OS::MemoryPermission::kNoAccess: + return PAGE_NOACCESS; + case OS::MemoryPermission::kReadWrite: + return PAGE_READWRITE; + case OS::MemoryPermission::kReadWriteExecute: + return PAGE_EXECUTE_READWRITE; + case OS::MemoryPermission::kReadExecute: + return PAGE_EXECUTE_READ; + } + UNREACHABLE(); +} + +uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect, + void* hint) { + LPVOID base = nullptr; + + // For executable or reserved pages try to use the address hint. + if (protect != PAGE_READWRITE) { + base = VirtualAlloc(hint, size, flags, protect); } - // After three attempts give up and let the OS find an address to use. - if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); + // If that fails, let the OS find an address to use. + if (base == nullptr) { + base = VirtualAlloc(nullptr, size, flags, protect); + } - return base; + return reinterpret_cast<uint8_t*>(base); } } // namespace @@ -64,93 +76,97 @@ const char* CygwinTimezoneCache::LocalTimezone(double time) { time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); struct tm tm; struct tm* t = localtime_r(&tv, &tm); - if (NULL == t) return ""; + if (nullptr == t) return ""; return tzname[0]; // The location of the timezone string on Cygwin. } double CygwinTimezoneCache::LocalTimeOffset() { // On Cygwin, struct tm does not contain a tm_gmtoff field. - time_t utc = time(NULL); - DCHECK(utc != -1); + time_t utc = time(nullptr); + DCHECK_NE(utc, -1); struct tm tm; struct tm* loc = localtime_r(&utc, &tm); - DCHECK(loc != NULL); + DCHECK_NOT_NULL(loc); // time - localtime includes any daylight savings offset, so subtract it. return static_cast<double>((mktime(loc) - utc) * msPerSecond - (loc->tm_isdst > 0 ? 
3600 * msPerSecond : 0)); } -void* OS::Allocate(const size_t requested, size_t* allocated, - OS::MemoryPermission access, void* hint) { - const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); - int prot = GetProtectionFromMemoryPermission(access); - void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mbase == MAP_FAILED) return NULL; - *allocated = msize; - return mbase; -} - // static -void* OS::ReserveRegion(size_t size, void* hint) { - return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint); -} - -// static -void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint, - size_t* allocated) { - hint = AlignedAddress(hint, alignment); - DCHECK((alignment % OS::AllocateAlignment()) == 0); - size_t request_size = - RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment())); - void* address = ReserveRegion(request_size, hint); - if (address == NULL) { - *allocated = 0; - return nullptr; +void* OS::Allocate(void* address, size_t size, size_t alignment, + MemoryPermission access) { + size_t page_size = AllocatePageSize(); + DCHECK_EQ(0, size % page_size); + DCHECK_EQ(0, alignment % page_size); + DCHECK_LE(page_size, alignment); + address = AlignedAddress(address, alignment); + + DWORD flags = (access == OS::MemoryPermission::kNoAccess) + ? MEM_RESERVE + : MEM_RESERVE | MEM_COMMIT; + DWORD protect = GetProtectionFromMemoryPermission(access); + + // First, try an exact size aligned allocation. + uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address); + if (base == nullptr) return nullptr; // Can't allocate, we're OOM. + + // If address is suitably aligned, we're done. + uint8_t* aligned_base = RoundUp(base, alignment); + if (base == aligned_base) return reinterpret_cast<void*>(base); + + // Otherwise, free it and try a larger allocation. + CHECK(Free(base, size)); + + // Clear the hint. It's unlikely we can allocate at this address. + address = nullptr; + + // Add the maximum misalignment so we are guaranteed an aligned base address + // in the allocated region. + size_t padded_size = size + (alignment - page_size); + const int kMaxAttempts = 3; + aligned_base = nullptr; + for (int i = 0; i < kMaxAttempts; ++i) { + base = RandomizedVirtualAlloc(padded_size, flags, protect, address); + if (base == nullptr) return nullptr; // Can't allocate, we're OOM. + + // Try to trim the allocation by freeing the padded allocation and then + // calling VirtualAlloc at the aligned base. + CHECK(Free(base, padded_size)); + aligned_base = RoundUp(base, alignment); + base = reinterpret_cast<uint8_t*>( + VirtualAlloc(aligned_base, size, flags, protect)); + // We might not get the reduced allocation due to a race. In that case, + // base will be nullptr. + if (base != nullptr) break; } - uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment); - // Try reducing the size by freeing and then reallocating a specific area. - bool result = ReleaseRegion(address, request_size); - USE(result); - DCHECK(result); - address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); - if (address != nullptr) { - request_size = size; - DCHECK(base == static_cast<uint8_t*>(address)); - } else { - // Resizing failed, just go with a bigger area. 
- address = ReserveRegion(request_size, hint); - if (address == nullptr) { - *allocated = 0; - return nullptr; - } - } - - *allocated = request_size; - return static_cast<void*>(address); + DCHECK_EQ(base, aligned_base); + return reinterpret_cast<void*>(base); } // static -bool OS::CommitRegion(void* address, size_t size, bool is_executable) { - int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; - if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) { - return false; - } - return true; +bool OS::Free(void* address, const size_t size) { + DCHECK_EQ(0, static_cast<uintptr_t>(address) % AllocatePageSize()); + DCHECK_EQ(0, size % AllocatePageSize()); + USE(size); + return VirtualFree(address, 0, MEM_RELEASE) != 0; } // static -bool OS::UncommitRegion(void* address, size_t size) { +bool OS::Release(void* address, size_t size) { + DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize()); + DCHECK_EQ(0, size % CommitPageSize()); return VirtualFree(address, size, MEM_DECOMMIT) != 0; } // static -bool OS::ReleaseRegion(void* address, size_t size) { - return VirtualFree(address, 0, MEM_RELEASE) != 0; -} - -// static -bool OS::ReleasePartialRegion(void* address, size_t size) { - return VirtualFree(address, size, MEM_DECOMMIT) != 0; +bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { + DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize()); + DCHECK_EQ(0, size % CommitPageSize()); + if (access == MemoryPermission::kNoAccess) { + return VirtualFree(address, size, MEM_DECOMMIT) != 0; + } + DWORD protect = GetProtectionFromMemoryPermission(access); + return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr; } // static @@ -165,7 +181,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] // If we encounter an unexpected situation we abort scanning further entries. FILE* fp = fopen("/proc/self/maps", "r"); - if (fp == NULL) return result; + if (fp == nullptr) return result; // Allocate enough room to be able to store a full file name. const int kLibNameLen = FILENAME_MAX + 1; @@ -193,7 +209,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { ungetc(c, fp); // Push the '/' back into the stream to be read below. // Read to the end of the line. Exit if the read fails. - if (fgets(lib_name, kLibNameLen, fp) == NULL) break; + if (fgets(lib_name, kLibNameLen, fp) == nullptr) break; // Drop the newline character read by fgets. We do not need to check // for a zero-length string because we know that we at least read the @@ -219,7 +235,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { return result; } -void OS::SignalCodeMovingGC(void* hint) { +void OS::SignalCodeMovingGC() { // Nothing to do on Cygwin. } diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc index a1eb7e8928..2b9779b843 100644 --- a/deps/v8/src/base/platform/platform-freebsd.cc +++ b/deps/v8/src/base/platform/platform-freebsd.cc @@ -40,106 +40,8 @@ TimezoneCache* OS::CreateTimezoneCache() { return new PosixDefaultTimezoneCache(); } -// Constants used for mmap. 
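The Cygwin hunk above, like the POSIX, Fuchsia, and Win32 hunks elsewhere in this diff, replaces the old ReserveRegion/CommitRegion/UncommitRegion/ReleaseRegion family with the four-function Allocate/Free/Release/SetPermissions API. A hypothetical caller of the new API (not part of the patch) would look roughly like this:

  using v8::base::OS;

  size_t page = OS::AllocatePageSize();
  size_t size = 16 * page;
  // Reserve an inaccessible, page-aligned region near a randomized hint.
  void* region = OS::Allocate(OS::GetRandomMmapAddr(), size, page,
                              OS::MemoryPermission::kNoAccess);
  if (region != nullptr) {
    // Commit the first page read/write before touching it.
    if (OS::SetPermissions(region, page, OS::MemoryPermission::kReadWrite)) {
      // ... use the committed page ...
    }
    // Decommit again, then unmap the whole reservation.
    OS::SetPermissions(region, page, OS::MemoryPermission::kNoAccess);
    OS::Free(region, size);
  }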
-static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - -void* OS::Allocate(const size_t requested, size_t* allocated, - OS::MemoryPermission access, void* hint) { - const size_t msize = RoundUp(requested, getpagesize()); - int prot = GetProtectionFromMemoryPermission(access); - void* mbase = - mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset); - - if (mbase == MAP_FAILED) return NULL; - *allocated = msize; - return mbase; -} - -// static -void* OS::ReserveRegion(size_t size, void* hint) { - void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd, - kMmapFdOffset); - - if (result == MAP_FAILED) return NULL; - - return result; -} - -// static -void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint, - size_t* allocated) { - hint = AlignedAddress(hint, alignment); - DCHECK((alignment % OS::AllocateAlignment()) == 0); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* result = ReserveRegion(request_size, hint); - if (result == nullptr) { - *allocated = 0; - return nullptr; - } - - uint8_t* base = static_cast<uint8_t*>(result); - uint8_t* aligned_base = RoundUp(base, alignment); - DCHECK_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. - if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - DCHECK_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - DCHECK(aligned_size == request_size); - - *allocated = aligned_size; - return static_cast<void*>(aligned_base); -} - -// static -bool OS::CommitRegion(void* address, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - if (MAP_FAILED == mmap(address, size, prot, - MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd, - kMmapFdOffset)) { - return false; - } - return true; -} - -// static -bool OS::UncommitRegion(void* address, size_t size) { - return mmap(address, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, - kMmapFd, kMmapFdOffset) != MAP_FAILED; -} - -// static -bool OS::ReleaseRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::ReleasePartialRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::HasLazyCommits() { - // TODO(alph): implement for the platform. - return false; -} - static unsigned StringToLong(char* buffer) { - return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT + return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT } std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { @@ -174,7 +76,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { if (buffer[3] != 'x') continue; char* start_of_path = index(buffer, '/'); // There may be no filename in this line. Skip to next. 
- if (start_of_path == NULL) continue; + if (start_of_path == nullptr) continue; buffer[bytes_read] = 0; result.push_back(SharedLibraryAddress(start_of_path, start, end)); } @@ -182,7 +84,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { return result; } -void OS::SignalCodeMovingGC(void* hint) {} +void OS::SignalCodeMovingGC() {} } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index 16e6f1d2b0..83a8a23c48 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -13,79 +13,73 @@ namespace v8 { namespace base { -TimezoneCache* OS::CreateTimezoneCache() { - return new PosixDefaultTimezoneCache(); +namespace { + +uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) { + switch (access) { + case OS::MemoryPermission::kNoAccess: + return 0; // no permissions + case OS::MemoryPermission::kReadWrite: + return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE; + case OS::MemoryPermission::kReadWriteExecute: + return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | + ZX_VM_FLAG_PERM_EXECUTE; + case OS::MemoryPermission::kReadExecute: + return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE; + } + UNREACHABLE(); } -// static -void* OS::Allocate(const size_t requested, size_t* allocated, - OS::MemoryPermission access, void* hint) { - CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217. - return nullptr; -} +} // namespace -// static -void OS::Guard(void* address, size_t size) { - CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(), - reinterpret_cast<uintptr_t>(address), size, - 0 /*no permissions*/)); -} - -// static -void* OS::ReserveRegion(size_t size, void* hint) { - zx_handle_t vmo; - if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return nullptr; - uintptr_t result; - zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size, - 0 /*no permissions*/, &result); - zx_handle_close(vmo); - if (status != ZX_OK) return nullptr; - return reinterpret_cast<void*>(result); +TimezoneCache* OS::CreateTimezoneCache() { + return new PosixDefaultTimezoneCache(); } // static -void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint, - size_t* allocated) { - DCHECK((alignment % OS::AllocateAlignment()) == 0); - hint = AlignedAddress(hint, alignment); - size_t request_size = - RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment())); +void* OS::Allocate(void* address, size_t size, size_t alignment, + OS::MemoryPermission access) { + size_t page_size = OS::AllocatePageSize(); + DCHECK_EQ(0, size % page_size); + DCHECK_EQ(0, alignment % page_size); + address = AlignedAddress(address, alignment); + // Add the maximum misalignment so we are guaranteed an aligned base address. + size_t request_size = size + (alignment - page_size); zx_handle_t vmo; if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) { - *allocated = 0; return nullptr; } static const char kVirtualMemoryName[] = "v8-virtualmem"; zx_object_set_property(vmo, ZX_PROP_NAME, kVirtualMemoryName, strlen(kVirtualMemoryName)); uintptr_t reservation; + uint32_t prot = GetProtectionFromMemoryPermission(access); zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, request_size, - 0 /*no permissions*/, &reservation); + prot, &reservation); // Either the vmo is now referenced by the vmar, or we failed and are bailing, // so close the vmo either way. 
zx_handle_close(vmo); if (status != ZX_OK) { - *allocated = 0; return nullptr; } uint8_t* base = reinterpret_cast<uint8_t*>(reservation); uint8_t* aligned_base = RoundUp(base, alignment); - DCHECK_LE(base, aligned_base); // Unmap extra memory reserved before and after the desired block. if (aligned_base != base) { + DCHECK_LT(base, aligned_base); size_t prefix_size = static_cast<size_t>(aligned_base - base); zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base), prefix_size); request_size -= prefix_size; } - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - DCHECK_LE(aligned_size, request_size); + size_t aligned_size = RoundUp(size, page_size); if (aligned_size != request_size) { + DCHECK_LT(aligned_size, request_size); size_t suffix_size = request_size - aligned_size; zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(aligned_base + aligned_size), @@ -94,37 +88,33 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint, } DCHECK(aligned_size == request_size); - - *allocated = aligned_size; return static_cast<void*>(aligned_base); } // static -bool OS::CommitRegion(void* address, size_t size, bool is_executable) { - uint32_t prot = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | - (is_executable ? ZX_VM_FLAG_PERM_EXECUTE : 0); - return zx_vmar_protect(zx_vmar_root_self(), - reinterpret_cast<uintptr_t>(address), size, - prot) == ZX_OK; -} - -// static -bool OS::UncommitRegion(void* address, size_t size) { - return zx_vmar_protect(zx_vmar_root_self(), - reinterpret_cast<uintptr_t>(address), size, - 0 /*no permissions*/) == ZX_OK; +bool OS::Free(void* address, const size_t size) { + DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize()); + DCHECK_EQ(0, size % AllocatePageSize()); + return zx_vmar_unmap(zx_vmar_root_self(), + reinterpret_cast<uintptr_t>(address), size) == ZX_OK; } // static -bool OS::ReleaseRegion(void* address, size_t size) { +bool OS::Release(void* address, size_t size) { + DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize()); + DCHECK_EQ(0, size % CommitPageSize()); return zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(address), size) == ZX_OK; } // static -bool OS::ReleasePartialRegion(void* address, size_t size) { - return zx_vmar_unmap(zx_vmar_root_self(), - reinterpret_cast<uintptr_t>(address), size) == ZX_OK; +bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { + DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize()); + DCHECK_EQ(0, size % CommitPageSize()); + uint32_t prot = GetProtectionFromMemoryPermission(access); + return zx_vmar_protect(zx_vmar_root_self(), + reinterpret_cast<uintptr_t>(address), size, + prot) == ZX_OK; } // static @@ -138,7 +128,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { return std::vector<SharedLibraryAddress>(); } -void OS::SignalCodeMovingGC(void* hint) { +void OS::SignalCodeMovingGC() { CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217. } diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc index 2299a2c3e3..725ad0c6eb 100644 --- a/deps/v8/src/base/platform/platform-linux.cc +++ b/deps/v8/src/base/platform/platform-linux.cc @@ -93,109 +93,13 @@ TimezoneCache* OS::CreateTimezoneCache() { return new PosixDefaultTimezoneCache(); } -// Constants used for mmap. 
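The Fuchsia code above follows the usual Zircon pattern: create a VMO, map it into the root VMAR with the requested permissions, and later change or revoke those permissions in place. A trimmed standalone sketch (hypothetical helper, not the patch's code) using only the syscalls that appear in this hunk, with headers assumed to be <zircon/syscalls.h> and <zircon/process.h>:

  void MapScratchRegion(size_t size) {
    zx_handle_t vmo;
    if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return;
    uint32_t prot = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
    uintptr_t addr;
    zx_status_t status =
        zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size, prot, &addr);
    zx_handle_close(vmo);  // the mapping keeps the VMO alive, as noted above
    if (status != ZX_OK) return;
    // ... use the mapping ...
    zx_vmar_protect(zx_vmar_root_self(), addr, size, 0);  // drop all permissions
    zx_vmar_unmap(zx_vmar_root_self(), addr, size);       // release the range
  }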
-static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - -void* OS::Allocate(const size_t requested, size_t* allocated, - OS::MemoryPermission access, void* hint) { - const size_t msize = RoundUp(requested, AllocateAlignment()); - int prot = GetProtectionFromMemoryPermission(access); - void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd, - kMmapFdOffset); - if (mbase == MAP_FAILED) return nullptr; - *allocated = msize; - return mbase; -} - -// static -void* OS::ReserveRegion(size_t size, void* hint) { - void* result = - mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, - kMmapFd, kMmapFdOffset); - - if (result == MAP_FAILED) return nullptr; - return result; -} - -// static -void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint, - size_t* allocated) { - DCHECK((alignment % OS::AllocateAlignment()) == 0); - hint = AlignedAddress(hint, alignment); - size_t request_size = - RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment())); - void* result = ReserveRegion(request_size, hint); - if (result == nullptr) { - *allocated = 0; - return nullptr; - } - - uint8_t* base = static_cast<uint8_t*>(result); - uint8_t* aligned_base = RoundUp(base, alignment); - DCHECK_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. - if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - DCHECK_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - DCHECK(aligned_size == request_size); - - *allocated = aligned_size; - return static_cast<void*>(aligned_base); -} - -// static -bool OS::CommitRegion(void* address, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - if (MAP_FAILED == mmap(address, size, prot, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd, - kMmapFdOffset)) { - return false; - } - - return true; -} - -// static -bool OS::UncommitRegion(void* address, size_t size) { - return mmap(address, size, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - -// static -bool OS::ReleaseRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::ReleasePartialRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::HasLazyCommits() { return true; } - std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { std::vector<SharedLibraryAddress> result; // This function assumes that the layout of the file is as follows: // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] // If we encounter an unexpected situation we abort scanning further entries. FILE* fp = fopen("/proc/self/maps", "r"); - if (fp == NULL) return result; + if (fp == nullptr) return result; // Allocate enough room to be able to store a full file name. const int kLibNameLen = FILENAME_MAX + 1; @@ -203,11 +107,15 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { // This loop will terminate once the scanning hits an EOF. 
while (true) { - uintptr_t start, end; + uintptr_t start, end, offset; char attr_r, attr_w, attr_x, attr_p; // Parse the addresses and permission bits at the beginning of the line. if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; + if (fscanf(fp, "%" V8PRIxPTR, &offset) != 1) break; + + // Adjust {start} based on {offset}. + start -= offset; int c; if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { @@ -224,7 +132,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { ungetc(c, fp); // Read to the end of the line. Exit if the read fails. - if (fgets(lib_name, kLibNameLen, fp) == NULL) break; + if (fgets(lib_name, kLibNameLen, fp) == nullptr) break; // Drop the newline character read by fgets. We do not need to check // for a zero-length string because we know that we at least read the @@ -250,7 +158,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { return result; } -void OS::SignalCodeMovingGC(void* hint) { +void OS::SignalCodeMovingGC() { // Support for ll_prof.py. // // The Linux profiler built into the kernel logs all mmap's with @@ -261,14 +169,14 @@ void OS::SignalCodeMovingGC(void* hint) { // kernel log. long size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int) FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+"); - if (f == NULL) { + if (f == nullptr) { OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile()); OS::Abort(); } - void* addr = - mmap(hint, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0); + void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC, + MAP_PRIVATE, fileno(f), 0); DCHECK_NE(MAP_FAILED, addr); - OS::Free(addr, size); + CHECK(Free(addr, size)); fclose(f); } diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc index 3c19962186..081e434589 100644 --- a/deps/v8/src/base/platform/platform-macos.cc +++ b/deps/v8/src/base/platform/platform-macos.cc @@ -43,119 +43,12 @@ namespace v8 { namespace base { - -// Constants used for mmap. -// kMmapFd is used to pass vm_alloc flags to tag the region with the user -// defined tag 255 This helps identify V8-allocated regions in memory analysis -// tools like vmmap(1). 
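The Linux hunk above now also reads the file-offset field of each /proc/self/maps entry and rebases the start address by it (start -= offset), presumably so that the recorded library base points at the start of the mapped object rather than at its executable segment. A hypothetical standalone parser for one such line (not V8's implementation, which scans the file incrementally with fscanf):

  // Example line:
  //   7f1c2a021000-7f1c2a200000 r-xp 00021000 08:01 123456  /usr/lib/libfoo.so
  #include <cinttypes>
  #include <cstdio>

  bool ParseMapsLine(const char* line, uintptr_t* lib_start, uintptr_t* lib_end) {
    uintptr_t start, end, offset;
    char r, w, x, p;
    if (std::sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %c%c%c%c %" SCNxPTR,
                    &start, &end, &r, &w, &x, &p, &offset) != 7) {
      return false;
    }
    // Keep read-only executable mappings, mirroring the checks in the code.
    if (r != 'r' || w == 'w' || x != 'x') return false;
    *lib_start = start - offset;  // rebase by the file offset
    *lib_end = end;
    return true;
  }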
-static const int kMmapFd = VM_MAKE_TAG(255); -static const off_t kMmapFdOffset = 0; - -// static -void* OS::Allocate(const size_t requested, size_t* allocated, - OS::MemoryPermission access, void* hint) { - const size_t msize = RoundUp(requested, getpagesize()); - int prot = GetProtectionFromMemoryPermission(access); - void* mbase = - mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset); - if (mbase == MAP_FAILED) return NULL; - *allocated = msize; - return mbase; -} - -// static -void* OS::ReserveRegion(size_t size, void* hint) { - void* result = - mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, kMmapFdOffset); - - if (result == MAP_FAILED) return nullptr; - - return result; -} - -// static -void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint, - size_t* allocated) { - DCHECK((alignment % OS::AllocateAlignment()) == 0); - hint = AlignedAddress(hint, alignment); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* result = ReserveRegion(request_size, hint); - if (result == nullptr) { - *allocated = 0; - return nullptr; - } - - uint8_t* base = static_cast<uint8_t*>(result); - uint8_t* aligned_base = RoundUp(base, alignment); - DCHECK_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. - if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - DCHECK_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - DCHECK(aligned_size == request_size); - - *allocated = aligned_size; - return static_cast<void*>(aligned_base); -} - -// static -bool OS::CommitRegion(void* address, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); - if (MAP_FAILED == mmap(address, - size, - prot, - MAP_PRIVATE | MAP_ANON | MAP_FIXED, - kMmapFd, - kMmapFdOffset)) { - return false; - } - return true; -} - -// static -bool OS::UncommitRegion(void* address, size_t size) { - return mmap(address, - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, - kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - -// static -bool OS::ReleaseRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::ReleasePartialRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::HasLazyCommits() { return true; } - std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { std::vector<SharedLibraryAddress> result; unsigned int images_count = _dyld_image_count(); for (unsigned int i = 0; i < images_count; ++i) { const mach_header* header = _dyld_get_image_header(i); - if (header == NULL) continue; + if (header == nullptr) continue; #if V8_HOST_ARCH_X64 uint64_t size; char* code_ptr = getsectdatafromheader_64( @@ -165,7 +58,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { unsigned int size; char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); #endif - if (code_ptr == NULL) continue; + if (code_ptr == nullptr) continue; const intptr_t slide = _dyld_get_image_vmaddr_slide(i); const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start, @@ -174,7 +67,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { return result; } -void OS::SignalCodeMovingGC(void* hint) {} +void OS::SignalCodeMovingGC() {} TimezoneCache* OS::CreateTimezoneCache() { return new PosixDefaultTimezoneCache(); diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc index 910d4a8104..9084c3075e 100644 --- a/deps/v8/src/base/platform/platform-openbsd.cc +++ b/deps/v8/src/base/platform/platform-openbsd.cc @@ -38,112 +38,13 @@ TimezoneCache* OS::CreateTimezoneCache() { return new PosixDefaultTimezoneCache(); } -// Constants used for mmap. -static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - -void* OS::Allocate(const size_t requested, size_t* allocated, - OS::MemoryPermission access, void* hint) { - const size_t msize = RoundUp(requested, AllocateAlignment()); - int prot = GetProtectionFromMemoryPermission(access); - void* mbase = - mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset); - if (mbase == MAP_FAILED) return NULL; - *allocated = msize; - return mbase; -} - -// static -void* OS::ReserveRegion(size_t size, void* hint) { - void* result = - mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, kMmapFdOffset); - - if (result == MAP_FAILED) return NULL; - - return result; -} - -// static -void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint, - size_t* allocated) { - DCHECK((alignment % OS::AllocateAlignment()) == 0); - hint = AlignedAddress(hint, alignment); - size_t request_size = - RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment())); - void* result = ReserveRegion(request_size, hint); - if (result == nullptr) { - *allocated = 0; - return nullptr; - } - - uint8_t* base = static_cast<uint8_t*>(result); - uint8_t* aligned_base = RoundUp(base, alignment); - DCHECK_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. 
- if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - DCHECK_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - DCHECK(aligned_size == request_size); - - *allocated = aligned_size; - return static_cast<void*>(aligned_base); -} - -// static -bool OS::CommitRegion(void* address, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - if (MAP_FAILED == mmap(address, size, prot, - MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd, - kMmapFdOffset)) { - return false; - } - return true; -} - -// static -bool OS::UncommitRegion(void* address, size_t size) { - return mmap(address, size, PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - -// static -bool OS::ReleaseRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::ReleasePartialRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - -// static -bool OS::HasLazyCommits() { - // TODO(alph): implement for the platform. - return false; -} - std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { std::vector<SharedLibraryAddress> result; // This function assumes that the layout of the file is as follows: // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] // If we encounter an unexpected situation we abort scanning further entries. FILE* fp = fopen("/proc/self/maps", "r"); - if (fp == NULL) return result; + if (fp == nullptr) return result; // Allocate enough room to be able to store a full file name. const int kLibNameLen = FILENAME_MAX + 1; @@ -171,7 +72,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { ungetc(c, fp); // Push the '/' back into the stream to be read below. // Read to the end of the line. Exit if the read fails. - if (fgets(lib_name, kLibNameLen, fp) == NULL) break; + if (fgets(lib_name, kLibNameLen, fp) == nullptr) break; // Drop the newline character read by fgets. We do not need to check // for a zero-length string because we know that we at least read the @@ -197,7 +98,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { return result; } -void OS::SignalCodeMovingGC(void* hint) { +void OS::SignalCodeMovingGC() { // Support for ll_prof.py. // // The Linux profiler built into the kernel logs all mmap's with @@ -208,13 +109,13 @@ void OS::SignalCodeMovingGC(void* hint) { // kernel log. 
int size = sysconf(_SC_PAGESIZE); FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+"); - if (f == NULL) { + if (f == nullptr) { OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile()); OS::Abort(); } void* addr = - mmap(hint, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0); - DCHECK_NE(MAP_FAILED, addr); + mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0); + DCHECK(addr != MAP_FAILED); OS::Free(addr, size); fclose(f); } diff --git a/deps/v8/src/base/platform/platform-posix-time.cc b/deps/v8/src/base/platform/platform-posix-time.cc index a960f7237e..54618810c2 100644 --- a/deps/v8/src/base/platform/platform-posix-time.cc +++ b/deps/v8/src/base/platform/platform-posix-time.cc @@ -19,7 +19,7 @@ const char* PosixDefaultTimezoneCache::LocalTimezone(double time) { } double PosixDefaultTimezoneCache::LocalTimeOffset() { - time_t tv = time(NULL); + time_t tv = time(nullptr); struct tm tm; struct tm* t = localtime_r(&tv, &tm); // tm_gmtoff includes any daylight savings offset, so subtract it. diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index 8f658b95cb..b873197d3b 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -27,8 +27,6 @@ #include <sys/sysctl.h> // NOLINT, for sysctl #endif -#undef MAP_TYPE - #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) #define LOG_TAG "v8" #include <android/log.h> // NOLINT @@ -61,6 +59,22 @@ #include <sys/syscall.h> #endif +#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS +#define MAP_ANONYMOUS MAP_ANON +#endif + +#if defined(V8_OS_SOLARIS) +#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__) +extern "C" int madvise(caddr_t, size_t, int); +#else +extern int madvise(caddr_t, size_t, int); +#endif +#endif + +#ifndef MADV_FREE +#define MADV_FREE MADV_DONTNEED +#endif + namespace v8 { namespace base { @@ -71,10 +85,96 @@ const pthread_t kNoThread = (pthread_t) 0; bool g_hard_abort = false; -const char* g_gc_fake_mmap = NULL; +const char* g_gc_fake_mmap = nullptr; + +static LazyInstance<RandomNumberGenerator>::type + platform_random_number_generator = LAZY_INSTANCE_INITIALIZER; + +#if !V8_OS_FUCHSIA +#if V8_OS_MACOSX +// kMmapFd is used to pass vm_alloc flags to tag the region with the user +// defined tag 255 This helps identify V8-allocated regions in memory analysis +// tools like vmmap(1). 
+const int kMmapFd = VM_MAKE_TAG(255); +#else // !V8_OS_MACOSX +const int kMmapFd = -1; +#endif // !V8_OS_MACOSX + +const int kMmapFdOffset = 0; + +int GetProtectionFromMemoryPermission(OS::MemoryPermission access) { + switch (access) { + case OS::MemoryPermission::kNoAccess: + return PROT_NONE; + case OS::MemoryPermission::kReadWrite: + return PROT_READ | PROT_WRITE; + case OS::MemoryPermission::kReadWriteExecute: + return PROT_READ | PROT_WRITE | PROT_EXEC; + case OS::MemoryPermission::kReadExecute: + return PROT_READ | PROT_EXEC; + } + UNREACHABLE(); +} + +int GetFlagsForMemoryPermission(OS::MemoryPermission access) { + int flags = MAP_PRIVATE | MAP_ANONYMOUS; + if (access == OS::MemoryPermission::kNoAccess) { +#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX + flags |= MAP_NORESERVE; +#endif // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX +#if V8_OS_QNX + flags |= MAP_LAZY; +#endif // V8_OS_QNX + } + return flags; +} + +void* Allocate(void* address, size_t size, OS::MemoryPermission access) { + const size_t actual_size = RoundUp(size, OS::AllocatePageSize()); + int prot = GetProtectionFromMemoryPermission(access); + int flags = GetFlagsForMemoryPermission(access); + void* result = + mmap(address, actual_size, prot, flags, kMmapFd, kMmapFdOffset); + if (result == MAP_FAILED) return nullptr; + return result; +} + +int ReclaimInaccessibleMemory(void* address, size_t size) { +#if defined(OS_MACOSX) + // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also + // marks the pages with the reusable bit, which allows both Activity Monitor + // and memory-infra to correctly track the pages. + int ret = madvise(address, size, MADV_FREE_REUSABLE); +#elif defined(_AIX) || defined(V8_OS_SOLARIS) + int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE); +#else + int ret = madvise(address, size, MADV_FREE); +#endif + if (ret != 0 && errno == EINVAL) { + // MADV_FREE only works on Linux 4.5+ . If request failed, retry with older + // MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't + // imply runtime support. +#if defined(_AIX) || defined(V8_OS_SOLARIS) + ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED); +#else + ret = madvise(address, size, MADV_DONTNEED); +#endif + } + return ret; +} + +#endif // !V8_OS_FUCHSIA } // namespace +void OS::Initialize(int64_t random_seed, bool hard_abort, + const char* const gc_fake_mmap) { + if (random_seed) { + platform_random_number_generator.Pointer()->SetSeed(random_seed); + } + g_hard_abort = hard_abort; + g_gc_fake_mmap = gc_fake_mmap; +} int OS::ActivationFrameAlignment() { #if V8_TARGET_ARCH_ARM @@ -95,77 +195,161 @@ int OS::ActivationFrameAlignment() { #endif } +// static +size_t OS::AllocatePageSize() { + return static_cast<size_t>(sysconf(_SC_PAGESIZE)); +} -intptr_t OS::CommitPageSize() { - static intptr_t page_size = getpagesize(); +// static +size_t OS::CommitPageSize() { + static size_t page_size = getpagesize(); return page_size; } -void* OS::Allocate(const size_t requested, size_t* allocated, - bool is_executable, void* hint) { - return OS::Allocate(requested, allocated, - is_executable ? OS::MemoryPermission::kReadWriteExecute - : OS::MemoryPermission::kReadWrite, - hint); +// static +void* OS::GetRandomMmapAddr() { +#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \ + defined(THREAD_SANITIZER) + // Dynamic tools do not support custom mmap addresses. 
+ return nullptr; +#endif + uintptr_t raw_addr; + platform_random_number_generator.Pointer()->NextBytes(&raw_addr, + sizeof(raw_addr)); +#if V8_TARGET_ARCH_X64 + // Currently available CPUs have 48 bits of virtual addressing. Truncate + // the hint address to 46 bits to give the kernel a fighting chance of + // fulfilling our placement request. + raw_addr &= V8_UINT64_C(0x3ffffffff000); +#elif V8_TARGET_ARCH_PPC64 +#if V8_OS_AIX + // AIX: 64 bits of virtual addressing, but we limit address range to: + // a) minimize Segment Lookaside Buffer (SLB) misses and + raw_addr &= V8_UINT64_C(0x3ffff000); + // Use extra address space to isolate the mmap regions. + raw_addr += V8_UINT64_C(0x400000000000); +#elif V8_TARGET_BIG_ENDIAN + // Big-endian Linux: 44 bits of virtual addressing. + raw_addr &= V8_UINT64_C(0x03fffffff000); +#else + // Little-endian Linux: 48 bits of virtual addressing. + raw_addr &= V8_UINT64_C(0x3ffffffff000); +#endif +#elif V8_TARGET_ARCH_S390X + // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits + // of virtual addressing. Truncate to 40 bits to allow kernel chance to + // fulfill request. + raw_addr &= V8_UINT64_C(0xfffffff000); +#elif V8_TARGET_ARCH_S390 + // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance + // to fulfill request. + raw_addr &= 0x1ffff000; +#else + raw_addr &= 0x3ffff000; + +#ifdef __sun + // For our Solaris/illumos mmap hint, we pick a random address in the bottom + // half of the top half of the address space (that is, the third quarter). + // Because we do not MAP_FIXED, this will be treated only as a hint -- the + // system will not fail to mmap() because something else happens to already + // be mapped at our random address. We deliberately set the hint high enough + // to get well above the system's break (that is, the heap); Solaris and + // illumos will try the hint and if that fails allocate as if there were + // no hint at all. The high hint prevents the break from getting hemmed in + // at low values, ceding half of the address space to the system heap. + raw_addr += 0x80000000; +#elif V8_OS_AIX + // The range 0x30000000 - 0xD0000000 is available on AIX; + // choose the upper range. + raw_addr += 0x90000000; +#else + // The range 0x20000000 - 0x60000000 is relatively unpopulated across a + // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos + // 10.6 and 10.7. + raw_addr += 0x20000000; +#endif +#endif + return reinterpret_cast<void*>(raw_addr); } -void OS::Free(void* address, const size_t size) { - // TODO(1240712): munmap has a return value which is ignored here. - int result = munmap(address, size); - USE(result); - DCHECK(result == 0); -} +// TODO(bbudge) Move Cygwin and Fuschia stuff into platform-specific files. +#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA +// static +void* OS::Allocate(void* address, size_t size, size_t alignment, + MemoryPermission access) { + size_t page_size = AllocatePageSize(); + DCHECK_EQ(0, size % page_size); + DCHECK_EQ(0, alignment % page_size); + address = AlignedAddress(address, alignment); + // Add the maximum misalignment so we are guaranteed an aligned base address. + size_t request_size = size + (alignment - page_size); + void* result = base::Allocate(address, request_size, access); + if (result == nullptr) return nullptr; + + // Unmap memory allocated before the aligned base address. 
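As an aside on the architecture-specific hint masking in GetRandomMmapAddr above: on x64 the mask 0x3ffffffff000 keeps bits 12 through 45 of the random value, so the hint is page-aligned and stays inside a 46-bit address space. With a hypothetical random value:

  uintptr_t raw  = 0x9f3c27d581a4b7c2;               // random bits (made up)
  uintptr_t hint = raw & uintptr_t{0x3ffffffff000};  // == 0x27d581a4b000
  // Low 12 bits cleared   -> 4 KiB page aligned.
  // Bits 46..63 cleared   -> low enough for the kernel to honor the hint.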
+ uint8_t* base = static_cast<uint8_t*>(result); + uint8_t* aligned_base = RoundUp(base, alignment); + if (aligned_base != base) { + DCHECK_LT(base, aligned_base); + size_t prefix_size = static_cast<size_t>(aligned_base - base); + CHECK(Free(base, prefix_size)); + request_size -= prefix_size; + } + // Unmap memory allocated after the potentially unaligned end. + if (size != request_size) { + DCHECK_LT(size, request_size); + size_t suffix_size = request_size - size; + CHECK(Free(aligned_base + size, suffix_size)); + request_size -= suffix_size; + } + DCHECK_EQ(size, request_size); + return static_cast<void*>(aligned_base); +} -// Get rid of writable permission on code allocations. -void OS::ProtectCode(void* address, const size_t size) { -#if V8_OS_CYGWIN - DWORD old_protect; - VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); -#else - mprotect(address, size, PROT_READ | PROT_EXEC); -#endif +// static +bool OS::Free(void* address, const size_t size) { + DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize()); + DCHECK_EQ(0, size % AllocatePageSize()); + return munmap(address, size) == 0; } +// static +bool OS::Release(void* address, size_t size) { + DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize()); + DCHECK_EQ(0, size % CommitPageSize()); + return munmap(address, size) == 0; +} -// Create guard pages. -#if !V8_OS_FUCHSIA -void OS::Guard(void* address, const size_t size) { -#if V8_OS_CYGWIN - DWORD oldprotect; - VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect); -#else - mprotect(address, size, PROT_NONE); -#endif +// static +bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { + DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize()); + DCHECK_EQ(0, size % CommitPageSize()); + + int prot = GetProtectionFromMemoryPermission(access); + int ret = mprotect(address, size, prot); + if (ret == 0 && access == OS::MemoryPermission::kNoAccess) { + ret = ReclaimInaccessibleMemory(address, size); + } + return ret == 0; } -#endif // !V8_OS_FUCHSIA -// Make a region of memory readable and writable. -void OS::Unprotect(void* address, const size_t size) { -#if V8_OS_CYGWIN - DWORD oldprotect; - VirtualProtect(address, size, PAGE_READWRITE, &oldprotect); +// static +bool OS::HasLazyCommits() { +#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX + return true; #else - mprotect(address, size, PROT_READ | PROT_WRITE); + // TODO(bbudge) Return true for all POSIX platforms. 
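The POSIX OS::Allocate above, like the Fuchsia and Cygwin versions earlier in the diff, obtains alignment by over-reserving and then trimming. A standalone sketch of that arithmetic, assuming plain mmap/munmap rather than V8's wrappers (the patch itself maps MAP_ANONYMOUS to MAP_ANON on the BSD-like platforms):

  #include <sys/mman.h>
  #include <cstddef>
  #include <cstdint>

  // Reserve `size` bytes aligned to `alignment`; both must be multiples of
  // page_size, and alignment must be at least page_size.
  void* AllocateAligned(size_t size, size_t alignment, size_t page_size) {
    // Over-reserve by the worst-case misalignment.
    size_t request = size + (alignment - page_size);
    void* raw = mmap(nullptr, request, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
                     -1, 0);
    if (raw == MAP_FAILED) return nullptr;
    uintptr_t base = reinterpret_cast<uintptr_t>(raw);
    uintptr_t aligned =
        (base + alignment - 1) & ~(static_cast<uintptr_t>(alignment) - 1);
    if (aligned != base) {
      munmap(raw, aligned - base);                             // misaligned prefix
    }
    size_t tail = request - (aligned - base) - size;
    if (tail != 0) {
      munmap(reinterpret_cast<void*>(aligned + size), tail);   // unused suffix
    }
    return reinterpret_cast<void*>(aligned);
  }

The Win32 allocator cannot unmap part of a reservation, which is why the Cygwin hunk instead frees the padded block and re-reserves at the aligned address, retrying a few times if another thread grabs the range first.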
+ return false; #endif } - -void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) { - g_hard_abort = hard_abort; - g_gc_fake_mmap = gc_fake_mmap; -} - +#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA const char* OS::GetGCFakeMMapFile() { return g_gc_fake_mmap; } -size_t OS::AllocateAlignment() { - return static_cast<size_t>(sysconf(_SC_PAGESIZE)); -} - - void OS::Sleep(TimeDelta interval) { usleep(static_cast<useconds_t>(interval.InMicroseconds())); } @@ -220,13 +404,14 @@ class PosixMemoryMappedFile final : public OS::MemoryMappedFile { // static -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) { +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { if (FILE* file = fopen(name, "r+")) { if (fseek(file, 0, SEEK_END) == 0) { long size = ftell(file); // NOLINT(runtime/int) if (size >= 0) { - void* const memory = mmap(hint, size, PROT_READ | PROT_WRITE, - MAP_SHARED, fileno(file), 0); + void* const memory = + mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE, + MAP_SHARED, fileno(file), 0); if (memory != MAP_FAILED) { return new PosixMemoryMappedFile(file, memory, size); } @@ -239,13 +424,13 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) { // static -OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint, +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, size_t size, void* initial) { if (FILE* file = fopen(name, "w+")) { size_t result = fwrite(initial, 1, size, file); if (result == size && !ferror(file)) { - void* memory = mmap(hint, result, PROT_READ | PROT_WRITE, MAP_SHARED, - fileno(file), 0); + void* memory = mmap(OS::GetRandomMmapAddr(), result, + PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); if (memory != MAP_FAILED) { return new PosixMemoryMappedFile(file, memory, result); } @@ -257,7 +442,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint, PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) OS::Free(memory_, size_); + if (memory_) CHECK(OS::Free(memory_, size_)); fclose(file_); } @@ -309,7 +494,7 @@ double PosixTimezoneCache::DaylightSavingsOffset(double time) { time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); struct tm tm; struct tm* t = localtime_r(&tv, &tm); - if (NULL == t) return std::numeric_limits<double>::quiet_NaN(); + if (nullptr == t) return std::numeric_limits<double>::quiet_NaN(); return t->tm_isdst > 0 ? 
3600 * msPerSecond : 0; } @@ -325,16 +510,16 @@ int OS::GetLastError() { FILE* OS::FOpen(const char* path, const char* mode) { FILE* file = fopen(path, mode); - if (file == NULL) return NULL; + if (file == nullptr) return nullptr; struct stat file_stat; if (fstat(fileno(file), &file_stat) != 0) { fclose(file); - return NULL; + return nullptr; } bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0); if (is_regular_file) return file; fclose(file); - return NULL; + return nullptr; } @@ -462,7 +647,7 @@ class Thread::PlatformData { Thread::Thread(const Options& options) : data_(new PlatformData), stack_size_(options.stack_size()), - start_semaphore_(NULL) { + start_semaphore_(nullptr) { if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) { stack_size_ = PTHREAD_STACK_MIN; } @@ -487,8 +672,7 @@ static void SetThreadName(const char* name) { int (*dynamic_pthread_setname_np)(const char*); *reinterpret_cast<void**>(&dynamic_pthread_setname_np) = dlsym(RTLD_DEFAULT, "pthread_setname_np"); - if (dynamic_pthread_setname_np == NULL) - return; + if (dynamic_pthread_setname_np == nullptr) return; // Mac OS X does not expose the length limit of the name, so hardcode it. static const int kMaxNameLength = 63; @@ -509,9 +693,9 @@ static void* ThreadEntry(void* arg) { // one). { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); } SetThreadName(thread->name()); - DCHECK(thread->data()->thread_ != kNoThread); + DCHECK_NE(thread->data()->thread_, kNoThread); thread->NotifyStartedAndRun(); - return NULL; + return nullptr; } @@ -548,15 +732,11 @@ void Thread::Start() { DCHECK_EQ(0, result); result = pthread_attr_destroy(&attr); DCHECK_EQ(0, result); - DCHECK(data_->thread_ != kNoThread); + DCHECK_NE(data_->thread_, kNoThread); USE(result); } - -void Thread::Join() { - pthread_join(data_->thread_, NULL); -} - +void Thread::Join() { pthread_join(data_->thread_, nullptr); } static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) { #if V8_OS_CYGWIN @@ -595,7 +775,7 @@ static void InitializeTlsBaseOffset() { char buffer[kBufferSize]; size_t buffer_size = kBufferSize; int ctl_name[] = { CTL_KERN , KERN_OSRELEASE }; - if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) { + if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) { V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version"); } // The buffer now contains a string of the form XX.YY.ZZ, where @@ -605,7 +785,7 @@ static void InitializeTlsBaseOffset() { char* period_pos = strchr(buffer, '.'); *period_pos = '\0'; int kernel_version_major = - static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT + static_cast<int>(strtol(buffer, nullptr, 10)); // NOLINT // The constants below are taken from pthreads.s from the XNU kernel // sources archive at www.opensource.apple.com. 
@@ -595,7 +775,7 @@ static void InitializeTlsBaseOffset() {
   char buffer[kBufferSize];
   size_t buffer_size = kBufferSize;
   int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
-  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+  if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
     V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
   }
   // The buffer now contains a string of the form XX.YY.ZZ, where
@@ -605,7 +785,7 @@ static void InitializeTlsBaseOffset() {
   char* period_pos = strchr(buffer, '.');
   *period_pos = '\0';
   int kernel_version_major =
-      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
+      static_cast<int>(strtol(buffer, nullptr, 10));  // NOLINT
   // The constants below are taken from pthreads.s from the XNU kernel
   // sources archive at www.opensource.apple.com.
   if (kernel_version_major < 11) {
@@ -633,7 +813,7 @@ static void CheckFastTls(Thread::LocalStorageKey key) {
     V8_Fatal(__FILE__, __LINE__,
              "V8 failed to initialize fast TLS on current kernel");
   }
-  Thread::SetThreadLocal(key, NULL);
+  Thread::SetThreadLocal(key, nullptr);
 }
 
 #endif  // V8_FAST_TLS_SUPPORTED
@@ -648,7 +828,7 @@ Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
   }
 #endif
   pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
+  int result = pthread_key_create(&key, nullptr);
   DCHECK_EQ(0, result);
   USE(result);
   LocalStorageKey local_key = PthreadKeyToLocalKey(key);
@@ -681,17 +861,9 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
   USE(result);
 }
 
-int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
-  switch (access) {
-    case OS::MemoryPermission::kNoAccess:
-      return PROT_NONE;
-    case OS::MemoryPermission::kReadWrite:
-      return PROT_READ | PROT_WRITE;
-    case OS::MemoryPermission::kReadWriteExecute:
-      return PROT_READ | PROT_WRITE | PROT_EXEC;
-  }
-  UNREACHABLE();
-}
+#undef LOG_TAG
+#undef MAP_ANONYMOUS
+#undef MADV_FREE
 
 }  // namespace base
 }  // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index b092bb526d..55861bc9ac 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -21,8 +21,6 @@ class PosixTimezoneCache : public TimezoneCache {
   static const int msPerSecond = 1000;
 };
 
-int GetProtectionFromMemoryPermission(OS::MemoryPermission access);
-
 }  // namespace base
 }  // namespace v8
 
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 68bc0efbf9..640b77c816 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -89,106 +89,9 @@ TimezoneCache* OS::CreateTimezoneCache() {
   return new PosixDefaultTimezoneCache();
 }
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
-                   OS::MemoryPermission access, void* hint) {
-  const size_t msize = RoundUp(requested, AllocateAlignment());
-  int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
-                     kMmapFdOffset);
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* result = ReserveRegion(request_size, hint);
-  if (result == nullptr) {
-    *allocated = 0;
-    return nullptr;
-  }
-
-  uint8_t* base = static_cast<uint8_t*>(result);
-  uint8_t* aligned_base = RoundUp(base, alignment);
-  DCHECK_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  DCHECK_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  DCHECK(aligned_size == request_size);
-
-  *allocated = aligned_size;
-  return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return false; }
-
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddress> result;
-  procfs_mapinfo *mapinfos = NULL, *mapinfo;
+  procfs_mapinfo *mapinfos = nullptr, *mapinfo;
   int proc_fd, num, i;
 
   struct {
@@ -205,14 +108,14 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   }
 
   /* Get the number of map entries.  */
-  if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+  if (devctl(proc_fd, DCMD_PROC_MAPINFO, nullptr, 0, &num) != EOK) {
     close(proc_fd);
     return result;
   }
 
   mapinfos =
       reinterpret_cast<procfs_mapinfo*>(malloc(num * sizeof(procfs_mapinfo)));
-  if (mapinfos == NULL) {
+  if (mapinfos == nullptr) {
     close(proc_fd);
     return result;
   }
@@ -241,7 +144,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   return result;
 }
 
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
 
 }  // namespace base
 }  // namespace v8
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index 2ea6ef4a6c..b81895a3fb 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -47,7 +47,7 @@ const char* SolarisTimezoneCache::LocalTimezone(double time) {
   time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
   struct tm tm;
   struct tm* t = localtime_r(&tv, &tm);
-  if (NULL == t) return "";
+  if (nullptr == t) return "";
   return tzname[0];  // The location of the timezone string on Solaris.
 }
 
@@ -58,111 +58,11 @@ double SolarisTimezoneCache::LocalTimeOffset() {
 
 TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
-                   OS::MemoryPermission access, void* hint) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase =
-      mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
-
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* result = ReserveRegion(request_size, hint);
-  if (result == nullptr) {
-    *allocated = 0;
-    return nullptr;
-  }
-
-  uint8_t* base = static_cast<uint8_t*>(result);
-  uint8_t* aligned_base = RoundUp(base, alignment);
-  DCHECK_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  DCHECK_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  DCHECK(aligned_size == request_size);
-
-  *allocated = aligned_size;
-  return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   return std::vector<SharedLibraryAddress>();
 }
 
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
 
 }  // namespace base
 }  // namespace v8
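Editor's note: the AIX, QNX and Solaris hunks above delete each platform's private copies of OS::Allocate, ReserveRegion, ReserveAlignedRegion, CommitRegion, UncommitRegion and the Release helpers; a single shared POSIX implementation (in platform-posix.cc, largely outside this excerpt) takes over. The sketch below only illustrates the mmap/mprotect pattern such a shared path is built on; it is not the code this patch adds, and the helper names are hypothetical.

#include <sys/mman.h>
#include "src/base/platform/platform.h"

namespace {

// Maps the MemoryPermission enum (see the platform.h hunk later in this diff)
// onto POSIX protection bits.
int ProtectionFromAccess(v8::base::OS::MemoryPermission access) {
  using MP = v8::base::OS::MemoryPermission;
  switch (access) {
    case MP::kNoAccess:         return PROT_NONE;
    case MP::kReadWrite:        return PROT_READ | PROT_WRITE;
    case MP::kReadWriteExecute: return PROT_READ | PROT_WRITE | PROT_EXEC;
    case MP::kReadExecute:      return PROT_READ | PROT_EXEC;
  }
  return PROT_NONE;
}

// Reserving is an anonymous PROT_NONE mapping; committing is a later
// permission change on part of that mapping. One generic path like this
// replaces the per-OS MAP_LAZY / MAP_NORESERVE variants removed above.
void* ReserveSketch(void* hint, size_t size) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return result == MAP_FAILED ? nullptr : result;
}

bool CommitSketch(void* address, size_t size,
                  v8::base::OS::MemoryPermission access) {
  return mprotect(address, size, ProtectionFromAccess(access)) == 0;
}

}  // namespace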
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index de1a27506f..e026d7edae 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -20,10 +20,12 @@
 #include "src/base/win32-headers.h"
 
 #include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
 #include "src/base/macros.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
 #include "src/base/timezone-cache.h"
+#include "src/base/utils/random-number-generator.h"
 
 // Extra functions for MinGW. Most of these are the _s functions which are in
 // the Microsoft Visual Studio C++ CRT.
@@ -45,14 +47,14 @@ inline void MemoryFence() {
 
 int localtime_s(tm* out_tm, const time_t* time) {
   tm* posix_local_time_struct = localtime_r(time, out_tm);
-  if (posix_local_time_struct == NULL) return 1;
+  if (posix_local_time_struct == nullptr) return 1;
   return 0;
 }
 
 int fopen_s(FILE** pFile, const char* filename, const char* mode) {
   *pFile = fopen(filename, mode);
-  return *pFile != NULL ? 0 : 1;
+  return *pFile != nullptr ? 0 : 1;
 }
 
 int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
@@ -63,8 +65,8 @@ int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
 
 int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
-  CHECK(source != NULL);
-  CHECK(dest != NULL);
+  CHECK(source != nullptr);
+  CHECK(dest != nullptr);
   CHECK_GT(dest_size, 0);
 
   if (count == _TRUNCATE) {
@@ -137,11 +139,11 @@ class WindowsTimezoneCache : public TimezoneCache {
     }
 
     // Make standard and DST timezone names.
-    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
-                        std_tz_name_, kTzNameSize, NULL, NULL);
+    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1, std_tz_name_,
+                        kTzNameSize, nullptr, nullptr);
     std_tz_name_[kTzNameSize - 1] = '\0';
-    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
-                        dst_tz_name_, kTzNameSize, NULL, NULL);
+    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1, dst_tz_name_,
+                        kTzNameSize, nullptr, nullptr);
     dst_tz_name_[kTzNameSize - 1] = '\0';
 
     // If OS returned empty string or resource id (like "@tzres.dll,-211")
@@ -551,7 +553,7 @@ FILE* OS::FOpen(const char* path, const char* mode) {
   if (fopen_s(&result, path, mode) == 0) {
     return result;
   } else {
-    return NULL;
+    return nullptr;
   }
 }
 
@@ -572,13 +574,13 @@ FILE* OS::OpenTemporaryFile() {
   char tempPathBuffer[MAX_PATH];
   DWORD path_result = 0;
   path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
-  if (path_result > MAX_PATH || path_result == 0) return NULL;
+  if (path_result > MAX_PATH || path_result == 0) return nullptr;
   UINT name_result = 0;
   char tempNameBuffer[MAX_PATH];
   name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
-  if (name_result == 0) return NULL;
+  if (name_result == 0) return nullptr;
   FILE* result = FOpen(tempNameBuffer, "w+");  // Same mode as tmpfile uses.
-  if (result != NULL) {
+  if (result != nullptr) {
     Remove(tempNameBuffer);  // Delete on close.
   }
   return result;
@@ -672,42 +674,81 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
 #undef _TRUNCATE
 #undef STRUNCATE
 
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocatePageSize() {
+  static size_t allocate_alignment = 0;
+  if (allocate_alignment == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    allocate_alignment = info.dwAllocationGranularity;
+  }
+  return allocate_alignment;
+}
 
-// Get the system's page size used by VirtualAlloc() or the next power
-// of two. The reason for always returning a power of two is that the
-// rounding up in OS::Allocate expects that.
-static size_t GetPageSize() {
+size_t OS::CommitPageSize() {
   static size_t page_size = 0;
   if (page_size == 0) {
     SYSTEM_INFO info;
     GetSystemInfo(&info);
-    page_size = base::bits::RoundUpToPowerOfTwo32(info.dwPageSize);
+    page_size = info.dwPageSize;
+    DCHECK_EQ(4096, page_size);
   }
   return page_size;
 }
 
+static LazyInstance<RandomNumberGenerator>::type
+    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
 
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
-size_t OS::AllocateAlignment() {
-  static size_t allocate_alignment = 0;
-  if (allocate_alignment == 0) {
-    SYSTEM_INFO info;
-    GetSystemInfo(&info);
-    allocate_alignment = info.dwAllocationGranularity;
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+                    const char* const gc_fake_mmap) {
+  if (random_seed) {
+    platform_random_number_generator.Pointer()->SetSeed(random_seed);
   }
-  return allocate_alignment;
+  g_hard_abort = hard_abort;
 }
 
-void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
-  g_hard_abort = hard_abort;
+void* OS::GetRandomMmapAddr() {
+// The address range used to randomize RWX allocations in OS::Allocate
+// Try not to map pages into the default range that windows loads DLLs
+// Use a multiple of 64k to prevent committing unused memory.
+// Note: This does not guarantee RWX regions will be within the
+// range kAllocationRandomAddressMin to kAllocationRandomAddressMax
+#ifdef V8_HOST_ARCH_64_BIT
+  static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+  static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+  static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
+  static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+  uintptr_t address;
+  platform_random_number_generator.Pointer()->NextBytes(&address,
+                                                        sizeof(address));
+  address <<= kPageSizeBits;
+  address += kAllocationRandomAddressMin;
+  address &= kAllocationRandomAddressMax;
+  return reinterpret_cast<void*>(address);
 }
 
 namespace {
 
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
-                                    void* hint) {
-  LPVOID base = NULL;
+DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+  switch (access) {
+    case OS::MemoryPermission::kNoAccess:
+      return PAGE_NOACCESS;
+    case OS::MemoryPermission::kReadWrite:
+      return PAGE_READWRITE;
+    case OS::MemoryPermission::kReadWriteExecute:
+      return PAGE_EXECUTE_READWRITE;
+    case OS::MemoryPermission::kReadExecute:
+      return PAGE_EXECUTE_READ;
  }
  UNREACHABLE();
}

+uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
+                                void* hint) {
+  LPVOID base = nullptr;
   static BOOL use_aslr = -1;
 #ifdef V8_HOST_ARCH_32_BIT
   // Don't bother randomizing on 32-bit hosts, because they lack the room and
@@ -718,146 +759,96 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
   use_aslr = TRUE;
 #endif
 
-  if (use_aslr &&
-      (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
-    // For executable pages try and randomize the allocation address
-    base = VirtualAlloc(hint, size, action, protection);
+  if (use_aslr && protect != PAGE_READWRITE) {
+    // For executable or reserved pages try to randomize the allocation address.
+    base = VirtualAlloc(hint, size, flags, protect);
   }
 
-  // After three attempts give up and let the OS find an address to use.
-  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
-  return base;
-}
-
-}  // namespace
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
-                   bool is_executable, void* hint) {
-  return OS::Allocate(requested, allocated,
-                      is_executable ? OS::MemoryPermission::kReadWriteExecute
-                                    : OS::MemoryPermission::kReadWrite,
-                      hint);
-}
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
-                   OS::MemoryPermission access, void* hint) {
-  // VirtualAlloc rounds allocated size to page size automatically.
-  size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-
-  // Windows XP SP2 allows Data Excution Prevention (DEP).
-  int prot = PAGE_NOACCESS;
-  switch (access) {
-    case OS::MemoryPermission::kNoAccess: {
-      prot = PAGE_NOACCESS;
-      break;
-    }
-    case OS::MemoryPermission::kReadWrite: {
-      prot = PAGE_READWRITE;
-      break;
-    }
-    case OS::MemoryPermission::kReadWriteExecute: {
-      prot = PAGE_EXECUTE_READWRITE;
-      break;
-    }
+  // On failure, let the OS find an address to use.
+  if (base == nullptr) {
+    base = VirtualAlloc(nullptr, size, flags, protect);
   }
-
-  LPVOID mbase =
-      RandomizedVirtualAlloc(msize, MEM_COMMIT | MEM_RESERVE, prot, hint);
-
-  if (mbase == NULL) return NULL;
-
-  DCHECK((reinterpret_cast<uintptr_t>(mbase) % OS::AllocateAlignment()) == 0);
-
-  *allocated = msize;
-  return mbase;
+  return reinterpret_cast<uint8_t*>(base);
 }
 
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): VirtualFree has a return value which is ignored here.
-  VirtualFree(address, 0, MEM_RELEASE);
-  USE(size);
-}
-
-intptr_t OS::CommitPageSize() {
-  return 4096;
-}
-
-void OS::ProtectCode(void* address, const size_t size) {
-  DWORD old_protect;
-  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-void OS::Guard(void* address, const size_t size) {
-  DWORD oldprotect;
-  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-}
-
-void OS::Unprotect(void* address, const size_t size) {
-  LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
-  USE(result);
-}
+}  // namespace
 
 // static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size =
-      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size, hint);
-  if (address == nullptr) {
-    *allocated = 0;
-    return nullptr;
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+                   MemoryPermission access) {
+  size_t page_size = AllocatePageSize();
+  DCHECK_EQ(0, size % page_size);
+  DCHECK_EQ(0, alignment % page_size);
+  DCHECK_LE(page_size, alignment);
+  address = AlignedAddress(address, alignment);
+
+  DWORD flags = (access == OS::MemoryPermission::kNoAccess)
+                    ? MEM_RESERVE
+                    : MEM_RESERVE | MEM_COMMIT;
+  DWORD protect = GetProtectionFromMemoryPermission(access);
+
+  // First, try an exact size aligned allocation.
+  uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+  if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
+
+  // If address is suitably aligned, we're done.
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  if (base == aligned_base) return reinterpret_cast<void*>(base);
+
+  // Otherwise, free it and try a larger allocation.
+  CHECK(Free(base, size));
+
+  // Clear the hint. It's unlikely we can allocate at this address.
+  address = nullptr;
+
+  // Add the maximum misalignment so we are guaranteed an aligned base address
+  // in the allocated region.
+  size_t padded_size = size + (alignment - page_size);
+  const int kMaxAttempts = 3;
+  aligned_base = nullptr;
+  for (int i = 0; i < kMaxAttempts; ++i) {
+    base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+    if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
+
+    // Try to trim the allocation by freeing the padded allocation and then
+    // calling VirtualAlloc at the aligned base.
+    CHECK(Free(base, padded_size));
+    aligned_base = RoundUp(base, alignment);
+    base = reinterpret_cast<uint8_t*>(
+        VirtualAlloc(aligned_base, size, flags, protect));
+    // We might not get the reduced allocation due to a race. In that case,
+    // base will be nullptr.
+    if (base != nullptr) break;
   }
-  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  DCHECK(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != nullptr) {
-    request_size = size;
-    DCHECK(base == static_cast<uint8_t*>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size, hint);
-    if (address == nullptr) {
-      *allocated = 0;
-      return nullptr;
-    }
-  }
-
-  *allocated = request_size;
-  return static_cast<void*>(address);
+  DCHECK_EQ(base, aligned_base);
+  return reinterpret_cast<void*>(base);
 }
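Editor's note: to make the retry path in the new OS::Allocate above concrete, here is the arithmetic for one illustrative request; the numbers are chosen for the example and are not taken from the patch.

// Illustrative only: request 1 MB aligned to 256 KB when AllocatePageSize()
// (the VirtualAlloc allocation granularity) is 64 KB.
const size_t kPage = 64 * 1024;
const size_t kSize = 1024 * 1024;
const size_t kAlignment = 256 * 1024;

// Worst case, the base VirtualAlloc returns sits kAlignment - kPage below the
// next aligned address, so padding by that amount always leaves room:
const size_t kPaddedSize = kSize + (kAlignment - kPage);  // 1 MB + 192 KB

// For any 64 KB-aligned base, RoundUp(base, kAlignment) - base <= 192 KB, so
// [aligned_base, aligned_base + kSize) stays inside the padded block that was
// just freed, and the second VirtualAlloc at aligned_base can succeed unless
// another thread grabs the range first, hence the three attempts.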
 
 // static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-  return true;
+bool OS::Free(void* address, const size_t size) {
+  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
+  // TODO(bbudge) Add DCHECK_EQ(0, size % AllocatePageSize()) when callers
+  // pass the correct size on Windows.
+  USE(size);
+  return VirtualFree(address, 0, MEM_RELEASE) != 0;
 }
 
 // static
-bool OS::UncommitRegion(void* address, size_t size) {
+bool OS::Release(void* address, size_t size) {
+  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+  DCHECK_EQ(0, size % CommitPageSize());
   return VirtualFree(address, size, MEM_DECOMMIT) != 0;
 }
 
 // static
-bool OS::ReleaseRegion(void* address, size_t size) {
-  return VirtualFree(address, 0, MEM_RELEASE) != 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
-  return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+  DCHECK_EQ(0, size % CommitPageSize());
+  if (access == MemoryPermission::kNoAccess) {
+    return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+  }
+  DWORD protect = GetProtectionFromMemoryPermission(access);
+  return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
 }
 
 // static
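Editor's note: a hedged sketch of how the Windows primitives above compose; the function name, sizes and control flow are illustrative only and are not code from the patch.

#include "src/base/platform/platform.h"

using v8::base::OS;

void ReserveCommitReleaseSketch() {
  const size_t kRegion = OS::AllocatePageSize();  // 64 KB allocation granule
  const size_t kPage = OS::CommitPageSize();      // 4 KB commit page

  // kNoAccess reserves without committing (MEM_RESERVE only).
  void* base = OS::Allocate(OS::GetRandomMmapAddr(), kRegion,
                            OS::AllocatePageSize(),
                            OS::MemoryPermission::kNoAccess);
  if (base == nullptr) return;

  // Commit one page read-write (VirtualAlloc with MEM_COMMIT).
  bool ok = OS::SetPermissions(base, kPage, OS::MemoryPermission::kReadWrite);

  // Dropping back to kNoAccess decommits on Windows (MEM_DECOMMIT).
  ok = ok && OS::SetPermissions(base, kPage, OS::MemoryPermission::kNoAccess);

  // Free releases the whole reservation (VirtualFree with MEM_RELEASE).
  ok = ok && OS::Free(base, kRegion);
  (void)ok;  // Illustration only; real callers must check these results.
}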
@@ -916,18 +907,19 @@ class Win32MemoryMappedFile final : public OS::MemoryMappedFile {
 
 // static
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
   // Open a physical file
   HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
-      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
-  if (file == INVALID_HANDLE_VALUE) return NULL;
+                            FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+                            OPEN_EXISTING, 0, nullptr);
+  if (file == INVALID_HANDLE_VALUE) return nullptr;
 
-  DWORD size = GetFileSize(file, NULL);
+  DWORD size = GetFileSize(file, nullptr);
 
-  // Create a file mapping for the physical file. Ignore hint on Windows.
+  // Create a file mapping for the physical file
   HANDLE file_mapping =
-      CreateFileMapping(file, NULL, PAGE_READWRITE, 0, size, NULL);
-  if (file_mapping == NULL) return NULL;
+      CreateFileMapping(file, nullptr, PAGE_READWRITE, 0, size, nullptr);
+  if (file_mapping == nullptr) return nullptr;
 
   // Map a view of the file into memory
   void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
@@ -936,17 +928,17 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
 
 // static
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
                                                    size_t size, void* initial) {
   // Open a physical file
   HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
-                            FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
-                            OPEN_ALWAYS, 0, NULL);
-  if (file == NULL) return NULL;
-  // Create a file mapping for the physical file. Ignore hint on Windows.
-  HANDLE file_mapping = CreateFileMapping(file, NULL, PAGE_READWRITE, 0,
-                                          static_cast<DWORD>(size), NULL);
-  if (file_mapping == NULL) return NULL;
+                            FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+                            OPEN_ALWAYS, 0, nullptr);
+  if (file == nullptr) return nullptr;
+  // Create a file mapping for the physical file
+  HANDLE file_mapping = CreateFileMapping(file, nullptr, PAGE_READWRITE, 0,
+                                          static_cast<DWORD>(size), nullptr);
+  if (file_mapping == nullptr) return nullptr;
   // Map a view of the file into memory
   void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
   if (memory) memmove(memory, initial, size);
@@ -1062,7 +1054,7 @@ typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
 #undef VOID
 
 // Declare a variable for each dynamically loaded DLL function.
-#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
+#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = nullptr;
 DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
 TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
 #undef DEF_DLL_FUNCTION
@@ -1079,7 +1071,7 @@ static bool LoadDbgHelpAndTlHelp32() {
 
   // Load functions from the dbghelp.dll module.
   module = LoadLibrary(TEXT("dbghelp.dll"));
-  if (module == NULL) {
+  if (module == nullptr) {
     return false;
   }
 
@@ -1094,7 +1086,7 @@ DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
   // Load functions from the kernel32.dll module (the TlHelp32.h function used
   // to be in tlhelp32.dll but are now moved to kernel32.dll).
   module = LoadLibrary(TEXT("kernel32.dll"));
-  if (module == NULL) {
+  if (module == nullptr) {
     return false;
   }
 
@@ -1107,14 +1099,14 @@ TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
 #undef LOAD_DLL_FUNC
 
   // Check that all functions where loaded.
-  bool result =
-#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
+bool result =
+#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != nullptr)&&
 
-DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
-TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
+    DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
+        TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
 
 #undef DLL_FUNC_LOADED
-  true;
+      true;
 
   dbghelp_loaded = result;
   return result;
@@ -1141,7 +1133,7 @@ static std::vector<OS::SharedLibraryAddress> LoadSymbols(
 
   // Initialize the symbol engine.
   ok = _SymInitialize(process_handle,  // hProcess
-                      NULL,            // UserSearchPath
+                      nullptr,         // UserSearchPath
                       false);          // fInvadeProcess
   if (!ok) return result;
 
@@ -1185,10 +1177,10 @@ static std::vector<OS::SharedLibraryAddress> LoadSymbols(
       }
     }
     int lib_name_length = WideCharToMultiByte(
-        CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
+        CP_UTF8, 0, module_entry.szExePath, -1, nullptr, 0, nullptr, nullptr);
     std::string lib_name(lib_name_length, 0);
     WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
-                        lib_name_length, NULL, NULL);
+                        lib_name_length, nullptr, nullptr);
     result.push_back(OS::SharedLibraryAddress(
         lib_name, reinterpret_cast<uintptr_t>(module_entry.modBaseAddr),
         reinterpret_cast<uintptr_t>(module_entry.modBaseAddr +
@@ -1212,13 +1204,16 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   return LoadSymbols(process_handle);
 }
 
+void OS::SignalCodeMovingGC() {}
+
 #else  // __MINGW32__
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   return std::vector<OS::SharedLibraryAddress>();
 }
+
+void OS::SignalCodeMovingGC() {}
 #endif  // __MINGW32__
 
-void OS::SignalCodeMovingGC(void* hint) {}
 
 int OS::ActivationFrameAlignment() {
 #ifdef _WIN64
@@ -1261,8 +1256,7 @@ class Thread::PlatformData {
 // handle until it is started.
 
 Thread::Thread(const Options& options)
-    : stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
+    : stack_size_(options.stack_size()), start_semaphore_(nullptr) {
   data_ = new PlatformData(kNoThread);
   set_name(options.name());
 }
@@ -1286,12 +1280,8 @@ Thread::~Thread() {
 // initialize thread specific structures in the C runtime library.
 void Thread::Start() {
   data_->thread_ = reinterpret_cast<HANDLE>(
-      _beginthreadex(NULL,
-                     static_cast<unsigned>(stack_size_),
-                     ThreadEntry,
-                     this,
-                     0,
-                     &data_->thread_id_));
+      _beginthreadex(nullptr, static_cast<unsigned>(stack_size_), ThreadEntry,
+                     this, 0, &data_->thread_id_));
 }
 
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 0ff8599b0c..dd454ecd43 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -62,7 +62,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
                                                kPointerSize * index));
   }
   intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
-  DCHECK(extra != 0);
+  DCHECK_NE(extra, 0);
   return *reinterpret_cast<intptr_t*>(extra +
                                       kPointerSize * (index - kMaxInlineSlots));
 }
@@ -107,9 +107,11 @@ class TimezoneCache;
 class V8_BASE_EXPORT OS {
  public:
   // Initialize the OS class.
+  // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
   // - hard_abort: If true, OS::Abort() will crash instead of aborting.
   // - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
-  static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
+  static void Initialize(int64_t random_seed, bool hard_abort,
+                         const char* const gc_fake_mmap);
 
   // Returns the accumulated user time for thread. This routine
   // can be used for profiling. The implementation should
@@ -155,55 +157,47 @@ class V8_BASE_EXPORT OS {
   static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
   static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
 
-  // Memory access permissions. Only the modes currently used by V8 are listed
-  // here even though most systems support additional modes.
-  enum class MemoryPermission { kNoAccess, kReadWrite, kReadWriteExecute };
-
-  // Allocate/Free memory used by JS heap. Permissions are set according to the
-  // is_* flags. Returns the address of allocated memory, or NULL if failed.
-  static void* Allocate(const size_t requested, size_t* allocated,
-                        MemoryPermission access, void* hint = nullptr);
-  // Allocate/Free memory used by JS heap. Pages are readable/writable, but
-  // they are not guaranteed to be executable unless 'executable' is true.
-  // Returns the address of allocated memory, or NULL if failed.
-  static void* Allocate(const size_t requested, size_t* allocated,
-                        bool is_executable, void* hint = nullptr);
-  static void Free(void* address, const size_t size);
-
-  // Allocates a region of memory that is inaccessible. On Windows this reserves
-  // but does not commit the memory. On POSIX systems it allocates memory as
-  // PROT_NONE, which also prevents it from being committed.
-  static void* AllocateGuarded(const size_t requested);
-
-  // This is the granularity at which the ProtectCode(...) call can set page
-  // permissions.
-  static intptr_t CommitPageSize();
-
-  // Mark code segments non-writable.
-  static void ProtectCode(void* address, const size_t size);
-
-  // Assign memory as a guard page so that access will cause an exception.
-  static void Guard(void* address, const size_t size);
-
-  // Make a region of memory readable and writable.
-  static void Unprotect(void* address, const size_t size);
+  enum class MemoryPermission {
+    kNoAccess,
+    kReadWrite,
+    // TODO(hpayer): Remove this flag. Memory should never be rwx.
+    kReadWriteExecute,
+    kReadExecute
+  };
 
-  // Get the Alignment guaranteed by Allocate().
-  static size_t AllocateAlignment();
+  // Gets the page granularity for Allocate. Addresses returned by Allocate are
+  // aligned to this size.
+  static size_t AllocatePageSize();
 
-  static void* ReserveRegion(size_t size, void* hint);
+  // Gets the granularity at which the permissions and commit calls can be made.
+  static size_t CommitPageSize();
 
-  static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                                    size_t* allocated);
+  // Generate a random address to be used for hinting allocation calls.
+  static void* GetRandomMmapAddr();
 
-  static bool CommitRegion(void* address, size_t size, bool is_executable);
+  // Allocates memory. Permissions are set according to the access argument.
+  // The address parameter is a hint. The size and alignment parameters must be
+  // multiples of AllocatePageSize(). Returns the address of the allocated
+  // memory, with the specified size and alignment, or nullptr on failure.
+  V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
+                                              size_t alignment,
+                                              MemoryPermission access);
 
-  static bool UncommitRegion(void* address, size_t size);
+  // Frees memory allocated by a call to Allocate. address and size must be
+  // multiples of AllocatePageSize(). Returns true on success, otherwise false.
+  V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
 
-  static bool ReleaseRegion(void* address, size_t size);
+  // Releases memory that is no longer needed. The range specified by address
+  // and size must be part of an allocated memory region, and must be multiples
+  // of CommitPageSize(). Released memory is left in an undefined state, so it
+  // should not be accessed. Returns true on success, otherwise false.
+  V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
 
-  // Release part of a reserved address range.
-  static bool ReleasePartialRegion(void* address, size_t size);
+  // Sets permissions according to the access argument. address and size must be
+  // multiples of CommitPageSize(). Setting permission to kNoAccess may cause
+  // the memory contents to be lost. Returns true on success, otherwise false.
+  V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
+                                                   MemoryPermission access);
 
   static bool HasLazyCommits();
 
@@ -231,8 +225,8 @@ class V8_BASE_EXPORT OS {
     virtual void* memory() const = 0;
     virtual size_t size() const = 0;
 
-    static MemoryMappedFile* open(const char* name, void* hint);
-    static MemoryMappedFile* create(const char* name, void* hint, size_t size,
+    static MemoryMappedFile* open(const char* name);
+    static MemoryMappedFile* create(const char* name, size_t size,
                                     void* initial);
   };
 
@@ -271,7 +265,7 @@ class V8_BASE_EXPORT OS {
   // process that a code moving garbage collection starts. Can do
   // nothing, in which case the code objects must not move (e.g., by
   // using --never-compact) if accurate profiling is desired.
-  static void SignalCodeMovingGC(void* hint);
+  static void SignalCodeMovingGC();
 
   // Support runtime detection of whether the hard float option of the
   // EABI is used.
@@ -335,7 +329,7 @@ class V8_BASE_EXPORT Thread {
     Start();
     start_semaphore_->Wait();
     delete start_semaphore_;
-    start_semaphore_ = NULL;
+    start_semaphore_ = nullptr;
   }
 
   // Wait until thread terminates.
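Editor's note: for code being ported off the deleted platform.h entry points, the API documented above maps onto the old one roughly as follows. The shim is illustrative only; the *Compat names are hypothetical and nothing like it is added by the patch.

#include "src/base/platform/platform.h"

using v8::base::OS;

void* ReserveRegionCompat(size_t size, void* hint) {
  // Old ReserveRegion: reserve-only, no access permissions.
  return OS::Allocate(hint, size, OS::AllocatePageSize(),
                      OS::MemoryPermission::kNoAccess);
}

bool CommitRegionCompat(void* address, size_t size, bool is_executable) {
  // Old CommitRegion: now expressed as a permission change.
  return OS::SetPermissions(address, size,
                            is_executable
                                ? OS::MemoryPermission::kReadWriteExecute
                                : OS::MemoryPermission::kReadWrite);
}

bool UncommitRegionCompat(void* address, size_t size) {
  // Old UncommitRegion: kNoAccess may discard the contents.
  return OS::SetPermissions(address, size, OS::MemoryPermission::kNoAccess);
}

bool ReleaseRegionCompat(void* address, size_t size) {
  // Old ReleaseRegion: frees the entire allocation.
  return OS::Free(address, size);
}

bool ReleasePartialRegionCompat(void* address, size_t size) {
  // Old ReleasePartialRegion: returns part of a live region to the OS.
  return OS::Release(address, size);
}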
@@ -360,7 +354,7 @@ class V8_BASE_EXPORT Thread {
     SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
   }
   static bool HasThreadLocal(LocalStorageKey key) {
-    return GetThreadLocal(key) != NULL;
+    return GetThreadLocal(key) != nullptr;
   }
 
 #ifdef V8_FAST_TLS_SUPPORTED
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 346705fd02..9a7ef7a8f4 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -73,7 +73,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
 #elif V8_OS_POSIX
 
 Semaphore::Semaphore(int count) {
-  DCHECK(count >= 0);
+  DCHECK_GE(count, 0);
   int result = sem_init(&native_handle_, 0, count);
   DCHECK_EQ(0, result);
   USE(result);
@@ -135,9 +135,9 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
 #elif V8_OS_WIN
 
 Semaphore::Semaphore(int count) {
-  DCHECK(count >= 0);
-  native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
-  DCHECK(native_handle_ != NULL);
+  DCHECK_GE(count, 0);
+  native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7fffffff, nullptr);
+  DCHECK_NOT_NULL(native_handle_);
 }
 
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 6695bf8e57..3529d55875 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -38,7 +38,7 @@ int64_t ComputeThreadTicks() {
       THREAD_BASIC_INFO,
       reinterpret_cast<thread_info_t>(&thread_info_data),
       &thread_info_count);
-  CHECK(kr == KERN_SUCCESS);
+  CHECK_EQ(kr, KERN_SUCCESS);
 
   v8::base::CheckedNumeric<int64_t> absolute_micros(
       thread_info_data.user_time.seconds +
@@ -195,7 +195,7 @@ TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
 
 struct mach_timespec TimeDelta::ToMachTimespec() const {
   struct mach_timespec ts;
-  DCHECK(delta_ >= 0);
+  DCHECK_GE(delta_, 0);
   ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
   ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
       Time::kNanosecondsPerMicrosecond;
@@ -316,7 +316,7 @@ Time Time::FromFiletime(FILETIME ft) {
 
 FILETIME Time::ToFiletime() const {
-  DCHECK(us_ >= 0);
+  DCHECK_GE(us_, 0);
   FILETIME ft;
   if (IsNull()) {
     ft.dwLowDateTime = 0;
@@ -338,7 +338,7 @@ FILETIME Time::ToFiletime() const {
 
 Time Time::Now() {
   struct timeval tv;
-  int result = gettimeofday(&tv, NULL);
+  int result = gettimeofday(&tv, nullptr);
   DCHECK_EQ(0, result);
   USE(result);
   return FromTimeval(tv);
@@ -351,8 +351,8 @@ Time Time::NowFromSystemTime() {
 
 Time Time::FromTimespec(struct timespec ts) {
-  DCHECK(ts.tv_nsec >= 0);
-  DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond));  // NOLINT
+  DCHECK_GE(ts.tv_nsec, 0);
+  DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
   if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
     return Time();
   }
@@ -384,7 +384,7 @@ struct timespec Time::ToTimespec() const {
 
 Time Time::FromTimeval(struct timeval tv) {
-  DCHECK(tv.tv_usec >= 0);
+  DCHECK_GE(tv.tv_usec, 0);
   DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
   if (tv.tv_usec == 0 && tv.tv_sec == 0) {
     return Time();
@@ -577,7 +577,7 @@ static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
                            ThreadSafeInitOnceTrait>::type high_res_tick_clock =
     LAZY_DYNAMIC_INSTANCE_INITIALIZER;
 
-
+// static
 TimeTicks TimeTicks::Now() {
   // Make sure we never return 0 here.
   TimeTicks ticks(tick_clock.Pointer()->Now());
@@ -585,7 +585,7 @@ TimeTicks TimeTicks::Now() {
   return ticks;
 }
 
-
+// static
 TimeTicks TimeTicks::HighResolutionNow() {
   // Make sure we never return 0 here.
   TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index ed1751268f..25dee1c419 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -280,7 +280,7 @@ class TimeBase {
 
 class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
  public:
-  // Contains the NULL time. Use Time::Now() to get the current time.
+  // Contains the nullptr time. Use Time::Now() to get the current time.
   Time() : TimeBase(0) {}
 
   // Returns the current time. Watch out, the system might adjust its clock