author    Austin Clements <austin@google.com>    2016-04-14 13:41:36 -0400
committer Austin Clements <austin@google.com>    2016-04-16 21:42:43 +0000
commit    2cdcb6f8296b6528bb7d256a45e339c4aefb9109 (patch)
tree      c4ff128c339bd03f313c0f9ab67b70eed94599da
parent    1151473077fb03df798d4eb57a22fa820f9e41f8 (diff)
download  go-git-2cdcb6f8296b6528bb7d256a45e339c4aefb9109.tar.gz
runtime: scavenge memory on physical page-aligned boundaries
Currently the scavenger marks memory unused in multiples of the allocator page size (8K). This is safe as long as the true physical page size is 4K (or 8K), as it is on many platforms. However, on ARM64, PPC64x, and MIPS64, the physical page size is larger than 8K, so if we attempt to mark memory unused, the kernel will round the boundaries of the region *out* to all pages covered by the requested region, and we'll release a larger region of memory than intended. As a result, the scavenger is currently disabled on these platforms.

Fix this by first rounding the region to be marked unused *in* to multiples of the physical page size, so that when we ask the kernel to mark it unused, it releases exactly the requested region.

Fixes #9993.

Change-Id: I96d5fdc2f77f9d69abadcea29bcfe55e68288cb1
Reviewed-on: https://go-review.googlesource.com/22066
Reviewed-by: Rick Hudson <rlh@golang.org>
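The round-in arithmetic at the heart of this change is worth seeing in isolation before the diff. Below is a minimal, self-contained sketch of the technique the commit message describes; physPageSize, roundIn, and the 64K page size are illustrative assumptions, not the runtime's actual code:

package main

import "fmt"

// physPageSize stands in for sys.PhysPageSize. 64K is assumed
// here as a typical large page size on ARM64/PPC64x kernels.
const physPageSize uintptr = 64 << 10

// roundIn shrinks [start, end) to the largest subrange aligned
// to physical page boundaries. Rounding start up and end down
// means the kernel's own rounding of the madvised range can
// never touch memory outside the original span.
func roundIn(start, end uintptr) (s, e uintptr, ok bool) {
	s = (start + physPageSize - 1) &^ (physPageSize - 1) // round start up
	e = end &^ (physPageSize - 1)                        // round end down
	return s, e, s < e
}

func main() {
	// A span of 8K logical pages that is not 64K-aligned.
	if s, e, ok := roundIn(0xE000, 0x32000); ok {
		fmt.Printf("release [%#x, %#x)\n", s, e) // [0x10000, 0x30000)
	} else {
		fmt.Println("span too small to release a physical page")
	}
}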
-rw-r--r--  src/runtime/mheap.go  34
1 file changed, 22 insertions, 12 deletions
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 895af9f07c..99f7b54fc8 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -824,15 +824,6 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
-	if sys.PhysPageSize > _PageSize {
-		// golang.org/issue/9993
-		// If the physical page size of the machine is larger than
-		// our logical heap page size the kernel may round up the
-		// amount to be freed to its page size and corrupt the heap
-		// pages surrounding the unused block.
-		return 0
-	}
-
 	if list.isEmpty() {
 		return 0
 	}
@@ -840,11 +831,30 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 	var sumreleased uintptr
 	for s := list.first; s != nil; s = s.next {
 		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-			released := (s.npages - s.npreleased) << _PageShift
+			start := uintptr(s.start) << _PageShift
+			end := start + s.npages<<_PageShift
+			if sys.PhysPageSize > _PageSize {
+				// We can only release pages in
+				// PhysPageSize blocks, so round start
+				// and end in. (Otherwise, madvise
+				// will round them *out* and release
+				// more memory than we want.)
+				start = (start + sys.PhysPageSize - 1) &^ (sys.PhysPageSize - 1)
+				end &^= sys.PhysPageSize - 1
+				if start == end {
+					continue
+				}
+			}
+			len := end - start
+
+			released := len - (s.npreleased << _PageShift)
+			if sys.PhysPageSize > _PageSize && released == 0 {
+				continue
+			}
 			memstats.heap_released += uint64(released)
 			sumreleased += released
-			s.npreleased = s.npages
-			sysUnused(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+			s.npreleased = len >> _PageShift
+			sysUnused(unsafe.Pointer(start), len)
 		}
 	}
 	return sumreleased
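To make the masking in the hunk above concrete with assumed numbers (not from the commit): with sys.PhysPageSize = 64K (0x10000), a span covering [0x4E000, 0x72000) rounds in to start = (0x4E000 + 0xFFFF) &^ 0xFFFF = 0x50000 and end = 0x72000 &^ 0xFFFF = 0x70000, so exactly two physical pages are released. A span narrower than one physical page rounds to start == end and is skipped by the continue, which is why npreleased is recomputed from the rounded length rather than from s.npages.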