author     Michael Anthony Knyszek <mknyszek@google.com>  2020-09-09 16:52:18 +0000
committer  Dmitri Shuralyov <dmitshur@golang.org>         2020-09-10 18:38:25 +0000
commit     3b364d9e7e0c1271e547c630960f8f856f22e715 (patch)
tree       f96902eae5bfe55cf962e8ed49db07b831036a93
parent     77029b76b01fbb4dbeda57664ef62e98efbc2440 (diff)
[release-branch.go1.14] runtime: fix ReadMemStatsSlow's and CheckScavengedBits' chunk iteration
Both ReadMemStatsSlow and CheckScavengedBits iterate over the page
allocator's chunks but don't actually check if they exist. During the
development process the chunks index became sparse, so now this was a
possibility. If the runtime tests' heap is sparse we might end up
segfaulting in either one of these functions, though this will
generally be very rare.

The pattern here of returning nil for a nonexistent chunk is also
useful elsewhere, so this change introduces tryChunkOf which won't
throw, but might return nil. It also updates the documentation of
chunkOf.

For #41296.
Fixes #41322.

Change-Id: Id5ae0ca3234480de1724fdf2e3677eeedcf76fa0
Reviewed-on: https://go-review.googlesource.com/c/go/+/253777
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
(cherry picked from commit 34835df04891a1d54394888b763af88f9476101d)
Reviewed-on: https://go-review.googlesource.com/c/go/+/253922
Run-TryBot: Dmitri Shuralyov <dmitshur@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
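For readers unfamiliar with the structure being fixed: the page allocator's chunks index is two-level, with L1 entries pointing to L2 blocks that are only mapped once a chunk in their range exists, so an in-range chunk index can still have no backing data. The sketch below illustrates that lookup pattern in isolation; sparseIndex, Chunk, and tryGet are hypothetical names for this illustration, not runtime identifiers.

package main

import "fmt"

// Chunk is a hypothetical stand-in for the runtime's pallocData.
type Chunk struct{ scavenged uint64 }

// sparseIndex mimics the two-level chunks index: an l1 entry stays
// nil until its l2 block is mapped.
type sparseIndex struct {
	l1 [8]*[64]Chunk
}

// tryGet follows tryChunkOf's pattern: return nil rather than
// faulting when the l2 block covering index i is unmapped.
func (s *sparseIndex) tryGet(i int) *Chunk {
	l2 := s.l1[i/64]
	if l2 == nil {
		return nil
	}
	return &l2[i%64]
}

func main() {
	var idx sparseIndex
	idx.l1[1] = new([64]Chunk) // map only the second l2 block

	// Iterate a sparse range safely, skipping unmapped chunks,
	// as the fixed loops in export_test.go now do.
	live := 0
	for i := 0; i < 8*64; i++ {
		if c := idx.tryGet(i); c != nil {
			_ = c.scavenged
			live++
		}
	}
	fmt.Println("chunks with mapped data:", live) // 64
}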
-rw-r--r--  src/runtime/export_test.go  17
-rw-r--r--  src/runtime/mpagealloc.go   13
2 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 6a8d00c60d..cf07a37e49 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -360,7 +360,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		}
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
+			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
 			slow.HeapReleased += uint64(pg) * pageSize
 		}
 		for _, p := range allp {
@@ -753,11 +757,7 @@ func (p *PageAlloc) InUse() []AddrRange {
 // Returns nil if the PallocData's L2 is missing.
 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
 	ci := chunkIdx(i)
-	l2 := (*pageAlloc)(p).chunks[ci.l1()]
-	if l2 == nil {
-		return nil
-	}
-	return (*PallocData)(&l2[ci.l2()])
+	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
 }
 
 // AddrRange represents a range over addresses.
@@ -896,7 +896,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 	lock(&mheap_.lock)
 chunkLoop:
 	for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-		chunk := mheap_.pages.chunkOf(i)
+		chunk := mheap_.pages.tryChunkOf(i)
+		if chunk == nil {
+			continue
+		}
 		for j := 0; j < pallocChunkPages/64; j++ {
 			// Run over each 64-bit bitmap section and ensure
 			// scavenged is being cleared properly on allocation.
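One subtlety in the PallocData hunk above: the new one-liner is equivalent to the old explicit nil check because a Go pointer conversion preserves nil, so the nil returned by tryChunkOf comes back out as a typed nil *PallocData. A minimal sketch, with pallocData and PallocData reduced to toy definitions:

package main

import "fmt"

type pallocData struct{ bits [8]uint64 }

// PallocData mirrors export_test.go's exported wrapper type.
type PallocData pallocData

func main() {
	var p *pallocData // nil, as tryChunkOf returns for an unmapped chunk
	fmt.Println((*PallocData)(p) == nil) // true: conversion keeps nil
}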
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 3c56b6041b..3291e0ca4b 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -331,7 +331,20 @@ func (s *pageAlloc) compareSearchAddrTo(addr uintptr) int {
 	return 0
 }
 
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+	l2 := s.chunks[ci.l1()]
+	if l2 == nil {
+		return nil
+	}
+	return &l2[ci.l2()]
+}
+
 // chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
 func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
 	return &s.chunks[ci.l1()][ci.l2()]
 }
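For contrast with tryChunkOf, the reason chunkOf "may throw" is that indexing through a nil L2 entry is a nil-pointer dereference; ordinary Go code sees it as a recoverable panic, while inside the runtime it becomes a fatal error. A small sketch of that failure mode, again with a toy pallocData:

package main

import "fmt"

type pallocData struct{ bits [8]uint64 }

func main() {
	var l2 *[64]pallocData // an unmapped L2 block

	defer func() {
		// User code can recover this; runtime-internal code cannot,
		// which is why the tests needed the nil-returning tryChunkOf.
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	_ = &l2[0] // dereferences nil: panics at run time
}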