From ad642727247383079c8546ca365172859641a800 Mon Sep 17 00:00:00 2001
From: Michael Pratt
Date: Tue, 25 Aug 2020 12:34:02 -0400
Subject: runtime: rename pageAlloc receiver

The history of pageAlloc using 's' as a receiver is lost to the depths
of time (perhaps it used to be called summary?), but it doesn't make
much sense anymore. Rename it to 'p'.

Generated with:

$ cd src/runtime
$ grep -R -b "func (s \*pageAlloc" . | awk -F : '{ print $1 ":#" $2+6 }' | xargs -n 1 -I {} env GOROOT=$(pwd)/../../ gorename -offset {} -to p -v
$ grep -R -b "func (s \*pageAlloc" . | awk -F : '{ print $1 ":#" $2+6 }' | xargs -n 1 -I {} env GOROOT=$(pwd)/../../ GOARCH=386 gorename -offset {} -to p -v
$ GOROOT=$(pwd)/../../ gorename -offset mpagecache.go:#2397 -to p -v

($2+6 to advance past "func (".)

Plus manual comment fixups.

Change-Id: I2d521a1cbf6ebe2ef6aae92e654bfc33c63d1aa9
Reviewed-on: https://go-review.googlesource.com/c/go/+/250517
Trust: Michael Pratt
Run-TryBot: Michael Pratt
TryBot-Result: Go Bot
Reviewed-by: Michael Knyszek
---
 src/runtime/mpagealloc.go | 200 +++++++++++++++++++++++-----------------------
 1 file changed, 100 insertions(+), 100 deletions(-)

(limited to 'src/runtime/mpagealloc.go')

diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index c90a6378bd..560babed03 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -299,7 +299,7 @@ type pageAlloc struct {
 	test bool
 }
 
-func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
+func (p *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 	if levelLogPages[0] > logMaxPackedValue {
 		// We can't represent 1<
-	if end > s.end {
-		s.end = end
+	if end > p.end {
+		p.end = end
 	}
 	// Note that [base, limit) will never overlap with any existing
 	// range inUse because grow only ever adds never-used memory
 	// regions to the page allocator.
-	s.inUse.add(makeAddrRange(base, limit))
+	p.inUse.add(makeAddrRange(base, limit))
 
 	// A grow operation is a lot like a free operation, so if our
-	// chunk ends up below s.searchAddr, update s.searchAddr to the
+	// chunk ends up below p.searchAddr, update p.searchAddr to the
 	// new address, just like in free.
-	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
-		s.searchAddr = b
+	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+		p.searchAddr = b
 	}
 
 	// Add entries into chunks, which is sparse, if needed. Then,
@@ -387,21 +387,21 @@ func (s *pageAlloc) grow(base, size uintptr) {
 	// Newly-grown memory is always considered scavenged.
 	// Set all the bits in the scavenged bitmaps high.
 	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
-		if s.chunks[c.l1()] == nil {
+		if p.chunks[c.l1()] == nil {
 			// Create the necessary l2 entry.
 			//
 			// Store it atomically to avoid races with readers which
 			// don't acquire the heap lock.
-			r := sysAlloc(unsafe.Sizeof(*s.chunks[0]), s.sysStat)
-			atomic.StorepNoWB(unsafe.Pointer(&s.chunks[c.l1()]), r)
+			r := sysAlloc(unsafe.Sizeof(*p.chunks[0]), p.sysStat)
+			atomic.StorepNoWB(unsafe.Pointer(&p.chunks[c.l1()]), r)
 		}
-		s.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
+		p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
 	}
 
 	// Update summaries accordingly. The grow acts like a free, so
 	// we need to ensure this newly-free memory is visible in the
 	// summaries.
-	s.update(base, size/pageSize, true, false)
+	p.update(base, size/pageSize, true, false)
 }
 
 // update updates heap metadata. It must be called each time the bitmap
@@ -411,8 +411,8 @@ func (s *pageAlloc) grow(base, size uintptr) {
 // a contiguous allocation or free between addr and addr+npages. alloc indicates
 // whether the operation performed was an allocation or a free.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
+// p.mheapLock must be held.
+func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 	// base, limit, start, and end are inclusive.
 	limit := base + npages*pageSize - 1
 	sc, ec := chunkIndex(base), chunkIndex(limit)
@@ -421,23 +421,23 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 	if sc == ec {
 		// Fast path: the allocation doesn't span more than one chunk,
 		// so update this one and if the summary didn't change, return.
-		x := s.summary[len(s.summary)-1][sc]
-		y := s.chunkOf(sc).summarize()
+		x := p.summary[len(p.summary)-1][sc]
+		y := p.chunkOf(sc).summarize()
 		if x == y {
 			return
 		}
-		s.summary[len(s.summary)-1][sc] = y
+		p.summary[len(p.summary)-1][sc] = y
 	} else if contig {
 		// Slow contiguous path: the allocation spans more than one chunk
 		// and at least one summary is guaranteed to change.
-		summary := s.summary[len(s.summary)-1]
+		summary := p.summary[len(p.summary)-1]
 
 		// Update the summary for chunk sc.
-		summary[sc] = s.chunkOf(sc).summarize()
+		summary[sc] = p.chunkOf(sc).summarize()
 
 		// Update the summaries for chunks in between, which are
 		// either totally allocated or freed.
-		whole := s.summary[len(s.summary)-1][sc+1 : ec]
+		whole := p.summary[len(p.summary)-1][sc+1 : ec]
 		if alloc {
 			// Should optimize into a memclr.
 			for i := range whole {
@@ -450,22 +450,22 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 		}
 
 		// Update the summary for chunk ec.
-		summary[ec] = s.chunkOf(ec).summarize()
+		summary[ec] = p.chunkOf(ec).summarize()
 	} else {
 		// Slow general path: the allocation spans more than one chunk
 		// and at least one summary is guaranteed to change.
 		//
 		// We can't assume a contiguous allocation happened, so walk over
 		// every chunk in the range and manually recompute the summary.
-		summary := s.summary[len(s.summary)-1]
+		summary := p.summary[len(p.summary)-1]
 		for c := sc; c <= ec; c++ {
-			summary[c] = s.chunkOf(c).summarize()
+			summary[c] = p.chunkOf(c).summarize()
 		}
 	}
 
 	// Walk up the radix tree and update the summaries appropriately.
 	changed := true
-	for l := len(s.summary) - 2; l >= 0 && changed; l-- {
+	for l := len(p.summary) - 2; l >= 0 && changed; l-- {
 		// Update summaries at level l from summaries at level l+1.
 		changed = false
 
@@ -479,12 +479,12 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 
 		// Iterate over each block, updating the corresponding summary in the less-granular level.
 		for i := lo; i < hi; i++ {
-			children := s.summary[l+1][i<
-	if chunkIndex(s.searchAddr.addr()) >= s.end {
+	if chunkIndex(p.searchAddr.addr()) >= p.end {
 		return 0, 0
 	}
 
 	// If npages has a chance of fitting in the chunk where the searchAddr is,
 	// search it directly.
 	searchAddr := minOffAddr
-	if pallocChunkPages-chunkPageIndex(s.searchAddr.addr()) >= uint(npages) {
+	if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
 		// npages is guaranteed to be no greater than pallocChunkPages here.
-		i := chunkIndex(s.searchAddr.addr())
-		if max := s.summary[len(s.summary)-1][i].max(); max >= uint(npages) {
-			j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr.addr()))
+		i := chunkIndex(p.searchAddr.addr())
+		if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
+			j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
 			if j == ^uint(0) {
 				print("runtime: max = ", max, ", npages = ", npages, "\n")
-				print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr.addr()), ", s.searchAddr = ", hex(s.searchAddr.addr()), "\n")
+				print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
 				throw("bad summary data")
 			}
 			addr = chunkBase(i) + uintptr(j)*pageSize
@@ -813,7 +813,7 @@ func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 	}
 
 	// We failed to use a searchAddr for one reason or another, so try
 	// the slow path.
-	addr, searchAddr = s.find(npages)
+	addr, searchAddr = p.find(npages)
 	if addr == 0 {
 		if npages == 1 {
 			// We failed to find a single free page, the smallest unit
@@ -821,41 +821,41 @@ func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 			// exhausted. Otherwise, the heap still might have free
 			// space in it, just not enough contiguous space to
 			// accommodate npages.
-			s.searchAddr = maxSearchAddr
+			p.searchAddr = maxSearchAddr
 		}
 		return 0, 0
 	}
 Found:
 	// Go ahead and actually mark the bits now that we have an address.
-	scav = s.allocRange(addr, npages)
+	scav = p.allocRange(addr, npages)
 
 	// If we found a higher searchAddr, we know that all the
 	// heap memory before that searchAddr in an offset address space is
-	// allocated, so bump s.searchAddr up to the new one.
-	if s.searchAddr.lessThan(searchAddr) {
-		s.searchAddr = searchAddr
+	// allocated, so bump p.searchAddr up to the new one.
+	if p.searchAddr.lessThan(searchAddr) {
+		p.searchAddr = searchAddr
 	}
 	return addr, scav
 }
 
 // free returns npages worth of memory starting at base back to the page heap.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) free(base, npages uintptr) {
-	// If we're freeing pages below the s.searchAddr, update searchAddr.
-	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
-		s.searchAddr = b
+// p.mheapLock must be held.
+func (p *pageAlloc) free(base, npages uintptr) {
+	// If we're freeing pages below the p.searchAddr, update searchAddr.
+	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+		p.searchAddr = b
 	}
 
 	// Update the free high watermark for the scavenger.
 	limit := base + npages*pageSize - 1
-	if offLimit := (offAddr{limit}); s.scav.freeHWM.lessThan(offLimit) {
-		s.scav.freeHWM = offLimit
+	if offLimit := (offAddr{limit}); p.scav.freeHWM.lessThan(offLimit) {
+		p.scav.freeHWM = offLimit
 	}
 
 	if npages == 1 {
 		// Fast path: we're clearing a single bit, and we know exactly
 		// where it is, so mark it directly.
 		i := chunkIndex(base)
-		s.chunkOf(i).free1(chunkPageIndex(base))
+		p.chunkOf(i).free1(chunkPageIndex(base))
 	} else {
 		// Slow path: we're clearing more bits so we may need to iterate.
 		sc, ec := chunkIndex(base), chunkIndex(limit)
@@ -863,17 +863,17 @@ func (s *pageAlloc) free(base, npages uintptr) {
 
 		if sc == ec {
 			// The range doesn't cross any chunk boundaries.
-			s.chunkOf(sc).free(si, ei+1-si)
+			p.chunkOf(sc).free(si, ei+1-si)
 		} else {
 			// The range crosses at least one chunk boundary.
-			s.chunkOf(sc).free(si, pallocChunkPages-si)
+			p.chunkOf(sc).free(si, pallocChunkPages-si)
 			for c := sc + 1; c < ec; c++ {
-				s.chunkOf(c).freeAll()
+				p.chunkOf(c).freeAll()
 			}
-			s.chunkOf(ec).free(0, ei+1)
+			p.chunkOf(ec).free(0, ei+1)
 		}
 	}
-	s.update(base, npages, true, false)
+	p.update(base, npages, true, false)
 }
 
 const (
--
cgit v1.2.1


From 8ebc58452af3a586a3da1f68725bc83c78d4b073 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Wed, 29 Jul 2020 20:25:05 +0000
Subject: runtime: delineate which memstats are system stats with a type

This change modifies the type of several mstats fields to be a new
type: sysMemStat. This type has the same structure as the fields
used to have.

The purpose of this change is to make it very clear which stats may
be used in various functions for accounting (usually the
platform-specific sys* functions, but there are others). Currently
there's an implicit understanding that the *uint64 value passed to
these functions is some kind of statistic whose value is atomically
managed.

This understanding isn't inherently problematic, but we're about to
change how some stats (which currently use mSysStatInc and
mSysStatDec) work, so we want to make it very clear what the various
requirements are around "sysStat".

This change also removes mSysStatInc and mSysStatDec in favor of a
method on sysMemStat. Note that those two functions were originally
written the way they were because atomic 64-bit adds required a valid
G on ARM, but this hasn't been the case for a very long time (since
golang.org/cl/14204, but even before then it wasn't clear if mutexes
required a valid G anymore). Today we implement 64-bit adds on ARM
with a spinlock table.

Change-Id: I4e9b37cf14afc2ae20cf736e874eb0064af086d7
Reviewed-on: https://go-review.googlesource.com/c/go/+/246971
Run-TryBot: Michael Knyszek
TryBot-Result: Go Bot
Trust: Michael Knyszek
Reviewed-by: Michael Pratt
---
 src/runtime/mpagealloc.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'src/runtime/mpagealloc.go')

diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 560babed03..2af1c97e0b 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -293,13 +293,13 @@ type pageAlloc struct {
 	// sysStat is the runtime memstat to update when new system
 	// memory is committed by the pageAlloc for allocation metadata.
-	sysStat *uint64
+	sysStat *sysMemStat
 
 	// Whether or not this struct is being used in tests.
 	test bool
 }
 
-func (p *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
+func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat) {
 	if levelLogPages[0] > logMaxPackedValue {
 		// We can't represent 1<
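
As a rough illustration of the sysMemStat change described in the commit
message above, the following is a minimal, hypothetical sketch of an
atomically-managed stat type with a single add method replacing an
inc/dec pair. It is written against sync/atomic so it is self-contained;
the real runtime type lives in mstats.go, uses the runtime's internal
atomics, and its exact method set and checks may differ.

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // sysMemStat sketches a system memory statistic whose value is only
    // ever manipulated atomically (hypothetical stand-in for the runtime type).
    type sysMemStat uint64

    // load atomically reads the current value of the stat.
    func (s *sysMemStat) load() uint64 {
    	return atomic.LoadUint64((*uint64)(s))
    }

    // add atomically adds n to the stat; a negative n decrements, so one
    // method can replace the old mSysStatInc/mSysStatDec pair. The unsigned
    // add relies on two's-complement wraparound for negative deltas.
    func (s *sysMemStat) add(n int64) {
    	atomic.AddUint64((*uint64)(s), uint64(n))
    }

    func main() {
    	var stat sysMemStat
    	stat.add(4096)           // e.g. metadata committed for allocation bookkeeping
    	stat.add(-1024)          // e.g. memory released back to the OS
    	fmt.Println(stat.load()) // 3072
    }

With a type like this, a field such as pageAlloc.sysStat can be a
*sysMemStat (as the hunk above shows), and accounting goes through the
stat's own method rather than free-standing helpers.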