author     Rick Hudson <rlh@golang.org>  2016-07-19 14:21:23 -0400
committer  Rick Hudson <rlh@golang.org>  2017-05-25 18:05:53 +0000
commit     1d4942afe0872e5f6d95cfa0a5751ef4a9dde114 (patch)
tree       e3eca264c83e28557f626bc4bf619f516a9e56df
parent     8b25a00e6d889c8a919922f747791478c8bdfe6f (diff)
download   go-git-dev.garbage.tar.gz
[dev.garbage] runtime: determine if an object is public
ROC (request oriented collector) needs to determine if an object is
visible to other goroutines, i.e. public. In a later CL this will be
used by the write barrier and the publishing logic to distinguish
between local and public objects and act accordingly.

Change-Id: I6a80da9deb21f57e831a2ec04e41477f997a8c33
Reviewed-on: https://go-review.googlesource.com/25056
Reviewed-by: Austin Clements <austin@google.com>
-rw-r--r--  src/runtime/mbitmap.go  94
-rw-r--r--  src/runtime/mheap.go     7
-rw-r--r--  src/runtime/stack.go     5
3 files changed, 106 insertions(+), 0 deletions(-)
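The commit message says a later CL will wire this predicate into the write barrier and the publishing logic. Purely as a hedged illustration of that intended use (rocWriteBarrier and the publish helper below are assumptions, not part of this change), a ROC-aware write barrier might consult isPublic like this:

	// Hypothetical sketch, not part of this CL. publish is an assumed
	// helper that would transitively mark an object and everything
	// reachable from it as public.
	//go:systemstack
	func rocWriteBarrier(slot *uintptr, ptr uintptr) {
		if ptr != 0 && isPublic(uintptr(unsafe.Pointer(slot))) && !isPublic(ptr) {
			// Storing a local object into a public location makes it
			// visible to other goroutines, so publish it first.
			publish(ptr)
		}
		*slot = ptr
	}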
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 3011e07a41..a7499f66e1 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -190,6 +190,90 @@ type markBits struct {
}
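+// inBss reports whether p points into the bss section of any loaded module.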
//go:nosplit
+func inBss(p uintptr) bool {
+	for datap := &firstmoduledata; datap != nil; datap = datap.next {
+		if p >= datap.bss && p < datap.ebss {
+			return true
+		}
+	}
+	return false
+}
+
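+// inData reports whether p points into the data section of any loaded module.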
+//go:nosplit
+func inData(p uintptr) bool {
+	for datap := &firstmoduledata; datap != nil; datap = datap.next {
+		if p >= datap.data && p < datap.edata {
+			return true
+		}
+	}
+	return false
+}
+
+// isPublic checks whether the object has been published.
+// ptr need not point to the start of the object.
+// This is conservative in the sense that it will return true
+// for any object that has not been allocated by this
+// goroutine since the last ROC checkpoint was performed.
+// Must run on the system stack to prevent stack growth and
+// the moving of the goroutine's stack.
+//go:systemstack
+func isPublic(ptr uintptr) bool {
+	if debug.gcroc == 0 {
+		// Unexpected call to a ROC-specific routine while not running ROC.
+		// Blow up without suppressing inlining.
+		_ = *(*int)(nil)
+	}
+
+	if inStack(ptr, getg().stack) {
+		return false
+	}
+
+	if getg().m != nil && getg().m.curg != nil && inStack(ptr, getg().m.curg.stack) {
+		return false
+	}
+
+	if inBss(ptr) {
+		return true
+	}
+	if inData(ptr) {
+		return true
+	}
+	if !inheap(ptr) {
+		// Note: Objects created using persistentalloc are not in the heap,
+		// so any pointers from such objects to local objects need to be
+		// dealt with specially. nil is also considered not in the heap.
+		return true
+	}
+	// At this point we know the object is in the heap.
+	s := spanOf(ptr)
+	oldSweepgen := atomic.Load(&s.sweepgen)
+	sg := mheap_.sweepgen
+	if oldSweepgen != sg {
+		// The span is unswept, so the pointer points to a public object:
+		// it will be found to be marked once the span is swept.
+		return true
+	}
+	abits := s.allocBitsForAddr(ptr)
+	if abits.isMarked() {
+		return true
+	} else if s.freeindex <= abits.index {
+		// Unmarked object at or beyond freeindex: the slot is free, so a
+		// reachable pointer to it is a bug.
+		// Blow up without suppressing inlining.
+		_ = *(*int)(nil)
+	}
+
+	// The object is not marked. If it is part of the current
+	// ROC epoch, it is not public.
+	if s.startindex*s.elemsize <= ptr-s.base() {
+		// The object was allocated in this ROC epoch and, since it is
+		// not marked, it has not been published.
+		return false
+	}
+	// The object was allocated since the last GC but in a previous
+	// ROC epoch, so it is public.
+	return true
+}
+
+//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	whichByte := allocBitIndex / 8
	whichBit := allocBitIndex % 8
@@ -197,6 +281,16 @@ func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	return markBits{bytePtr, uint8(1 << whichBit), allocBitIndex}
}
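+// allocBitsForAddr returns the markBits for the object containing the
+// address p. p may be an interior pointer.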
+//go:nosplit
+func (s *mspan) allocBitsForAddr(p uintptr) markBits {
+	byteOffset := p - s.base()
+	allocBitIndex := byteOffset / s.elemsize
+	whichByte := allocBitIndex / 8
+	whichBit := allocBitIndex % 8
+	bytePtr := addb(s.allocBits, whichByte)
+	return markBits{bytePtr, uint8(1 << whichBit), allocBitIndex}
+}
+
// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
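To make allocBitsForAddr's address-to-bit mapping concrete, here is a worked example with hypothetical values (the span base and element size below are made up for illustration):

	// Hypothetical span: base = 0x1000, elemsize = 32.
	// For an interior pointer p = 0x1064 (offset 100 into the span):
	//   byteOffset    = 0x1064 - 0x1000 = 100
	//   allocBitIndex = 100 / 32 = 3   // p points into the 4th object
	//   whichByte     = 3 / 8 = 0      // first byte of s.allocBits
	//   whichBit      = 3 % 8 = 3      // mask 1<<3 == 0x08
	// So the returned markBits selects bit 3 of allocBits[0].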
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 4cf08e46a7..09dc0d51ad 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -195,6 +195,13 @@ type mspan struct {
	// helps performance.
	nelems uintptr // number of objects in the span.
+	// startindex is the object index where the owner G started
+	// allocating in this span.
+	//
+	// This is used in conjunction with nextUsedSpan to implement
+	// ROC checkpoints and recycles.
+	startindex uintptr
+	// nextUsedSpan links together all spans that have the same
+	// span class and owner G.
+	nextUsedSpan *mspan
+
	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
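The field comment above mentions ROC checkpoints and recycles, which land in later CLs. Purely as a hedged sketch of how these two fields could interact with isPublic's epoch check (rocCheckpoint and the per-G list head usedSpans are assumptions, not part of this change):

	// Hypothetical sketch, not part of this CL. At a checkpoint, every
	// span the goroutine allocated from is advanced to a new epoch by
	// moving startindex up to the current allocation frontier, so that
	// isPublic treats everything allocated before the checkpoint as public.
	func rocCheckpoint(gp *g) {
		for s := gp.usedSpans; s != nil; s = s.nextUsedSpan { // usedSpans is assumed
			s.startindex = s.freeindex
		}
	}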
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 0f1a5c1c55..1e59d38994 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -1225,3 +1225,8 @@ func morestackc() {
throw("attempt to execute C code on Go stack")
})
}
+
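+// inStack reports whether p is within the stack s.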
+//go:nosplit
+func inStack(p uintptr, s stack) bool {
+	return s.lo <= p && p < s.hi
+}