author | Russ Cox <rsc@golang.org> | 2014-11-11 17:09:09 -0500
---|---|---
committer | Russ Cox <rsc@golang.org> | 2014-11-11 17:09:09 -0500
commit | 9204821731bfdcbef0bd4af0c4d40acde23ec7ab |
tree | 1f2160f2e7a2aa30efdbbaade148f45423d9d4f4 | /src/runtime/stubs.go
parent | e785e3acf8a4845ada7bed96f1a88355b1e0cde3 |
download | go-git-9204821731bfdcbef0bd4af0c4d40acde23ec7ab.tar.gz |
[dev.cc] runtime: convert arch-specific .c and .h files to Go
The conversion was done with an automated tool and then
modified only as necessary to make it compile and run.
vlrt.c was only called from C. Pure delete.
[This CL is part of the removal of C code from package runtime.
See golang.org/s/dev.cc for an overview.]
LGTM=r
R=r, austin
CC=dvyukov, golang-codereviews, iant, khr
https://golang.org/cl/174860043
Diffstat (limited to 'src/runtime/stubs.go')
-rw-r--r-- | src/runtime/stubs.go | 77 |
1 file changed, 18 insertions, 59 deletions
```diff
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index fe8f9c9222..2be4ad5315 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -23,12 +23,7 @@ func roundup(p unsafe.Pointer, n uintptr) unsafe.Pointer {
 	return unsafe.Pointer(uintptr(p) + delta)
 }
 
-// in runtime.c
 func getg() *g
-func acquirem() *m
-func releasem(mp *m)
-func gomcache() *mcache
-func readgstatus(*g) uint32 // proc.c
 
 // mcall switches from the g to the g0 stack and invokes fn(g),
 // where g is the goroutine that made the call.
@@ -95,33 +90,6 @@ func badonm() {
 	gothrow("onM called from signal goroutine")
 }
 
-// C functions that run on the M stack.
-// Call using mcall.
-func gosched_m(*g)
-func park_m(*g)
-func recovery_m(*g)
-
-// More C functions that run on the M stack.
-// Call using onM.
-func mcacheRefill_m()
-func largeAlloc_m()
-func gc_m()
-func scavenge_m()
-func setFinalizer_m()
-func removeFinalizer_m()
-func markallocated_m()
-func unrollgcprog_m()
-func unrollgcproginplace_m()
-func setgcpercent_m()
-func setmaxthreads_m()
-func ready_m()
-func deferproc_m()
-func goexit_m()
-func startpanic_m()
-func dopanic_m()
-func readmemstats_m()
-func writeheapdump_m()
-
 // memclr clears n bytes starting at ptr.
 // in memclr_*.s
 //go:noescape
@@ -132,12 +100,6 @@ func memclr(ptr unsafe.Pointer, n uintptr)
 //go:noescape
 func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
 
-func starttheworld()
-func stoptheworld()
-func newextram()
-func lockOSThread()
-func unlockOSThread()
-
 // exported value for testing
 var hashLoad = loadFactor
 
@@ -159,11 +121,6 @@ func noescape(p unsafe.Pointer) unsafe.Pointer {
 	return unsafe.Pointer(x ^ 0)
 }
 
-func entersyscall()
-func reentersyscall(pc uintptr, sp unsafe.Pointer)
-func entersyscallblock()
-func exitsyscall()
-
 func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
 func gogo(buf *gobuf)
 func gosave(buf *gobuf)
@@ -181,20 +138,12 @@ func breakpoint()
 func nanotime() int64
 func usleep(usec uint32)
 
-// careful: cputicks is not guaranteed to be monotonic! In particular, we have
-// noticed drift between cpus on certain os/arch combinations. See issue 8976.
-func cputicks() int64
-
 func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
 func munmap(addr unsafe.Pointer, n uintptr)
 func madvise(addr unsafe.Pointer, n uintptr, flags int32)
 func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
-func osyield()
 func procyield(cycles uint32)
 func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
-func readgogc() int32
-func purgecachedstats(c *mcache)
-func gostringnocopy(b *byte) string
 func goexit()
 
 //go:noescape
@@ -203,8 +152,21 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32
 //go:noescape
 func cas(ptr *uint32, old, new uint32) bool
 
-//go:noescape
-func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+// casp cannot have a go:noescape annotation, because
+// while ptr and old do not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being xchg'ed.
+// Instead, make casp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr as noescape explicitly.
+
+//go:nosplit
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+	return casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
+}
+
+func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+
+func nop() // call to prevent inlining of function body
 
 //go:noescape
 func casuintptr(ptr *uintptr, old, new uintptr) bool
@@ -264,15 +226,10 @@ func asmcgocall_errno(fn, arg unsafe.Pointer) int32
 //go:noescape
 func open(name *byte, mode, perm int32) int32
 
-//go:noescape
-func gotraceback(*bool) int32
-
+// argp used in Defer structs when there is no argp.
 const _NoArgs = ^uintptr(0)
 
-func newstack()
-func newproc()
 func morestack()
-func mstart()
 func rt0_go()
 
 // return0 is a stub used to return 0 from deferproc.
@@ -314,3 +271,5 @@ func call134217728(fn, arg unsafe.Pointer, n, retoffset uint32)
 func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32)
 func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32)
 func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32)
+
+func switchtoM()
```
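The most interesting change in this hunk is the casp rewrite: the stub cannot carry a blanket //go:noescape annotation because `new` really does escape (it is published through `*ptr`), so the wrapper launders only `ptr` and `old` via the runtime's noescape helper (visible in the file's context as `return unsafe.Pointer(x ^ 0)`) before calling the real atomic, casp1. Below is a standalone, hedged sketch of that idiom, not the runtime's code: sync/atomic stands in for casp1, and the names `hideFromEscapeAnalysis` and `caspSketch` are hypothetical.

```go
// Minimal sketch of the casp wrapper idiom from the diff above.
// NOT the runtime's implementation: atomic.CompareAndSwapPointer
// replaces casp1, and the identifiers here are made up for illustration.
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// hideFromEscapeAnalysis mirrors the runtime's noescape helper: XOR with
// zero is an identity operation on the pointer value, but it breaks the
// dependency the escape-analysis pass would otherwise follow, so the
// argument is not treated as escaping through this call.
func hideFromEscapeAnalysis(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

// caspSketch plays the role of the new casp wrapper. It cannot be marked
// noescape as a whole, because new genuinely escapes (it is stored into
// *ptr); instead only ptr and old are laundered before the real CAS.
func caspSketch(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	p := (*unsafe.Pointer)(hideFromEscapeAnalysis(unsafe.Pointer(ptr)))
	return atomic.CompareAndSwapPointer(p, hideFromEscapeAnalysis(old), new)
}

func main() {
	a, b := 1, 2
	slot := unsafe.Pointer(&a)

	// Swap slot from &a to &b only if it still holds &a.
	if caspSketch(&slot, unsafe.Pointer(&a), unsafe.Pointer(&b)) {
		fmt.Println("swapped, slot now holds:", *(*int)(slot)) // prints 2
	}
}
```

The design point the comment in the diff makes is that escape-analysis annotations are per-function, not per-argument: since one argument must be allowed to escape, the only way to keep the others off the heap is to wrap the annotated primitive and launder the non-escaping arguments at the call site.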