author | ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4> | 2010-12-03 04:34:57 +0000
---|---|---
committer | ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4> | 2010-12-03 04:34:57 +0000
commit | e440a3286bc89368b8d3a8fd6accd47191790bf2 (patch) |
tree | 38fe54a4f38ede5d949c915d66191f24a6fe5153 /libgo/go/runtime |
parent | a641ee368e2614349084a9a7bda2ec2b0b2bc1cf (diff) |
download | gcc-e440a3286bc89368b8d3a8fd6accd47191790bf2.tar.gz |
Add Go frontend, libgo library, and Go testsuite.
gcc/:
* gcc.c (default_compilers): Add entry for ".go".
* common.opt: Add -static-libgo as a driver option.
* doc/install.texi (Configuration): Mention libgo as an option for
--enable-shared. Mention go as an option for --enable-languages.
* doc/invoke.texi (Overall Options): Mention .go as a file name
suffix. Mention go as a -x option.
* doc/frontends.texi (G++ and GCC): Mention Go as a supported
language.
* doc/sourcebuild.texi (Top Level): Mention libgo.
* doc/standards.texi (Standards): Add section on Go language.
Move references for other languages into their own section.
* doc/contrib.texi (Contributors): Mention that I contributed the
Go frontend.
gcc/testsuite/:
* lib/go.exp: New file.
* lib/go-dg.exp: New file.
* lib/go-torture.exp: New file.
* lib/target-supports.exp (check_compile): Match // Go.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@167407 138bc75d-0d04-0410-961f-82ee72b054a4
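With this patch the gcc driver recognizes the .go suffix (or -x go), and -static-libgo links the Go runtime statically. A minimal sketch of what the new frontend compiles — the file name and exact driver invocation here are illustrative, not part of the patch:

```go
// hello.go — built with, e.g.: gccgo -o hello hello.go
// (add -static-libgo, added in common.opt above, to link libgo statically)
package main

func main() {
	println("hello from gccgo")
}
```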
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r-- | libgo/go/runtime/debug.go | 143
-rw-r--r-- | libgo/go/runtime/error.go | 133
-rw-r--r-- | libgo/go/runtime/export_test.go | 17
-rw-r--r-- | libgo/go/runtime/extern.go | 181
-rw-r--r-- | libgo/go/runtime/pprof/pprof.go | 108
-rw-r--r-- | libgo/go/runtime/sig.go | 16
-rw-r--r-- | libgo/go/runtime/softfloat64.go | 498
-rw-r--r-- | libgo/go/runtime/softfloat64_test.go | 198
-rw-r--r-- | libgo/go/runtime/type.go | 206
9 files changed, 1500 insertions, 0 deletions
diff --git a/libgo/go/runtime/debug.go b/libgo/go/runtime/debug.go
new file mode 100644
index 00000000000..b5f6571faa8
--- /dev/null
+++ b/libgo/go/runtime/debug.go
@@ -0,0 +1,143 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Breakpoint() executes a breakpoint trap.
+func Breakpoint()
+
+// LockOSThread wires the calling goroutine to its current operating system thread.
+// Until the calling goroutine exits or calls UnlockOSThread, it will always
+// execute in that thread, and no other goroutine can.
+// LockOSThread cannot be used during init functions.
+func LockOSThread()
+
+// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
+// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
+func UnlockOSThread()
+
+// GOMAXPROCS sets the maximum number of CPUs that can be executing
+// simultaneously and returns the previous setting.  If n < 1, it does not
+// change the current setting.
+// This call will go away when the scheduler improves.
+func GOMAXPROCS(n int) int
+
+// Cgocalls returns the number of cgo calls made by the current process.
+func Cgocalls() int64
+
+type MemStatsType struct {
+	// General statistics.
+	// Not locked during update; approximate.
+	Alloc      uint64 // bytes allocated and still in use
+	TotalAlloc uint64 // bytes allocated (even if freed)
+	Sys        uint64 // bytes obtained from system (should be sum of XxxSys below)
+	Lookups    uint64 // number of pointer lookups
+	Mallocs    uint64 // number of mallocs
+
+	// Main allocation heap statistics.
+	HeapAlloc   uint64 // bytes allocated and still in use
+	HeapSys     uint64 // bytes obtained from system
+	HeapIdle    uint64 // bytes in idle spans
+	HeapInuse   uint64 // bytes in non-idle span
+	HeapObjects uint64 // total number of allocated objects
+
+	// Low-level fixed-size structure allocator statistics.
+	// Inuse is bytes used now.
+	// Sys is bytes obtained from system.
+	StackInuse  uint64 // bootstrap stacks
+	StackSys    uint64
+	MSpanInuse  uint64 // mspan structures
+	MSpanSys    uint64
+	MCacheInuse uint64 // mcache structures
+	MCacheSys   uint64
+	MHeapMapSys uint64 // heap map
+	BuckHashSys uint64 // profiling bucket hash table
+
+	// Garbage collector statistics.
+	NextGC   uint64
+	PauseNs  uint64
+	NumGC    uint32
+	EnableGC bool
+	DebugGC  bool
+
+	// Per-size allocation statistics.
+	// Not locked during update; approximate.
+	BySize [67]struct {
+		Size    uint32
+		Mallocs uint64
+		Frees   uint64
+	}
+}
+
+// MemStats holds statistics about the memory system.
+// The statistics are only approximate, as they are not interlocked on update.
+var MemStats MemStatsType
+
+// Alloc allocates a block of the given size.
+// FOR TESTING AND DEBUGGING ONLY.
+func Alloc(uintptr) *byte
+
+// Free frees the block starting at the given pointer.
+// FOR TESTING AND DEBUGGING ONLY.
+func Free(*byte)
+
+// Lookup returns the base and size of the block containing the given pointer.
+// FOR TESTING AND DEBUGGING ONLY.
+func Lookup(*byte) (*byte, uintptr)
+
+// GC runs a garbage collection.
+func GC()
+
+// MemProfileRate controls the fraction of memory allocations
+// that are recorded and reported in the memory profile.
+// The profiler aims to sample an average of
+// one allocation per MemProfileRate bytes allocated.
+//
+// To include every allocated block in the profile, set MemProfileRate to 1.
+// To turn off profiling entirely, set MemProfileRate to 0.
+//
+// The tools that process the memory profiles assume that the
+// profile rate is constant across the lifetime of the program
+// and equal to the current value.  Programs that change the
+// memory profiling rate should do so just once, as early as
+// possible in the execution of the program (for example,
+// at the beginning of main).
+var MemProfileRate int = 512 * 1024
+
+// A MemProfileRecord describes the live objects allocated
+// by a particular call sequence (stack trace).
+type MemProfileRecord struct {
+	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
+	AllocObjects, FreeObjects int64       // number of objects allocated, freed
+	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
+}
+
+// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
+func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
+
+// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
+func (r *MemProfileRecord) InUseObjects() int64 {
+	return r.AllocObjects - r.FreeObjects
+}
+
+// Stack returns the stack trace associated with the record,
+// a prefix of r.Stack0.
+func (r *MemProfileRecord) Stack() []uintptr {
+	for i, v := range r.Stack0 {
+		if v == 0 {
+			return r.Stack0[0:i]
+		}
+	}
+	return r.Stack0[0:]
+}
+
+// MemProfile returns n, the number of records in the current memory profile.
+// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
+// If len(p) < n, MemProfile does not change p and returns n, false.
+//
+// If inuseZero is true, the profile includes allocation records
+// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
+// These are sites where memory was allocated, but it has all
+// been released back to the runtime.
+func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)
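That is the whole public surface of debug.go. A minimal sketch of a program driving it, using only the names declared above (the values printed are of course machine-dependent):

```go
package main

import "runtime"

func main() {
	prev := runtime.GOMAXPROCS(2) // allow up to 2 CPUs; returns the old setting
	println("previous GOMAXPROCS:", prev)

	runtime.GC() // force a collection so the statistics below are fresh

	// In this API, MemStats is a package-level variable, not a function.
	println("heap in use:", runtime.MemStats.HeapAlloc, "bytes")
}
```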
diff --git a/libgo/go/runtime/error.go b/libgo/go/runtime/error.go
new file mode 100644
index 00000000000..2515722aaa4
--- /dev/null
+++ b/libgo/go/runtime/error.go
@@ -0,0 +1,133 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// The Error interface identifies a run time error.
+type Error interface {
+	String() string
+
+	// RuntimeError is a no-op function but
+	// serves to distinguish types that are runtime
+	// errors from ordinary os.Errors: a type is a
+	// runtime error if it has a RuntimeError method.
+	RuntimeError()
+}
+
+// A TypeAssertionError explains a failed type assertion.
+type TypeAssertionError struct {
+	interfaceType   *Type // interface had this type
+	concreteType    *Type // concrete value had this type
+	assertedType    *Type // asserted type
+	interfaceString string
+	concreteString  string
+	assertedString  string
+	missingMethod   string // one method needed by Interface, missing from Concrete
+}
+
+func (*TypeAssertionError) RuntimeError() {}
+
+func (e *TypeAssertionError) String() string {
+	inter := e.interfaceString
+	if inter == "" {
+		inter = "interface"
+	}
+	if e.concreteType == nil {
+		return "interface conversion: " + inter + " is nil, not " + e.assertedString
+	}
+	if e.missingMethod == "" {
+		return "interface conversion: " + inter + " is " + e.concreteString +
+			", not " + e.assertedString
+	}
+	return "interface conversion: " + e.concreteString + " is not " + e.assertedString +
+		": missing method " + e.missingMethod
+}
+
+// Concrete returns the type of the concrete value in the failed type assertion.
+// If the interface value was nil, Concrete returns nil.
+func (e *TypeAssertionError) Concrete() *Type {
+	return e.concreteType
+}
+
+// Asserted returns the type incorrectly asserted by the type assertion.
+func (e *TypeAssertionError) Asserted() *Type {
+	return e.assertedType
+}
+
+// If the type assertion is to an interface type, MissingMethod returns the
+// name of a method needed to satisfy that interface type but not implemented
+// by Concrete.  If there are multiple such methods,
+// MissingMethod returns one; which one is unspecified.
+// If the type assertion is not to an interface type, MissingMethod returns an empty string.
+func (e *TypeAssertionError) MissingMethod() string {
+	return e.missingMethod
+}
+
+// For calling from C.
+func NewTypeAssertionError(pt1, pt2, pt3 *Type, ps1, ps2, ps3 *string, pmeth *string, ret *interface{}) {
+	var t1, t2, t3 *Type
+	var s1, s2, s3, meth string
+
+	if pt1 != nil {
+		t1 = pt1
+	}
+	if pt2 != nil {
+		t2 = pt2
+	}
+	if pt3 != nil {
+		t3 = pt3
+	}
+	if ps1 != nil {
+		s1 = *ps1
+	}
+	if ps2 != nil {
+		s2 = *ps2
+	}
+	if ps3 != nil {
+		s3 = *ps3
+	}
+	if pmeth != nil {
+		meth = *pmeth
+	}
+	*ret = &TypeAssertionError{t1, t2, t3, s1, s2, s3, meth}
+}
+
+// An errorString represents a runtime error described by a single string.
+type errorString string
+
+func (e errorString) RuntimeError() {}
+
+func (e errorString) String() string {
+	return "runtime error: " + string(e)
+}
+
+// For calling from C.
+func NewErrorString(s string, ret *interface{}) {
+	*ret = errorString(s)
+}
+
+type stringer interface {
+	String() string
+}
+
+func typestring(interface{}) string
+
+// For calling from C.
+// Prints an argument passed to panic.
+// There's room for arbitrary complexity here, but we keep it
+// simple and handle just a few important cases: int, string, and Stringer.
+func Printany(i interface{}) {
+	switch v := i.(type) {
+	case nil:
+		print("nil")
+	case stringer:
+		print(v.String())
+	case int:
+		print(v)
+	case string:
+		print(v)
+	default:
+		print("(", typestring(i), ") ", i)
+	}
+}
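Because every runtime fault carries the no-op RuntimeError marker method, a deferred recover can distinguish runtime errors from ordinary panic values. A hedged sketch against the interface declared above:

```go
package main

import "runtime"

func main() {
	defer func() {
		if e := recover(); e != nil {
			// Only values with the RuntimeError marker are runtime faults.
			if re, ok := e.(runtime.Error); ok {
				println("caught:", re.String())
			}
		}
	}()

	var i interface{} = "not an int"
	_ = i.(int) // fails, panicking with a *runtime.TypeAssertionError
}
```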
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
new file mode 100644
index 00000000000..58631c7b4b5
--- /dev/null
+++ b/libgo/go/runtime/export_test.go
@@ -0,0 +1,17 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Export guts for testing.
+
+package runtime
+
+var Fadd64 = fadd64
+var Fsub64 = fsub64
+var Fmul64 = fmul64
+var Fdiv64 = fdiv64
+var F64to32 = f64to32
+var F32to64 = f32to64
+var Fcmp64 = fcmp64
+var Fintto64 = fintto64
+var F64toint = f64toint
diff --git a/libgo/go/runtime/extern.go b/libgo/go/runtime/extern.go
new file mode 100644
index 00000000000..8ab57d03f63
--- /dev/null
+++ b/libgo/go/runtime/extern.go
@@ -0,0 +1,181 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+	The runtime package contains operations that interact with Go's runtime system,
+	such as functions to control goroutines. It also includes the low-level type information
+	used by the reflect package; see reflect's documentation for the programmable
+	interface to the run-time type system.
+*/
+package runtime
+
+// Gosched yields the processor, allowing other goroutines to run.  It does not
+// suspend the current goroutine, so execution resumes automatically.
+func Gosched()
+
+// Goexit terminates the goroutine that calls it.  No other goroutine is affected.
+// Goexit runs all deferred calls before terminating the goroutine.
+func Goexit()
+
+// Caller reports file and line number information about function invocations on
+// the calling goroutine's stack.  The argument skip is the number of stack frames to
+// ascend, with 0 identifying the caller of Caller.  The return values report the
+// program counter, file name, and line number within the file of the corresponding
+// call.  The boolean ok is false if it was not possible to recover the information.
+func Caller(skip int) (pc uintptr, file string, line int, ok bool)
+
+// Callers fills the slice pc with the program counters of function invocations
+// on the calling goroutine's stack.  The argument skip is the number of stack frames
+// to skip before recording in pc, with 0 starting at the caller of Caller.
+// It returns the number of entries written to pc.
+func Callers(skip int, pc []uintptr) int
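Caller and Callers are the stack-introspection entry points; a small sketch of each (the printed values are illustrative):

```go
package main

import "runtime"

func where() {
	// skip=0 identifies the caller of Caller, i.e. this line in where().
	if pc, file, line, ok := runtime.Caller(0); ok {
		println("pc:", pc, "file:", file, "line:", line)
	}

	// Callers instead fills a slice of program counters,
	// returning how many entries it wrote.
	var pcs [16]uintptr
	n := runtime.Callers(0, pcs[0:])
	println("captured", n, "frames")
}

func main() { where() }
```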
+// FuncForPC returns a *Func describing the function that contains the
+// given program counter address, or else nil.
+func FuncForPC(pc uintptr) *Func
+
+// NOTE(rsc): Func must match struct Func in runtime.h
+
+// Func records information about a function in the program,
+// in particular the mapping from program counters to source
+// line numbers within that function.
+type Func struct {
+	name   string
+	typ    string
+	src    string
+	pcln   []byte
+	entry  uintptr
+	pc0    uintptr
+	ln0    int32
+	frame  int32
+	args   int32
+	locals int32
+}
+
+// Name returns the name of the function.
+func (f *Func) Name() string { return f.name }
+
+// Entry returns the entry address of the function.
+func (f *Func) Entry() uintptr { return f.entry }
+
+// FileLine returns the file name and line number of the
+// source code corresponding to the program counter pc.
+// The result will not be accurate if pc is not a program
+// counter within f.
+func (f *Func) FileLine(pc uintptr) (file string, line int) {
+	// NOTE(rsc): If you edit this function, also edit
+	// symtab.c:/^funcline.
+	var pcQuant uintptr = 1
+	if GOARCH == "arm" {
+		pcQuant = 4
+	}
+
+	targetpc := pc
+	p := f.pcln
+	pc = f.pc0
+	line = int(f.ln0)
+	file = f.src
+	for i := 0; i < len(p) && pc <= targetpc; i++ {
+		switch {
+		case p[i] == 0:
+			line += int(p[i+1]<<24) | int(p[i+2]<<16) | int(p[i+3]<<8) | int(p[i+4])
+			i += 4
+		case p[i] <= 64:
+			line += int(p[i])
+		case p[i] <= 128:
+			line -= int(p[i] - 64)
+		default:
+			pc += pcQuant * uintptr(p[i]-129)
+		}
+		pc += pcQuant
+	}
+	return
+}
+
+// mid returns the current os thread (m) id.
+func mid() uint32
+
+// Semacquire waits until *s > 0 and then atomically decrements it.
+// It is intended as a simple sleep primitive for use by the synchronization
+// library and should not be used directly.
+func Semacquire(s *uint32)
+
+// Semrelease atomically increments *s and notifies a waiting goroutine
+// if one is blocked in Semacquire.
+// It is intended as a simple wakeup primitive for use by the synchronization
+// library and should not be used directly.
+func Semrelease(s *uint32)
+
+// SetFinalizer sets the finalizer associated with x to f.
+// When the garbage collector finds an unreachable block
+// with an associated finalizer, it clears the association and runs
+// f(x) in a separate goroutine.  This makes x reachable again, but
+// now without an associated finalizer.  Assuming that SetFinalizer
+// is not called again, the next time the garbage collector sees
+// that x is unreachable, it will free x.
+//
+// SetFinalizer(x, nil) clears any finalizer associated with x.
+//
+// The argument x must be a pointer to an object allocated by
+// calling new or by taking the address of a composite literal.
+// The argument f must be a function that takes a single argument
+// of x's type and returns no arguments.  If either of these is not
+// true, SetFinalizer aborts the program.
+//
+// Finalizers are run in dependency order: if A points at B, both have
+// finalizers, and they are otherwise unreachable, only the finalizer
+// for A runs; once A is freed, the finalizer for B can run.
+// If a cyclic structure includes a block with a finalizer, that
+// cycle is not guaranteed to be garbage collected and the finalizer
+// is not guaranteed to run, because there is no ordering that
+// respects the dependencies.
+//
+// The finalizer for x is scheduled to run at some arbitrary time after
+// x becomes unreachable.
+// There is no guarantee that finalizers will run before a program exits,
+// so typically they are useful only for releasing non-memory resources
+// associated with an object during a long-running program.
+// For example, an os.File object could use a finalizer to close the
+// associated operating system file descriptor when a program discards
+// an os.File without calling Close, but it would be a mistake
+// to depend on a finalizer to flush an in-memory I/O buffer such as a
+// bufio.Writer, because the buffer would not be flushed at program exit.
+//
+// A single goroutine runs all finalizers for a program, sequentially.
+// If a finalizer must run for a long time, it should do so by starting
+// a new goroutine.
+//
+// TODO(rsc): allow f to have (ignored) return values
+//
+func SetFinalizer(x, f interface{})
+
+func getgoroot() string
+
+// GOROOT returns the root of the Go tree.
+// It uses the GOROOT environment variable, if set,
+// or else the root used during the Go build.
+func GOROOT() string {
+	s := getgoroot()
+	if s != "" {
+		return s
+	}
+	return defaultGoroot
+}
+
+// Version returns the Go tree's version string.
+// It is either a sequence number or, when possible,
+// a release tag like "release.2010-03-04".
+// A trailing + indicates that the tree had local modifications
+// at the time of the build.
+func Version() string {
+	return theVersion
+}
+
+// GOOS is the Go tree's operating system target:
+// one of darwin, freebsd, linux, and so on.
+const GOOS string = theGoos
+
+// GOARCH is the Go tree's architecture target:
+// 386, amd64, or arm.
+const GOARCH string = theGoarch
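The SetFinalizer contract above is easiest to see in miniature; the resource type below is invented for illustration, and exactly when the finalizer runs is up to the collector:

```go
package main

import "runtime"

type resource struct{ id int }

func main() {
	r := &resource{id: 1}
	// f takes a single argument of r's type, as the contract requires.
	runtime.SetFinalizer(r, func(r *resource) {
		println("releasing resource", r.id)
	})
	r = nil      // drop the only reference
	runtime.GC() // the finalizer runs at some arbitrary time after this
}
```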
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
new file mode 100644
index 00000000000..d0cc730899d
--- /dev/null
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -0,0 +1,108 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pprof writes runtime profiling data in the format expected
+// by the pprof visualization tool.
+// For more information about pprof, see
+// http://code.google.com/p/google-perftools/.
+package pprof
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+)
+
+// WriteHeapProfile writes a pprof-formatted heap profile to w.
+// If a write to w returns an error, WriteHeapProfile returns that error.
+// Otherwise, WriteHeapProfile returns nil.
+func WriteHeapProfile(w io.Writer) os.Error {
+	// Find out how many records there are (MemProfile(nil, false)),
+	// allocate that many records, and get the data.
+	// There's a race—more records might be added between
+	// the two calls—so allocate a few extra records for safety
+	// and also try again if we're very unlucky.
+	// The loop should only execute one iteration in the common case.
+	var p []runtime.MemProfileRecord
+	n, ok := runtime.MemProfile(nil, false)
+	for {
+		// Allocate room for a slightly bigger profile,
+		// in case a few more entries have been added
+		// since the call to MemProfile.
+		p = make([]runtime.MemProfileRecord, n+50)
+		n, ok = runtime.MemProfile(p, false)
+		if ok {
+			p = p[0:n]
+			break
+		}
+		// Profile grew; try again.
+	}
+
+	var total runtime.MemProfileRecord
+	for i := range p {
+		r := &p[i]
+		total.AllocBytes += r.AllocBytes
+		total.AllocObjects += r.AllocObjects
+		total.FreeBytes += r.FreeBytes
+		total.FreeObjects += r.FreeObjects
+	}
+
+	// Technically the rate is MemProfileRate not 2*MemProfileRate,
+	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
+	// so that's what pprof has come to expect.
+	b := bufio.NewWriter(w)
+	fmt.Fprintf(b, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
+		total.InUseObjects(), total.InUseBytes(),
+		total.AllocObjects, total.AllocBytes,
+		2*runtime.MemProfileRate)
+
+	for i := range p {
+		r := &p[i]
+		fmt.Fprintf(b, "%d: %d [%d: %d] @",
+			r.InUseObjects(), r.InUseBytes(),
+			r.AllocObjects, r.AllocBytes)
+		for _, pc := range r.Stack() {
+			fmt.Fprintf(b, " %#x", pc)
+		}
+		fmt.Fprintf(b, "\n")
+	}
+
+	// Print memstats information too.
+	// Pprof will ignore, but useful for people.
+	s := &runtime.MemStats
+	fmt.Fprintf(b, "\n# runtime.MemStats\n")
+	fmt.Fprintf(b, "# Alloc = %d\n", s.Alloc)
+	fmt.Fprintf(b, "# TotalAlloc = %d\n", s.TotalAlloc)
+	fmt.Fprintf(b, "# Sys = %d\n", s.Sys)
+	fmt.Fprintf(b, "# Lookups = %d\n", s.Lookups)
+	fmt.Fprintf(b, "# Mallocs = %d\n", s.Mallocs)
+
+	fmt.Fprintf(b, "# HeapAlloc = %d\n", s.HeapAlloc)
+	fmt.Fprintf(b, "# HeapSys = %d\n", s.HeapSys)
+	fmt.Fprintf(b, "# HeapIdle = %d\n", s.HeapIdle)
+	fmt.Fprintf(b, "# HeapInuse = %d\n", s.HeapInuse)
+
+	fmt.Fprintf(b, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
+	fmt.Fprintf(b, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
+	fmt.Fprintf(b, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
+	fmt.Fprintf(b, "# MHeapMapSys = %d\n", s.MHeapMapSys)
+	fmt.Fprintf(b, "# BuckHashSys = %d\n", s.BuckHashSys)
+
+	fmt.Fprintf(b, "# NextGC = %d\n", s.NextGC)
+	fmt.Fprintf(b, "# PauseNs = %d\n", s.PauseNs)
+	fmt.Fprintf(b, "# NumGC = %d\n", s.NumGC)
+	fmt.Fprintf(b, "# EnableGC = %v\n", s.EnableGC)
+	fmt.Fprintf(b, "# DebugGC = %v\n", s.DebugGC)
+
+	fmt.Fprintf(b, "# BySize = Size * (Active = Mallocs - Frees)\n")
+	fmt.Fprintf(b, "# (Excluding large blocks.)\n")
+	for _, t := range s.BySize {
+		if t.Mallocs > 0 {
+			fmt.Fprintf(b, "# %d * (%d = %d - %d)\n", t.Size, t.Mallocs-t.Frees, t.Mallocs, t.Frees)
+		}
+	}
+	return b.Flush()
+}
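WriteHeapProfile accepts any io.Writer, so a sketch can capture the profile in memory rather than guessing at era-specific file APIs; note the os.Error return, matching the signature above:

```go
package main

import (
	"bytes"
	"runtime/pprof"
)

func main() {
	var buf bytes.Buffer // any io.Writer will do
	if err := pprof.WriteHeapProfile(&buf); err != nil {
		println("heap profile failed:", err.String())
		return
	}
	println("heap profile captured:", buf.Len(), "bytes")
}
```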
diff --git a/libgo/go/runtime/sig.go b/libgo/go/runtime/sig.go
new file mode 100644
index 00000000000..6d560b90077
--- /dev/null
+++ b/libgo/go/runtime/sig.go
@@ -0,0 +1,16 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Sigrecv returns a bitmask of signals that have arrived since the last call to Sigrecv.
+// It blocks until at least one signal arrives.
+func Sigrecv() uint32
+
+// Signame returns a string describing the signal, or "" if the signal is unknown.
+func Signame(sig int32) string
+
+// Siginit enables receipt of signals via Sigrecv.  It should typically
+// be called during initialization.
+func Siginit()
diff --git a/libgo/go/runtime/softfloat64.go b/libgo/go/runtime/softfloat64.go
new file mode 100644
index 00000000000..d9bbe5def68
--- /dev/null
+++ b/libgo/go/runtime/softfloat64.go
@@ -0,0 +1,498 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Software IEEE754 64-bit floating point.
+// Only referred to (and thus linked in) by arm port
+// and by gotest in this directory.
+
+package runtime
+
+const (
+	mantbits64 uint = 52
+	expbits64  uint = 11
+	bias64          = -1<<(expbits64-1) + 1
+
+	nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1
+	inf64 uint64 = (1<<expbits64 - 1) << mantbits64
+	neg64 uint64 = 1 << (expbits64 + mantbits64)
+
+	mantbits32 uint = 23
+	expbits32  uint = 8
+	bias32          = -1<<(expbits32-1) + 1
+
+	nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1
+	inf32 uint32 = (1<<expbits32 - 1) << mantbits32
+	neg32 uint32 = 1 << (expbits32 + mantbits32)
+)
+
+func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool) {
+	sign = f & (1 << (mantbits64 + expbits64))
+	mant = f & (1<<mantbits64 - 1)
+	exp = int(f>>mantbits64) & (1<<expbits64 - 1)
+
+	switch exp {
+	case 1<<expbits64 - 1:
+		if mant != 0 {
+			nan = true
+			return
+		}
+		inf = true
+		return
+
+	case 0:
+		// denormalized
+		if mant != 0 {
+			exp += bias64 + 1
+			for mant < 1<<mantbits64 {
+				mant <<= 1
+				exp--
+			}
+		}
+
+	default:
+		// add implicit top bit
+		mant |= 1 << mantbits64
+		exp += bias64
+	}
+	return
+}
+
+func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool) {
+	sign = f & (1 << (mantbits32 + expbits32))
+	mant = f & (1<<mantbits32 - 1)
+	exp = int(f>>mantbits32) & (1<<expbits32 - 1)
+
+	switch exp {
+	case 1<<expbits32 - 1:
+		if mant != 0 {
+			nan = true
+			return
+		}
+		inf = true
+		return
+
+	case 0:
+		// denormalized
+		if mant != 0 {
+			exp += bias32 + 1
+			for mant < 1<<mantbits32 {
+				mant <<= 1
+				exp--
+			}
+		}
+
+	default:
+		// add implicit top bit
+		mant |= 1 << mantbits32
+		exp += bias32
+	}
+	return
+}
+
+func fpack64(sign, mant uint64, exp int, trunc uint64) uint64 {
+	mant0, exp0, trunc0 := mant, exp, trunc
+	if mant == 0 {
+		return sign
+	}
+	for mant < 1<<mantbits64 {
+		mant <<= 1
+		exp--
+	}
+	for mant >= 4<<mantbits64 {
+		trunc |= mant & 1
+		mant >>= 1
+		exp++
+	}
+	if mant >= 2<<mantbits64 {
+		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+			mant++
+			if mant >= 4<<mantbits64 {
+				mant >>= 1
+				exp++
+			}
+		}
+		mant >>= 1
+		exp++
+	}
+	if exp >= 1<<expbits64-1+bias64 {
+		return sign ^ inf64
+	}
+	if exp < bias64+1 {
+		if exp < bias64-int(mantbits64) {
+			return sign | 0
+		}
+		// repeat expecting denormal
+		mant, exp, trunc = mant0, exp0, trunc0
+		for exp < bias64 {
+			trunc |= mant & 1
+			mant >>= 1
+			exp++
+		}
+		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+			mant++
+		}
+		mant >>= 1
+		exp++
+		if mant < 1<<mantbits64 {
+			return sign | mant
+		}
+	}
+	return sign | uint64(exp-bias64)<<mantbits64 | mant&(1<<mantbits64-1)
+}
+
+func fpack32(sign, mant uint32, exp int, trunc uint32) uint32 {
+	mant0, exp0, trunc0 := mant, exp, trunc
+	if mant == 0 {
+		return sign
+	}
+	for mant < 1<<mantbits32 {
+		mant <<= 1
+		exp--
+	}
+	for mant >= 4<<mantbits32 {
+		trunc |= mant & 1
+		mant >>= 1
+		exp++
+	}
+	if mant >= 2<<mantbits32 {
+		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+			mant++
+			if mant >= 4<<mantbits32 {
+				mant >>= 1
+				exp++
+			}
+		}
+		mant >>= 1
+		exp++
+	}
+	if exp >= 1<<expbits32-1+bias32 {
+		return sign ^ inf32
+	}
+	if exp < bias32+1 {
+		if exp < bias32-int(mantbits32) {
+			return sign | 0
+		}
+		// repeat expecting denormal
+		mant, exp, trunc = mant0, exp0, trunc0
+		for exp < bias32 {
+			trunc |= mant & 1
+			mant >>= 1
+			exp++
+		}
+		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+			mant++
+		}
+		mant >>= 1
+		exp++
+		if mant < 1<<mantbits32 {
+			return sign | mant
+		}
+	}
+	return sign | uint32(exp-bias32)<<mantbits32 | mant&(1<<mantbits32-1)
+}
+
+func fadd64(f, g uint64) uint64 {
+	fs, fm, fe, fi, fn := funpack64(f)
+	gs, gm, ge, gi, gn := funpack64(g)
+
+	// Special cases.
+	switch {
+	case fn || gn: // NaN + x or x + NaN = NaN
+		return nan64
+
+	case fi && gi && fs != gs: // +Inf + -Inf or -Inf + +Inf = NaN
+		return nan64
+
+	case fi: // ±Inf + g = ±Inf
+		return f
+
+	case gi: // f + ±Inf = ±Inf
+		return g
+
+	case fm == 0 && gm == 0 && fs != 0 && gs != 0: // -0 + -0 = -0
+		return f
+
+	case fm == 0: // 0 + g = g but 0 + -0 = +0
+		if gm == 0 {
+			g ^= gs
+		}
+		return g
+
+	case gm == 0: // f + 0 = f
+		return f
+
+	}
+
+	if fe < ge || fe == ge && fm < gm {
+		f, g, fs, fm, fe, gs, gm, ge = g, f, gs, gm, ge, fs, fm, fe
+	}
+
+	shift := uint(fe - ge)
+	fm <<= 2
+	gm <<= 2
+	trunc := gm & (1<<shift - 1)
+	gm >>= shift
+	if fs == gs {
+		fm += gm
+	} else {
+		fm -= gm
+		if trunc != 0 {
+			fm--
+		}
+	}
+	if fm == 0 {
+		fs = 0
+	}
+	return fpack64(fs, fm, fe-2, trunc)
+}
+
+func fsub64(f, g uint64) uint64 {
+	return fadd64(f, fneg64(g))
+}
+
+func fneg64(f uint64) uint64 {
+	return f ^ (1 << (mantbits64 + expbits64))
+}
+
+func fmul64(f, g uint64) uint64 {
+	fs, fm, fe, fi, fn := funpack64(f)
+	gs, gm, ge, gi, gn := funpack64(g)
+
+	// Special cases.
+	switch {
+	case fn || gn: // NaN * g or f * NaN = NaN
+		return nan64
+
+	case fi && gi: // Inf * Inf = Inf (with sign adjusted)
+		return f ^ gs
+
+	case fi && gm == 0, fm == 0 && gi: // 0 * Inf = Inf * 0 = NaN
+		return nan64
+
+	case fm == 0: // 0 * x = 0 (with sign adjusted)
+		return f ^ gs
+
+	case gm == 0: // x * 0 = 0 (with sign adjusted)
+		return g ^ fs
+	}
+
+	// 53-bit * 53-bit = 107- or 108-bit
+	lo, hi := mullu(fm, gm)
+	shift := mantbits64 - 1
+	trunc := lo & (1<<shift - 1)
+	mant := hi<<(64-shift) | lo>>shift
+	return fpack64(fs^gs, mant, fe+ge-1, trunc)
+}
+
+func fdiv64(f, g uint64) uint64 {
+	fs, fm, fe, fi, fn := funpack64(f)
+	gs, gm, ge, gi, gn := funpack64(g)
+
+	// Special cases.
+	switch {
+	case fn || gn: // NaN / g = f / NaN = NaN
+		return nan64
+
+	case fi && gi: // ±Inf / ±Inf = NaN
+		return nan64
+
+	case !fi && !gi && fm == 0 && gm == 0: // 0 / 0 = NaN
+		return nan64
+
+	case fi, !gi && gm == 0: // Inf / g = f / 0 = Inf
+		return fs ^ gs ^ inf64
+
+	case gi, fm == 0: // f / Inf = 0 / g = 0
+		return fs ^ gs ^ 0
+	}
+	_, _, _, _ = fi, fn, gi, gn
+
+	// 53-bit<<54 / 53-bit = 53- or 54-bit.
+	shift := mantbits64 + 2
+	q, r := divlu(fm>>(64-shift), fm<<shift, gm)
+	return fpack64(fs^gs, q, fe-ge-2, r)
+}
+
+func f64to32(f uint64) uint32 {
+	fs, fm, fe, fi, fn := funpack64(f)
+	if fn {
+		return nan32
+	}
+	fs32 := uint32(fs >> 32)
+	if fi {
+		return fs32 ^ inf32
+	}
+	const d = mantbits64 - mantbits32 - 1
+	return fpack32(fs32, uint32(fm>>d), fe-1, uint32(fm&(1<<d-1)))
+}
+
+func f32to64(f uint32) uint64 {
+	const d = mantbits64 - mantbits32
+	fs, fm, fe, fi, fn := funpack32(f)
+	if fn {
+		return nan64
+	}
+	fs64 := uint64(fs) << 32
+	if fi {
+		return fs64 ^ inf64
+	}
+	return fpack64(fs64, uint64(fm)<<d, fe, 0)
+}
+
+func fcmp64(f, g uint64) (cmp int, isnan bool) {
+	fs, fm, _, fi, fn := funpack64(f)
+	gs, gm, _, gi, gn := funpack64(g)
+
+	switch {
+	case fn, gn: // flag NaN
+		return 0, true
+
+	case !fi && !gi && fm == 0 && gm == 0: // ±0 == ±0
+		return 0, false
+
+	case fs > gs: // f < 0, g > 0
+		return -1, false
+
+	case fs < gs: // f > 0, g < 0
+		return +1, false
+
+	// Same sign, not NaN.
+	// Can compare encodings directly now.
+	// Reverse for sign.
+	case fs == 0 && f < g, fs != 0 && f > g:
+		return -1, false
+
+	case fs == 0 && f > g, fs != 0 && f < g:
+		return +1, false
+	}
+
+	// f == g
+	return 0, false
+}
+
+func f64toint(f uint64) (val int64, ok bool) {
+	fs, fm, fe, fi, fn := funpack64(f)
+
+	switch {
+	case fi, fn: // NaN
+		return 0, false
+
+	case fe < -1: // f < 0.5
+		return 0, false
+
+	case fe > 63: // f >= 2^63
+		if fs != 0 && fm == 0 { // f == -2^63
+			return -1 << 63, true
+		}
+		if fs != 0 {
+			return 0, false
+		}
+		return 0, false
+	}
+
+	for fe > int(mantbits64) {
+		fe--
+		fm <<= 1
+	}
+	for fe < int(mantbits64) {
+		fe++
+		fm >>= 1
+	}
+	val = int64(fm)
+	if fs != 0 {
+		val = -val
+	}
+	return val, true
+}
+
+func fintto64(val int64) (f uint64) {
+	fs := uint64(val) & (1 << 63)
+	mant := uint64(val)
+	if fs != 0 {
+		mant = -mant
+	}
+	return fpack64(fs, mant, int(mantbits64), 0)
+}
+
+// 64x64 -> 128 multiply.
+// adapted from hacker's delight.
+func mullu(u, v uint64) (lo, hi uint64) {
+	const (
+		s    = 32
+		mask = 1<<s - 1
+	)
+	u0 := u & mask
+	u1 := u >> s
+	v0 := v & mask
+	v1 := v >> s
+	w0 := u0 * v0
+	t := u1*v0 + w0>>s
+	w1 := t & mask
+	w2 := t >> s
+	w1 += u0 * v1
+	return u * v, u1*v1 + w2 + w1>>s
+}
+
+// 128/64 -> 64 quotient, 64 remainder.
+// adapted from hacker's delight
+func divlu(u1, u0, v uint64) (q, r uint64) {
+	const b = 1 << 32
+
+	if u1 >= v {
+		return 1<<64 - 1, 1<<64 - 1
+	}
+
+	// s = nlz(v); v <<= s
+	s := uint(0)
+	for v&(1<<63) == 0 {
+		s++
+		v <<= 1
+	}
+
+	vn1 := v >> 32
+	vn0 := v & (1<<32 - 1)
+	un32 := u1<<s | u0>>(64-s)
+	un10 := u0 << s
+	un1 := un10 >> 32
+	un0 := un10 & (1<<32 - 1)
+	q1 := un32 / vn1
+	rhat := un32 - q1*vn1
+
+again1:
+	if q1 >= b || q1*vn0 > b*rhat+un1 {
+		q1--
+		rhat += vn1
+		if rhat < b {
+			goto again1
+		}
+	}
+
+	un21 := un32*b + un1 - q1*v
+	q0 := un21 / vn1
+	rhat = un21 - q0*vn1
+
+again2:
+	if q0 >= b || q0*vn0 > b*rhat+un0 {
+		q0--
+		rhat += vn1
+		if rhat < b {
+			goto again2
+		}
+	}
+
+	return q1*b + q0, (un21*b + un0 - q0*v) >> s
+}
+
+// callable from C
+
+func fadd64c(f, g uint64, ret *uint64)            { *ret = fadd64(f, g) }
+func fsub64c(f, g uint64, ret *uint64)            { *ret = fsub64(f, g) }
+func fmul64c(f, g uint64, ret *uint64)            { *ret = fmul64(f, g) }
+func fdiv64c(f, g uint64, ret *uint64)            { *ret = fdiv64(f, g) }
+func fneg64c(f uint64, ret *uint64)               { *ret = fneg64(f) }
+func f32to64c(f uint32, ret *uint64)              { *ret = f32to64(f) }
+func f64to32c(f uint64, ret *uint32)              { *ret = f64to32(f) }
+func fcmp64c(f, g uint64, ret *int, retnan *bool) { *ret, *retnan = fcmp64(f, g) }
+func fintto64c(val int64, ret *uint64)            { *ret = fintto64(val) }
+func f64tointc(f uint64, ret *int64, retok *bool) { *ret, *retok = f64toint(f) }
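mullu above is the Hacker's Delight 64×64→128 decomposition: split each operand into 32-bit halves, form the partial products, and propagate the carries. A standalone check of the same technique (the runtime's own mullu is unexported, so this copy is purely for illustration):

```go
package main

// mul128 reproduces mullu's half-word decomposition.
func mul128(u, v uint64) (lo, hi uint64) {
	const s, mask = 32, 1<<32 - 1
	u0, u1 := u&mask, u>>s
	v0, v1 := v&mask, v>>s
	w0 := u0 * v0
	t := u1*v0 + w0>>s
	w1, w2 := t&mask, t>>s
	w1 += u0 * v1
	return u * v, u1*v1 + w2 + w1>>s
}

func main() {
	// (2^64-1) * 2 = 2^65 - 2, i.e. hi = 1, lo = 2^64 - 2.
	lo, hi := mul128(1<<64-1, 2)
	println(hi == 1, lo == 1<<64-2) // true true
}
```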
"runtime" + "testing" +) + +// turn uint64 op into float64 op +func fop(f func(x, y uint64) uint64) func(x, y float64) float64 { + return func(x, y float64) float64 { + bx := math.Float64bits(x) + by := math.Float64bits(y) + return math.Float64frombits(f(bx, by)) + } +} + +func add(x, y float64) float64 { return x + y } +func sub(x, y float64) float64 { return x - y } +func mul(x, y float64) float64 { return x * y } +func div(x, y float64) float64 { return x / y } + +func TestFloat64(t *testing.T) { + base := []float64{ + 0, + math.Copysign(0, -1), + -1, + 1, + math.NaN(), + math.Inf(+1), + math.Inf(-1), + 0.1, + 1.5, + 1.9999999999999998, // all 1s mantissa + 1.3333333333333333, // 1.010101010101... + 1.1428571428571428, // 1.001001001001... + 1.112536929253601e-308, // first normal + 2, + 4, + 8, + 16, + 32, + 64, + 128, + 256, + 3, + 12, + 1234, + 123456, + -0.1, + -1.5, + -1.9999999999999998, + -1.3333333333333333, + -1.1428571428571428, + -2, + -3, + 1e-200, + 1e-300, + 1e-310, + 5e-324, + 1e-105, + 1e-305, + 1e+200, + 1e+306, + 1e+307, + 1e+308, + } + all := make([]float64, 200) + copy(all, base) + for i := len(base); i < len(all); i++ { + all[i] = rand.NormFloat64() + } + + test(t, "+", add, fop(Fadd64), all) + test(t, "-", sub, fop(Fsub64), all) + if GOARCH != "386" { // 386 is not precise! + test(t, "*", mul, fop(Fmul64), all) + test(t, "/", div, fop(Fdiv64), all) + } +} + +// 64 -hw-> 32 -hw-> 64 +func trunc32(f float64) float64 { + return float64(float32(f)) +} + +// 64 -sw->32 -hw-> 64 +func to32sw(f float64) float64 { + return float64(math.Float32frombits(F64to32(math.Float64bits(f)))) +} + +// 64 -hw->32 -sw-> 64 +func to64sw(f float64) float64 { + return math.Float64frombits(F32to64(math.Float32bits(float32(f)))) +} + +// float64 -hw-> int64 -hw-> float64 +func hwint64(f float64) float64 { + return float64(int64(f)) +} + +// float64 -hw-> int32 -hw-> float64 +func hwint32(f float64) float64 { + return float64(int32(f)) +} + +// float64 -sw-> int64 -hw-> float64 +func toint64sw(f float64) float64 { + i, ok := F64toint(math.Float64bits(f)) + if !ok { + // There's no right answer for out of range. + // Match the hardware to pass the test. + i = int64(f) + } + return float64(i) +} + +// float64 -hw-> int64 -sw-> float64 +func fromint64sw(f float64) float64 { + return math.Float64frombits(Fintto64(int64(f))) +} + +var nerr int + +func err(t *testing.T, format string, args ...interface{}) { + t.Errorf(format, args...) + + // cut errors off after a while. + // otherwise we spend all our time + // allocating memory to hold the + // formatted output. 
+	if nerr++; nerr >= 10 {
+		t.Fatal("too many errors")
+	}
+}
+
+func test(t *testing.T, op string, hw, sw func(float64, float64) float64, all []float64) {
+	for _, f := range all {
+		for _, g := range all {
+			h := hw(f, g)
+			s := sw(f, g)
+			if !same(h, s) {
+				err(t, "%g %s %g = sw %g, hw %g\n", f, op, g, s, h)
+			}
+			testu(t, "to32", trunc32, to32sw, h)
+			testu(t, "to64", trunc32, to64sw, h)
+			testu(t, "toint64", hwint64, toint64sw, h)
+			testu(t, "fromint64", hwint64, fromint64sw, h)
+			testcmp(t, f, h)
+			testcmp(t, h, f)
+			testcmp(t, g, h)
+			testcmp(t, h, g)
+		}
+	}
+}
+
+func testu(t *testing.T, op string, hw, sw func(float64) float64, v float64) {
+	h := hw(v)
+	s := sw(v)
+	if !same(h, s) {
+		err(t, "%s %g = sw %g, hw %g\n", op, v, s, h)
+	}
+}
+
+func hwcmp(f, g float64) (cmp int, isnan bool) {
+	switch {
+	case f < g:
+		return -1, false
+	case f > g:
+		return +1, false
+	case f == g:
+		return 0, false
+	}
+	return 0, true // must be NaN
+}
+
+func testcmp(t *testing.T, f, g float64) {
+	hcmp, hisnan := hwcmp(f, g)
+	scmp, sisnan := Fcmp64(math.Float64bits(f), math.Float64bits(g))
+	if hcmp != scmp || hisnan != sisnan {
+		err(t, "cmp(%g, %g) = sw %v, %v, hw %v, %v\n", f, g, scmp, sisnan, hcmp, hisnan)
+	}
+}
+
+func same(f, g float64) bool {
+	if math.IsNaN(f) && math.IsNaN(g) {
+		return true
+	}
+	if math.Copysign(1, f) != math.Copysign(1, g) {
+		return false
+	}
+	return f == g
+}
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
new file mode 100644
index 00000000000..a16809fd062
--- /dev/null
+++ b/libgo/go/runtime/type.go
@@ -0,0 +1,206 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Runtime type representation.
+ *
+ * The following files know the exact layout of these
+ * data structures and must be kept in sync with this file:
+ *
+ *	../../cmd/gc/reflect.c
+ *	../../cmd/ld/dwarf.c
+ *	../reflect/type.go
+ *	type.h
+ */
+
+package runtime
+
+import "unsafe"
+
+// All types begin with a few common fields needed for
+// the interface runtime.
+type commonType struct {
+	Kind       uint8   // type kind
+	align      uint8   // alignment of variable with this type
+	fieldAlign uint8   // alignment of struct field with this type
+	size       uintptr // size in bytes
+	hash       uint32  // hash of type; avoids computation in hash tables
+
+	hashfn  func(unsafe.Pointer, uintptr) uintptr              // hash function
+	equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool // equality function
+
+	string        *string // string form; unnecessary but undeniably useful
+	*uncommonType         // (relatively) uncommon fields
+}
+
+// Values for commonType.kind.
+const (
+	kindBool = 1 + iota
+	kindInt
+	kindInt8
+	kindInt16
+	kindInt32
+	kindInt64
+	kindUint
+	kindUint8
+	kindUint16
+	kindUint32
+	kindUint64
+	kindUintptr
+	kindFloat
+	kindFloat32
+	kindFloat64
+	kindComplex
+	kindComplex64
+	kindComplex128
+	kindArray
+	kindChan
+	kindFunc
+	kindInterface
+	kindMap
+	kindPtr
+	kindSlice
+	kindString
+	kindStruct
+	kindUnsafePointer
+
+	// Not currently generated by gccgo.
+	// kindNoPointers = 1 << 7 // OR'ed into kind
+)
+
+// Externally visible name.
+type Type commonType
+
+// Method on non-interface type
+type method struct {
+	name    *string        // name of method
+	pkgPath *string        // nil for exported Names; otherwise import path
+	mtyp    *Type          // method type (without receiver)
+	typ     *Type          // .(*FuncType) underneath (with receiver)
+	tfn     unsafe.Pointer // fn used for normal method call
+}
+
+// uncommonType is present only for types with names or methods
+// (if T is a named type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe an unnamed type with no methods.
+type uncommonType struct {
+	name    *string  // name of type
+	pkgPath *string  // import path; nil for built-in types like int, string
+	methods []method // methods associated with type
+}
+
+// BoolType represents a boolean type.
+type BoolType commonType
+
+// FloatType represents a float type.
+type FloatType commonType
+
+// ComplexType represents a complex type.
+type ComplexType commonType
+
+// IntType represents an int type.
+type IntType commonType
+
+// UintType represents a uint type.
+type UintType commonType
+
+// StringType represents a string type.
+type StringType commonType
+
+// UintptrType represents a uintptr type.
+type UintptrType commonType
+
+// UnsafePointerType represents an unsafe.Pointer type.
+type UnsafePointerType commonType
+
+// ArrayType represents a fixed array type.
+type ArrayType struct {
+	commonType
+	elem *Type // array element type
+	len  uintptr
+}
+
+// SliceType represents a slice type.
+type SliceType struct {
+	commonType
+	elem *Type // slice element type
+}
+
+// ChanDir represents a channel type's direction.
+type ChanDir int
+
+const (
+	RecvDir ChanDir = 1 << iota // <-chan
+	SendDir                     // chan<-
+	BothDir = RecvDir | SendDir // chan
+)
+
+// ChanType represents a channel type.
+type ChanType struct {
+	commonType
+	elem *Type   // channel element type
+	dir  uintptr // channel direction (ChanDir)
+}
+
+// FuncType represents a function type.
+type FuncType struct {
+	commonType
+	dotdotdot bool    // last input parameter is ...
+	in        []*Type // input parameter types
+	out       []*Type // output parameter types
+}
+
+// Method on interface type
+type imethod struct {
+	name    *string // name of method
+	pkgPath *string // nil for exported Names; otherwise import path
+	typ     *Type   // .(*FuncType) underneath
+}
+
+// InterfaceType represents an interface type.
+type InterfaceType struct {
+	commonType
+	methods []imethod // sorted by hash
+}
+
+// MapType represents a map type.
+type MapType struct {
+	commonType
+	key  *Type // map key type
+	elem *Type // map element (value) type
+}
+
+// PtrType represents a pointer type.
+type PtrType struct {
+	commonType
+	elem *Type // pointer element (pointed at) type
+}
+
+// Struct field
+type structField struct {
+	name    *string // nil for embedded fields
+	pkgPath *string // nil for exported Names; otherwise import path
+	typ     *Type   // type of field
+	tag     *string // nil if no tag
+	offset  uintptr // byte offset of field within struct
+}
+
+// StructType represents a struct type.
+type StructType struct {
+	commonType
+	fields []structField // sorted by offset
+}
+
+/*
+ * Must match iface.c:/Itab and compilers.
+ */
+type Itable struct {
+	Itype  *Type // (*tab.inter).(*InterfaceType) is the interface type
+	Type   *Type
+	link   *Itable
+	bad    int32
+	unused int32
+	Fn     [100000]uintptr // bigger than we'll ever see
+}
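type.go is the runtime's private mirror of the data behind package reflect (the header comment lists the files that must stay in sync, including ../reflect/type.go). For orientation, the public view of the same information — reflect.Typeof is the pre-Go 1 spelling current when this patch landed:

```go
package main

import "reflect"

func main() {
	// The commonType fields (kind, size, string form) are what
	// reflect surfaces through its public Type interface.
	t := reflect.Typeof(map[string]int{})
	println(t.String()) // prints the map type's string form
}
```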