Diffstat (limited to 'libgo/runtime/runtime.h')
-rw-r--r-- | libgo/runtime/runtime.h | 116
1 files changed, 104 insertions, 12 deletions
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 74050da9e98..defe792c761 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -1,8 +1,6 @@
-/* runtime.h -- runtime support for Go.
-
-   Copyright 2009 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file. */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 #include "config.h"
 
@@ -42,8 +40,12 @@ typedef signed int int64 __attribute__ ((mode (DI)));
 typedef unsigned int uint64 __attribute__ ((mode (DI)));
 typedef float float32 __attribute__ ((mode (SF)));
 typedef double float64 __attribute__ ((mode (DF)));
+typedef signed int intptr __attribute__ ((mode (pointer)));
 typedef unsigned int uintptr __attribute__ ((mode (pointer)));
 
+typedef int intgo; // Go's int
+typedef unsigned int uintgo; // Go's uint
+
 /* Defined types. */
 
 typedef uint8 bool;
@@ -59,6 +61,10 @@ typedef struct FixAlloc FixAlloc;
 typedef struct Hchan Hchan;
 typedef struct Timers Timers;
 typedef struct Timer Timer;
+typedef struct GCStats GCStats;
+typedef struct LFNode LFNode;
+typedef struct ParFor ParFor;
+typedef struct ParForThread ParForThread;
 
 typedef struct __go_open_array Slice;
 typedef struct __go_string String;
@@ -105,6 +111,10 @@ enum
 	true = 1,
 	false = 0,
 };
+enum
+{
+	PtrSize = sizeof(void*),
+};
 /*
  * structures
  */
@@ -119,6 +129,16 @@ union Note
 	uint32 key;	// futex-based impl
 	M* waitm;	// waiting M (sema-based impl)
 };
+struct GCStats
+{
+	// the struct must consist of only uint64's,
+	// because it is casted to uint64[].
+	uint64 nhandoff;
+	uint64 nhandoffcnt;
+	uint64 nprocyield;
+	uint64 nosyield;
+	uint64 nsleep;
+};
 struct G
 {
 	Defer* defer;
@@ -142,6 +162,7 @@ struct G
 	G* schedlink;
 	bool readyonstop;
 	bool ispanic;
+	int8 raceignore; // ignore race detection events
 	M* m;	// for debuggers, but offset not hard-coded
 	M* lockedm;
 	M* idlem;
@@ -190,6 +211,14 @@ struct M
 	uintptr waitsema;	// semaphore for parking on locks
 	uint32 waitsemacount;
 	uint32 waitsemalock;
+	GCStats gcstats;
+	bool racecall;
+	void* racepc;
+
+	uintptr settype_buf[1024];
+	uintptr settype_bufsize;
+
+	uintptr end[];
 };
 
 struct SigTab
@@ -218,7 +247,6 @@ struct Func
 	uintptr entry;	// entry pc
 };
 
-/* Macros. */
 
 #ifdef GOOS_windows
 enum {
@@ -257,6 +285,34 @@ struct Timer
 	Eface arg;
 };
 
+// Lock-free stack node.
+struct LFNode
+{
+	LFNode *next;
+	uintptr pushcnt;
+};
+
+// Parallel for descriptor.
+struct ParFor
+{
+	void (*body)(ParFor*, uint32);	// executed for each element
+	uint32 done;	// number of idle threads
+	uint32 nthr;	// total number of threads
+	uint32 nthrmax;	// maximum number of threads
+	uint32 thrseq;	// thread id sequencer
+	uint32 cnt;	// iteration space [0, cnt)
+	void *ctx;	// arbitrary user context
+	bool wait;	// if true, wait while all threads finish processing,
+			// otherwise parfor may return while other threads are still working
+	ParForThread *thr;	// array of thread descriptors
+	// stats
+	uint64 nsteal;
+	uint64 nstealcnt;
+	uint64 nprocyield;
+	uint64 nosyield;
+	uint64 nsleep;
+};
+
 /*
  * defined macros
  * you need super-gopher-guru privilege
@@ -265,6 +321,7 @@ struct Timer
 #define nelem(x) (sizeof(x)/sizeof((x)[0]))
 #define nil ((void*)0)
 #define USED(v) ((void) v)
+#define ROUND(x, n) (((x)+(n)-1)&~((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
 /*
  * external data
  */
@@ -312,7 +369,8 @@ G* runtime_malg(int32, byte**, size_t*);
 void runtime_minit(void);
 void runtime_mallocinit(void);
 void runtime_gosched(void);
-void runtime_tsleep(int64);
+void runtime_park(void(*)(Lock*), Lock*, const char*);
+void runtime_tsleep(int64, const char*);
 M* runtime_newm(void);
 void runtime_goexit(void);
 void runtime_entersyscall(void) __asm__("syscall.Entersyscall");
@@ -322,9 +380,12 @@ bool __go_sigsend(int32 sig);
 int32 runtime_callers(int32, uintptr*, int32);
 int64 runtime_nanotime(void);
 int64 runtime_cputicks(void);
+int64 runtime_tickspersecond(void);
+void runtime_blockevent(int64, int32);
+extern int64 runtime_blockprofilerate;
 
 void runtime_stoptheworld(void);
-void runtime_starttheworld(bool);
+void runtime_starttheworld(void);
 extern uint32 runtime_worldsema;
 
 G* __go_go(void (*pfn)(void*), void*);
@@ -372,6 +433,28 @@ void runtime_futexsleep(uint32*, uint32, int64);
 void runtime_futexwakeup(uint32*, uint32);
 
 /*
+ * Lock-free stack.
+ * Initialize uint64 head to 0, compare with 0 to test for emptiness.
+ * The stack does not keep pointers to nodes,
+ * so they can be garbage collected if there are no other pointers to nodes.
+ */
+void runtime_lfstackpush(uint64 *head, LFNode *node)
+  asm("runtime.lfstackpush");
+LFNode* runtime_lfstackpop(uint64 *head);
+
+/*
+ * Parallel for over [0, n).
+ * body() is executed for each iteration.
+ * nthr - total number of worker threads.
+ * ctx - arbitrary user context.
+ * if wait=true, threads return from parfor() when all work is done;
+ * otherwise, threads can return while other threads are still finishing processing.
+ */
+ParFor* runtime_parforalloc(uint32 nthrmax);
+void runtime_parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32));
+void runtime_parfordo(ParFor *desc) asm("runtime.parfordo");
+
+/*
  * low level C-called
  */
 #define runtime_mmap mmap
@@ -432,12 +515,17 @@ MCache* runtime_allocmcache(void);
 void free(void *v);
 #define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
 #define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
+#define runtime_cas64(pval, pold, new) __atomic_compare_exchange_n (pval, pold, new, 1, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
 #define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
+#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v)
 #define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
 #define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
 #define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
 #define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
 #define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
+#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
+#define PREFETCH(p) __builtin_prefetch(p)
 
 struct __go_func_type;
 bool runtime_addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *);
@@ -469,8 +557,7 @@ void runtime_newErrorString(String, Eface*)
 /*
  * wrapped for go users
  */
-bool runtime_isInf(float64 f, int32 sign);
-#define runtime_isNaN(f) __builtin_isnan(f)
+#define ISNAN(f) __builtin_isnan(f)
 void runtime_semacquire(uint32 volatile *);
 void runtime_semrelease(uint32 volatile *);
 int32 runtime_gomaxprocsfunc(int32 n);
@@ -493,8 +580,13 @@ uintptr runtime_memlimit(void);
 // This is a no-op on other systems.
 void runtime_setprof(bool);
 
-void runtime_time_scan(void (*)(byte*, int64));
-void runtime_trampoline_scan(void (*)(byte *, int64));
+enum
+{
+	UseSpanType = 1,
+};
+
+void runtime_time_scan(void (*)(byte*, uintptr));
+void runtime_trampoline_scan(void (*)(byte *, uintptr));
 
 void runtime_setsig(int32, bool, bool);
 #define runtime_setitimer setitimer
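
The GCStats comment above ("must consist of only uint64's, because it is casted to uint64[]") is the whole contract of the type: a block of counters can be accumulated without naming each field. A minimal sketch of that idea, assuming only the declarations in this header; the addgcstats name and the dst/src roles are invented for the example, not part of the patch:

static void
addgcstats(GCStats *dst, GCStats *src)
{
	uint64 *d, *s;
	uint32 i;

	// Treat both structs as flat arrays of counters and sum them
	// element by element; valid only because every field is a uint64.
	d = (uint64*)dst;
	s = (uint64*)src;
	for(i = 0; i < sizeof(GCStats)/sizeof(uint64); i++)
		d[i] += s[i];
}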
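
The lock-free stack interface (runtime_lfstackpush/runtime_lfstackpop) is intrusive: the caller embeds an LFNode in its own node type and treats a plain uint64, initialized to 0, as the whole stack. A sketch under those assumptions; the MyWork type, its fields, and the wrapper functions are hypothetical:

typedef struct MyWork MyWork;
struct MyWork
{
	LFNode node;	// embedded first, so a MyWork* and its LFNode* coincide
	int32 id;
};

static uint64 workstack;	// 0 means empty

static void
pushwork(MyWork *w)
{
	runtime_lfstackpush(&workstack, &w->node);
}

static MyWork*
popwork(void)
{
	LFNode *n;

	n = runtime_lfstackpop(&workstack);
	if(n == nil)
		return nil;	// stack was empty
	// Casting back is safe because node is the first field of MyWork.
	return (MyWork*)n;
}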
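
The parallel-for API splits the iteration space [0, n) across participating threads, with stealing between them (hence the nsteal counters), and each of the nthr threads is expected to call runtime_parfordo on the same descriptor. A sketch of the calling convention, assuming the declarations above; the array, its size, and the thread count are invented:

enum { NELEM = 1024 };
static int32 squares[NELEM];

static void
squarebody(ParFor *desc, uint32 i)
{
	USED(desc);
	// Called exactly once for every index i in [0, NELEM).
	squares[i] = (int32)(i * i);
}

static void
runsquares(void)
{
	ParFor *desc;

	desc = runtime_parforalloc(4);	// room for up to 4 threads
	// 4 threads, NELEM iterations, no user context, wait for completion.
	runtime_parforsetup(desc, 4, NELEM, nil, true, squarebody);
	// In real use each participating thread calls this; with wait=true
	// it returns only after the whole range has been processed.
	runtime_parfordo(desc);
}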
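
The new 64-bit atomics map directly onto GCC builtins; note that runtime_cas64 takes a pointer to the expected old value, because __atomic_compare_exchange_n rewrites it on failure. A small compare-and-swap loop built on those macros; the recordmax64 helper is invented for the example:

static void
recordmax64(uint64 *max, uint64 v)
{
	uint64 old;

	for(;;) {
		old = runtime_atomicload64(max);
		if(v <= old)
			return;		// current maximum already covers v
		// On failure the builtin also updates old, but the loop
		// simply reloads it at the top and retries.
		if(runtime_cas64(max, &old, v))
			return;
	}
}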