Diffstat (limited to 'libgo/runtime/malloc.goc')
-rw-r--r--  libgo/runtime/malloc.goc  | 882
1 file changed, 0 insertions, 882 deletions
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
deleted file mode 100644
index 232210fc4eb..00000000000
--- a/libgo/runtime/malloc.goc
+++ /dev/null
@@ -1,882 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// See malloc.h for overview.
-//
-// TODO(rsc): double-check stats.
-
-package runtime
-#include <stddef.h>
-#include <errno.h>
-#include <stdlib.h>
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "go-type.h"
-
-// Map gccgo field names to gc field names.
-// Type aka __go_type_descriptor
-#define kind __code
-#define string __reflection
-
-// GCCGO SPECIFIC CHANGE
-//
-// There is a long comment in runtime_mallocinit about where to put the heap
-// on a 64-bit system. It makes assumptions that are not valid on linux/arm64
-// -- it assumes user space can choose the lower 47 bits of a pointer, but on
-// linux/arm64 we can only choose the lower 39 bits. This means the heap is
-// roughly a quarter of the available address space and we cannot choose a bit
-// pattern that all pointers will have -- luckily the GC is mostly precise
-// these days so this doesn't matter all that much. The kernel (as of 3.13)
-// will allocate address space starting either down from 0x7fffffffff or up
-// from 0x2000000000, so we put the heap roughly in the middle of these two
-// addresses to minimize the chance that a non-heap allocation will get in the
-// way of the heap.
-//
-// This all means that there isn't much point in trying the 128 different
-// locations used for the heap on other systems.
-#ifdef __aarch64__
-#define HeapBase(i) ((void*)(uintptr)(0x40ULL<<32))
-#define HeapBaseOptions 1
-#else
-#define HeapBase(i) ((void*)(uintptr)(i<<40|0x00c0ULL<<32))
-#define HeapBaseOptions 0x80
-#endif
-// END GCCGO SPECIFIC CHANGE
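-
-// Illustrative note (not part of the original file): the candidate
-// addresses that the reservation loop in runtime_mallocinit derives
-// from HeapBase. On non-aarch64 systems, HeapBase(i) for i = 0..0x7f
-// gives
-//	0x000000c000000000, 0x000001c000000000, ..., 0x00007fc000000000
-// i.e. the 0x00c0 byte pattern with the seven bits above bit 40
-// varying. On linux/arm64 the single candidate is
-//	0x40ULL<<32 == 0x4000000000
-// which sits roughly midway between the kernel's two allocation
-// starting points (0x2000000000 and 0x7fffffffff) described above.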
-
-// Mark mheap as 'no pointers': it does not contain interesting pointers but occupies ~45K.
-MHeap runtime_mheap;
-
-int32 runtime_checking;
-
-extern volatile intgo runtime_MemProfileRate
- __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
-
-static MSpan* largealloc(uint32, uintptr*);
-static void runtime_profilealloc(void *v, uintptr size);
-static void settype(MSpan *s, void *v, uintptr typ);
-
-// Allocate an object of at least size bytes.
-// Small objects are allocated from the per-thread cache's free lists.
-// Large objects (> 32 kB) are allocated straight from the heap.
-// If the block will be freed with runtime_free(), typ must be 0.
-void*
-runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
-{
- M *m;
- G *g;
- int32 sizeclass;
- uintptr tinysize, size1;
- intgo rate;
- MCache *c;
- MSpan *s;
- MLink *v, *next;
- byte *tiny;
- bool incallback;
- MStats *pmstats;
-
- if(size == 0) {
- // All 0-length allocations use this pointer.
- // The language does not require the allocations to
- // have distinct values.
- return runtime_getZerobase();
- }
-
- g = runtime_g();
- m = g->m;
-
- incallback = false;
- if(m->mcache == nil && m->ncgo > 0) {
- // For gccgo this case can occur when a cgo or SWIG function
- // has an interface return type and the function
- // returns a non-pointer, so memory allocation occurs
- // after syscall.Cgocall but before syscall.CgocallDone.
- // We treat it as a callback.
- runtime_exitsyscall(0);
- m = runtime_m();
- incallback = true;
- flag |= FlagNoInvokeGC;
- }
-
- if((g->preempt || runtime_gcwaiting()) && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC) && m->preemptoff.len == 0) {
- g->preempt = false;
- runtime_gosched();
- m = runtime_m();
- }
- if(m->mallocing)
- runtime_throw("malloc/free - deadlock");
- // Disable preemption during settype.
-	// We cannot use m->mallocing for this, because settype calls mallocgc.
- m->locks++;
- m->mallocing = 1;
-
- if(DebugTypeAtBlockEnd)
- size += sizeof(uintptr);
-
- c = m->mcache;
- if(!runtime_debug.efence && size <= MaxSmallSize) {
- if((flag&(FlagNoScan|FlagNoGC)) == FlagNoScan && size < TinySize) {
- // Tiny allocator.
- //
-			// The tiny allocator combines several tiny allocation requests
-			// into a single memory block. The resulting memory block
-			// is freed when all subobjects are unreachable. The subobjects
-			// must be FlagNoScan (have no pointers); this ensures that
-			// the amount of potentially wasted memory is bounded.
-			//
-			// The size of the memory block used for combining (TinySize) is tunable.
-			// The current setting is 16 bytes, which amounts to 2x worst case memory
-			// wastage (when all but one subobject are unreachable).
-			// 8 bytes would result in no wastage at all, but provides fewer
-			// opportunities for combining.
-			// 32 bytes provides more opportunities for combining,
-			// but can lead to 4x worst case wastage.
-			// The best-case saving is 8x regardless of block size.
-			//
-			// Objects obtained from the tiny allocator must not be freed explicitly.
-			// So when an object is to be freed explicitly, we ensure that
-			// its size >= TinySize.
-			//
-			// SetFinalizer has a special case for objects potentially coming
-			// from the tiny allocator; in that case it allows setting finalizers
-			// for an inner byte of a memory block.
-			//
-			// The main targets of the tiny allocator are small strings and
-			// standalone escaping variables. On a json benchmark
-			// the allocator reduces the number of allocations by ~12% and
-			// reduces the heap size by ~20%.
-
- tinysize = c->tinysize;
- if(size <= tinysize) {
- tiny = c->tiny;
- // Align tiny pointer for required (conservative) alignment.
- if((size&7) == 0)
- tiny = (byte*)ROUND((uintptr)tiny, 8);
- else if((size&3) == 0)
- tiny = (byte*)ROUND((uintptr)tiny, 4);
- else if((size&1) == 0)
- tiny = (byte*)ROUND((uintptr)tiny, 2);
- size1 = size + (tiny - (byte*)c->tiny);
- if(size1 <= tinysize) {
- // The object fits into existing tiny block.
- v = (MLink*)tiny;
- c->tiny = (byte*)c->tiny + size1;
- c->tinysize -= size1;
- m->mallocing = 0;
- m->locks--;
- if(incallback)
- runtime_entersyscall(0);
- return v;
- }
- }
- // Allocate a new TinySize block.
- s = c->alloc[TinySizeClass];
- if(s->freelist == nil)
- s = runtime_MCache_Refill(c, TinySizeClass);
- v = s->freelist;
- next = v->next;
- s->freelist = next;
- s->ref++;
- if(next != nil) // prefetching nil leads to a DTLB miss
- PREFETCH(next);
- ((uint64*)v)[0] = 0;
- ((uint64*)v)[1] = 0;
-			// See if we need to replace the existing tiny block with the new one
-			// based on the amount of remaining free space.
- if(TinySize-size > tinysize) {
- c->tiny = (byte*)v + size;
- c->tinysize = TinySize - size;
- }
- size = TinySize;
- goto done;
- }
- // Allocate from mcache free lists.
- // Inlined version of SizeToClass().
- if(size <= 1024-8)
- sizeclass = runtime_size_to_class8[(size+7)>>3];
- else
- sizeclass = runtime_size_to_class128[(size-1024+127) >> 7];
- size = runtime_class_to_size[sizeclass];
- s = c->alloc[sizeclass];
- if(s->freelist == nil)
- s = runtime_MCache_Refill(c, sizeclass);
- v = s->freelist;
- next = v->next;
- s->freelist = next;
- s->ref++;
- if(next != nil) // prefetching nil leads to a DTLB miss
- PREFETCH(next);
- if(!(flag & FlagNoZero)) {
- v->next = nil;
- // block is zeroed iff second word is zero ...
- if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0)
- runtime_memclr((byte*)v, size);
- }
- done:
- c->local_cachealloc += size;
- } else {
- // Allocate directly from heap.
- s = largealloc(flag, &size);
- v = (void*)(s->start << PageShift);
- }
-
- if(flag & FlagNoGC)
- runtime_marknogc(v);
- else if(!(flag & FlagNoScan))
- runtime_markscan(v);
-
- if(DebugTypeAtBlockEnd)
- *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;
-
- m->mallocing = 0;
- // TODO: save type even if FlagNoScan? Potentially expensive but might help
- // heap profiling/tracing.
- if(UseSpanType && !(flag & FlagNoScan) && typ != 0)
- settype(s, v, typ);
-
- if(runtime_debug.allocfreetrace)
- runtime_tracealloc(v, size, typ);
-
- if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
- if(size < (uintptr)rate && size < (uintptr)(uint32)c->next_sample)
- c->next_sample -= size;
- else
- runtime_profilealloc(v, size);
- }
-
- m->locks--;
-
- pmstats = mstats();
- if(!(flag & FlagNoInvokeGC) && pmstats->heap_alloc >= pmstats->next_gc)
- runtime_gc(0);
-
- if(incallback)
- runtime_entersyscall(0);
-
- return v;
-}
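-
-// A worked example (not part of the original file) of the tiny-block
-// bookkeeping in runtime_mallocgc above, using hypothetical addresses:
-//	// c->tiny = 0x1004, c->tinysize = 12, request: size = 8
-//	tiny  = ROUND(0x1004, 8);          // = 0x1008, the (size&7)==0 path
-//	size1 = 8 + (0x1008 - 0x1004);     // = 12, includes 4 padding bytes
-//	// size1 <= tinysize, so v = 0x1008, c->tiny advances to 0x1010
-//	// and c->tinysize drops to 0.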
-
-static MSpan*
-largealloc(uint32 flag, uintptr *sizep)
-{
- uintptr npages, size;
- MSpan *s;
- void *v;
-
- // Allocate directly from heap.
- size = *sizep;
- if(size + PageSize < size)
- runtime_throw("out of memory");
- npages = size >> PageShift;
- if((size & PageMask) != 0)
- npages++;
- s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, !(flag & FlagNoZero));
- if(s == nil)
- runtime_throw("out of memory");
- s->limit = (uintptr)((byte*)(s->start<<PageShift) + size);
- *sizep = npages<<PageShift;
- v = (void*)(s->start << PageShift);
- // setup for mark sweep
-	// set up for mark sweep
- return s;
-}
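-
-// Worked numbers (not part of the original file) for the page round-up
-// in largealloc above, assuming PageShift = 13 (8K runtime pages):
-//	size   = 20000
-//	npages = 20000 >> 13;              // = 2
-//	20000 & PageMask                   // = 3616, nonzero, so npages = 3
-//	*sizep = 3 << 13;                  // = 24576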
-
-static void
-runtime_profilealloc(void *v, uintptr size)
-{
- uintptr rate;
- int32 next;
- MCache *c;
-
- c = runtime_m()->mcache;
- rate = runtime_MemProfileRate;
- if(size < rate) {
- // pick next profile time
- // If you change this, also change allocmcache.
- if(rate > 0x3fffffff) // make 2*rate not overflow
- rate = 0x3fffffff;
- next = runtime_fastrand() % (2*rate);
-		// Subtract the "remainder" of the current allocation.
-		// Otherwise objects that are close in size to the sampling rate
-		// would be under-sampled, because we consistently discard this remainder.
- next -= (size - c->next_sample);
- if(next < 0)
- next = 0;
- c->next_sample = next;
- }
- runtime_MProf_Malloc(v, size);
-}
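-
-// Illustrative numbers (not part of the original file) for the
-// next-sample computation above, assuming the default MemProfileRate
-// of 512*1024:
-//	rate = 524288, size = 4096, c->next_sample = 1000
-//	next = runtime_fastrand() % 1048576;   // uniform in [0, 2*rate)
-//	next -= 4096 - 1000;                   // credit the 3096-byte overshoot
-//	// next is clamped at 0 and stored back into c->next_sample.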
-
-int32
-runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
-{
- M *m;
- uintptr n, i;
- byte *p;
- MSpan *s;
-
- m = runtime_m();
-
- m->mcache->local_nlookup++;
- if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
- // purge cache stats to prevent overflow
- runtime_lock(&runtime_mheap);
- runtime_purgecachedstats(m->mcache);
- runtime_unlock(&runtime_mheap);
- }
-
- s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
- if(sp)
- *sp = s;
- if(s == nil) {
- runtime_checkfreed(v, 1);
- if(base)
- *base = nil;
- if(size)
- *size = 0;
- return 0;
- }
-
- p = (byte*)((uintptr)s->start<<PageShift);
- if(s->sizeclass == 0) {
- // Large object.
- if(base)
- *base = p;
- if(size)
- *size = s->npages<<PageShift;
- return 1;
- }
-
- n = s->elemsize;
- if(base) {
- i = ((byte*)v - p)/n;
- *base = p + i*n;
- }
- if(size)
- *size = n;
-
- return 1;
-}
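-
-// Worked example (not part of the original file) of the small-object
-// case in runtime_mlookup above: for a span starting at p with
-// elemsize n = 48 and a lookup of v = p + 100:
-//	i     = (v - p)/n;                 // = 100/48 = 2
-//	*base = p + i*n;                   // = p + 96
-//	*size = n;                         // = 48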
-
-void
-runtime_purgecachedstats(MCache *c)
-{
- MHeap *h;
- int32 i;
-
- // Protected by either heap or GC lock.
- h = &runtime_mheap;
- mstats()->heap_alloc += (intptr)c->local_cachealloc;
- c->local_cachealloc = 0;
- mstats()->nlookup += c->local_nlookup;
- c->local_nlookup = 0;
- h->largefree += c->local_largefree;
- c->local_largefree = 0;
- h->nlargefree += c->local_nlargefree;
- c->local_nlargefree = 0;
- for(i=0; i<(int32)nelem(c->local_nsmallfree); i++) {
- h->nsmallfree[i] += c->local_nsmallfree[i];
- c->local_nsmallfree[i] = 0;
- }
-}
-
-// Initialized in mallocinit because it's defined in go/runtime/mem.go.
-
-#define MaxArena32 (2U<<30)
-
-void
-runtime_mallocinit(void)
-{
- byte *p, *p1;
- uintptr arena_size, bitmap_size, spans_size, p_size;
- uintptr *pend;
- uintptr end;
- uintptr limit;
- uint64 i;
- bool reserved;
-
- p = nil;
- p_size = 0;
- arena_size = 0;
- bitmap_size = 0;
- spans_size = 0;
- reserved = false;
-
- // for 64-bit build
- USED(p);
- USED(p_size);
- USED(arena_size);
- USED(bitmap_size);
- USED(spans_size);
-
- runtime_InitSizes();
-
- if(runtime_class_to_size[TinySizeClass] != TinySize)
- runtime_throw("bad TinySizeClass");
-
- // limit = runtime_memlimit();
- // See https://code.google.com/p/go/issues/detail?id=5049
- // TODO(rsc): Fix after 1.1.
- limit = 0;
-
- // Set up the allocation arena, a contiguous area of memory where
- // allocated data will be found. The arena begins with a bitmap large
- // enough to hold 4 bits per allocated word.
- if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
- // On a 64-bit machine, allocate from a single contiguous reservation.
- // 128 GB (MaxMem) should be big enough for now.
- //
- // The code will work with the reservation at any address, but ask
- // SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
- // Allocating a 128 GB region takes away 37 bits, and the amd64
- // doesn't let us choose the top 17 bits, so that leaves the 11 bits
- // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
- // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
- // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
- // UTF-8 sequences, and they are otherwise as far away from
- // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
- // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
- // on OS X during thread allocations. 0x00c0 causes conflicts with
- // AddressSanitizer which reserves all memory up to 0x0100.
- // These choices are both for debuggability and to reduce the
- // odds of the conservative garbage collector not collecting memory
- // because some non-pointer block of memory had a bit pattern
- // that matched a memory address.
- //
- // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
- // but it hardly matters: e0 00 is not valid UTF-8 either.
- //
-		// If this fails, we fall back to the 32-bit memory mechanism.
- arena_size = MaxMem;
- bitmap_size = arena_size / (sizeof(void*)*8/4);
- spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
- spans_size = ROUND(spans_size, PageSize);
- for(i = 0; i < HeapBaseOptions; i++) {
- p = HeapBase(i);
- p_size = bitmap_size + spans_size + arena_size + PageSize;
- p = runtime_SysReserve(p, p_size, &reserved);
- if(p != nil)
- break;
- }
- }
- if (p == nil) {
- // On a 32-bit machine, we can't typically get away
- // with a giant virtual address space reservation.
- // Instead we map the memory information bitmap
- // immediately after the data segment, large enough
-		// to handle another 2 GB of mappings (a 256 MB bitmap),
- // along with a reservation for another 512 MB of memory.
- // When that gets used up, we'll start asking the kernel
- // for any memory anywhere and hope it's in the 2GB
- // following the bitmap (presumably the executable begins
-		// near the bottom of memory, so we'll have to use up
-		// most of the address space before the kernel resorts to giving
-		// out memory below the beginning of the text segment).
- //
-		// Alternatively we could reserve a 512 MB bitmap, enough
- // for 4GB of mappings, and then accept any memory the
- // kernel threw at us, but normally that's a waste of 512 MB
- // of address space, which is probably too much in a 32-bit world.
- bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
- arena_size = 512<<20;
- spans_size = MaxArena32 / PageSize * sizeof(runtime_mheap.spans[0]);
- if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
- bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
- arena_size = bitmap_size * 8;
- spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
- }
- spans_size = ROUND(spans_size, PageSize);
-
- // SysReserve treats the address we ask for, end, as a hint,
- // not as an absolute requirement. If we ask for the end
- // of the data segment but the operating system requires
- // a little more space before we can start allocating, it will
- // give out a slightly higher pointer. Except QEMU, which
- // is buggy, as usual: it won't adjust the pointer upward.
- // So adjust it upward a little bit ourselves: 1/4 MB to get
- // away from the running binary image and then round up
- // to a MB boundary.
-
- end = 0;
- pend = &__go_end;
- if(pend != nil)
- end = *pend;
- p = (byte*)ROUND(end + (1<<18), 1<<20);
- p_size = bitmap_size + spans_size + arena_size + PageSize;
- p = runtime_SysReserve(p, p_size, &reserved);
- if(p == nil)
- runtime_throw("runtime: cannot reserve arena virtual address space");
- }
-
-	// PageSize can be larger than the OS definition of page size,
- // so SysReserve can give us a PageSize-unaligned pointer.
- // To overcome this we ask for PageSize more and round up the pointer.
- p1 = (byte*)ROUND((uintptr)p, PageSize);
-
- runtime_mheap.spans = (MSpan**)p1;
- runtime_mheap.bitmap = p1 + spans_size;
- runtime_mheap.arena_start = p1 + spans_size + bitmap_size;
- runtime_mheap.arena_used = runtime_mheap.arena_start;
- runtime_mheap.arena_end = p + p_size;
- runtime_mheap.arena_reserved = reserved;
-
- if(((uintptr)runtime_mheap.arena_start & (PageSize-1)) != 0)
- runtime_throw("misrounded allocation in mallocinit");
-
- // Initialize the rest of the allocator.
- runtime_MHeap_Init(&runtime_mheap);
- runtime_m()->mcache = runtime_allocmcache();
-}
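-
-// Worked numbers (not part of the original file) for the 64-bit
-// reservation above, assuming MaxMem = 128 GB, PageSize = 8K and
-// 8-byte pointers:
-//	arena_size  = 128 GB
-//	bitmap_size = arena_size / (sizeof(void*)*8/4)       // = 8 GB
-//	spans_size  = arena_size / PageSize * sizeof(MSpan*) // = 128 MB
-//	p_size      = bitmap_size + spans_size + arena_size + PageSize
-// which is about 136 GB, matching the "136 GB" note in the comment.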
-
-void*
-runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
-{
- byte *p, *p_end;
- uintptr p_size;
- bool reserved;
-
- if(n > (uintptr)(h->arena_end - h->arena_used)) {
-		// We are in 32-bit mode; maybe we haven't used all the possible address space yet.
- // Reserve some more space.
- byte *new_end;
-
- p_size = ROUND(n + PageSize, 256<<20);
- new_end = h->arena_end + p_size;
- if(new_end <= h->arena_start + MaxArena32) {
- // TODO: It would be bad if part of the arena
- // is reserved and part is not.
- p = runtime_SysReserve(h->arena_end, p_size, &reserved);
- if(p == h->arena_end) {
- h->arena_end = new_end;
- h->arena_reserved = reserved;
- }
- else if(p+p_size <= h->arena_start + MaxArena32) {
- // Keep everything page-aligned.
- // Our pages are bigger than hardware pages.
- h->arena_end = p+p_size;
- h->arena_used = p + (-(uintptr)p&(PageSize-1));
- h->arena_reserved = reserved;
- } else {
- uint64 stat;
- stat = 0;
- runtime_SysFree(p, p_size, &stat);
- }
- }
- }
- if(n <= (uintptr)(h->arena_end - h->arena_used)) {
- // Keep taking from our reservation.
- p = h->arena_used;
- runtime_SysMap(p, n, h->arena_reserved, &mstats()->heap_sys);
- h->arena_used += n;
- runtime_MHeap_MapBits(h);
- runtime_MHeap_MapSpans(h);
-
- if(((uintptr)p & (PageSize-1)) != 0)
- runtime_throw("misrounded allocation in MHeap_SysAlloc");
- return p;
- }
-
- // If using 64-bit, our reservation is all we have.
- if((uintptr)(h->arena_end - h->arena_start) >= MaxArena32)
- return nil;
-
- // On 32-bit, once the reservation is gone we can
- // try to get memory at a location chosen by the OS
- // and hope that it is in the range we allocated bitmap for.
- p_size = ROUND(n, PageSize) + PageSize;
- p = runtime_SysAlloc(p_size, &mstats()->heap_sys);
- if(p == nil)
- return nil;
-
- if(p < h->arena_start || (uintptr)(p+p_size - h->arena_start) >= MaxArena32) {
- runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
- p, h->arena_start, h->arena_start+MaxArena32);
- runtime_SysFree(p, p_size, &mstats()->heap_sys);
- return nil;
- }
-
- p_end = p + p_size;
- p += -(uintptr)p & (PageSize-1);
- if(p+n > h->arena_used) {
- h->arena_used = p+n;
- if(p_end > h->arena_end)
- h->arena_end = p_end;
- runtime_MHeap_MapBits(h);
- runtime_MHeap_MapSpans(h);
- }
-
- if(((uintptr)p & (PageSize-1)) != 0)
- runtime_throw("misrounded allocation in MHeap_SysAlloc");
- return p;
-}
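-
-// Worked example (not part of the original file) of the 32-bit arena
-// growth step above: a request of n = 1 MB rounds to
-//	p_size = ROUND(n + PageSize, 256<<20);   // = 256 MB
-// so the reservation grows in 256 MB increments until it reaches
-// arena_start + MaxArena32.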
-
-static struct
-{
- Lock;
- byte* pos;
- byte* end;
-} persistent;
-
-enum
-{
- PersistentAllocChunk = 256<<10,
-	PersistentAllocMaxBlock = 64<<10, // VM reservation granularity is 64K on Windows
-};
-
-// Wrapper around SysAlloc that can allocate small chunks.
-// There is no associated free operation.
-// Intended for things like function/type/debug-related persistent data.
-// If align is 0, the default alignment (currently 8) is used.
-void*
-runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
-{
- byte *p;
-
- if(align != 0) {
- if(align&(align-1))
- runtime_throw("persistentalloc: align is not a power of 2");
- if(align > PageSize)
- runtime_throw("persistentalloc: align is too large");
- } else
- align = 8;
- if(size >= PersistentAllocMaxBlock)
- return runtime_SysAlloc(size, stat);
- runtime_lock(&persistent);
- persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
- if(persistent.pos + size > persistent.end) {
- persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats()->other_sys);
- if(persistent.pos == nil) {
- runtime_unlock(&persistent);
- runtime_throw("runtime: cannot allocate memory");
- }
- persistent.end = persistent.pos + PersistentAllocChunk;
- }
- p = persistent.pos;
- persistent.pos += size;
- runtime_unlock(&persistent);
- if(stat != &mstats()->other_sys) {
-		// re-account the allocation against the provided stat
- runtime_xadd64(stat, size);
- runtime_xadd64(&mstats()->other_sys, -(uint64)size);
- }
- return p;
-}
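-
-// Hypothetical usage sketch (not part of the original file): a small,
-// never-freed table allocated with default alignment; DebugEntry is a
-// made-up type for illustration.
-//	typedef struct { uintptr pc; const char *name; } DebugEntry;
-//	DebugEntry *tab;
-//	tab = runtime_persistentalloc(64*sizeof(DebugEntry), 0,
-//		&mstats()->other_sys);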
-
-static void
-settype(MSpan *s, void *v, uintptr typ)
-{
- uintptr size, ofs, j, t;
- uintptr ntypes, nbytes2, nbytes3;
- uintptr *data2;
- byte *data3;
-
- if(s->sizeclass == 0) {
- s->types.compression = MTypes_Single;
- s->types.data = typ;
- return;
- }
- size = s->elemsize;
- ofs = ((uintptr)v - (s->start<<PageShift)) / size;
-
- switch(s->types.compression) {
- case MTypes_Empty:
- ntypes = (s->npages << PageShift) / size;
- nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
- data3 = runtime_mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
- s->types.compression = MTypes_Bytes;
- s->types.data = (uintptr)data3;
- ((uintptr*)data3)[1] = typ;
- data3[8*sizeof(uintptr) + ofs] = 1;
- break;
-
- case MTypes_Words:
- ((uintptr*)s->types.data)[ofs] = typ;
- break;
-
- case MTypes_Bytes:
- data3 = (byte*)s->types.data;
- for(j=1; j<8; j++) {
- if(((uintptr*)data3)[j] == typ) {
- break;
- }
- if(((uintptr*)data3)[j] == 0) {
- ((uintptr*)data3)[j] = typ;
- break;
- }
- }
- if(j < 8) {
- data3[8*sizeof(uintptr) + ofs] = j;
- } else {
- ntypes = (s->npages << PageShift) / size;
- nbytes2 = ntypes * sizeof(uintptr);
- data2 = runtime_mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
- s->types.compression = MTypes_Words;
- s->types.data = (uintptr)data2;
-
- // Move the contents of data3 to data2. Then deallocate data3.
- for(j=0; j<ntypes; j++) {
- t = data3[8*sizeof(uintptr) + j];
- t = ((uintptr*)data3)[t];
- data2[j] = t;
- }
- data2[ofs] = typ;
- }
- break;
- }
-}
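-
-// Layout sketch (not part of the original file) of the MTypes_Bytes
-// encoding manipulated in settype above:
-//	data3:  [8]uintptr   type table; slot 0 means "no type recorded",
-//	                     slots 1..7 hold up to 7 distinct type words
-//	        [ntypes]byte one index per object slot into that table
-// Once an eighth distinct type appears, the span is upgraded to
-// MTypes_Words: one full uintptr type word per object slot.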
-
-uintptr
-runtime_gettype(void *v)
-{
- MSpan *s;
- uintptr t, ofs;
- byte *data;
-
- s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
- if(s != nil) {
- t = 0;
- switch(s->types.compression) {
- case MTypes_Empty:
- break;
- case MTypes_Single:
- t = s->types.data;
- break;
- case MTypes_Words:
- ofs = (uintptr)v - (s->start<<PageShift);
- t = ((uintptr*)s->types.data)[ofs/s->elemsize];
- break;
- case MTypes_Bytes:
- ofs = (uintptr)v - (s->start<<PageShift);
- data = (byte*)s->types.data;
- t = data[8*sizeof(uintptr) + ofs/s->elemsize];
- t = ((uintptr*)data)[t];
- break;
- default:
- runtime_throw("runtime_gettype: invalid compression kind");
- }
- if(0) {
- runtime_printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
- }
- return t;
- }
- return 0;
-}
-
-// Runtime stubs.
-
-void*
-runtime_mal(uintptr n)
-{
- return runtime_mallocgc(n, 0, 0);
-}
-
-func new(typ *Type) (ret *uint8) {
- ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&kindNoPointers ? FlagNoScan : 0);
-}
-
-static void*
-runtime_docnew(const Type *typ, intgo n, int32 objtyp)
-{
- if((objtyp&(PtrSize-1)) != objtyp)
- runtime_throw("runtime: invalid objtyp");
- if(n < 0 || (typ->__size > 0 && (uintptr)n > (MaxMem/typ->__size)))
- runtime_panicstring("runtime: allocation size out of range");
- return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&kindNoPointers ? FlagNoScan : 0);
-}
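-
-// Worked example (not part of the original file) of the bounds check in
-// runtime_docnew above: with MaxMem = 128 GB and typ->__size = 24, any
-// n > MaxMem/24 (about 5.7 billion elements) panics before the
-// multiplication typ->__size*n can wrap around.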
-
-// Same as runtime_new, but callable from C.
-void*
-runtime_cnew(const Type *typ)
-{
- return runtime_docnew(typ, 1, TypeInfo_SingleObject);
-}
-
-void*
-runtime_cnewarray(const Type *typ, intgo n)
-{
- return runtime_docnew(typ, n, TypeInfo_Array);
-}
-
-func GC() {
- runtime_gc(2); // force GC and do eager sweep
-}
-
-func SetFinalizer(obj Eface, finalizer Eface) {
- byte *base;
- uintptr size;
- const FuncType *ft;
- const Type *fint;
- const PtrType *ot;
-
- if((Type*)obj._type == nil) {
- runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
- goto throw;
- }
- if((((Type*)obj._type)->kind&kindMask) != GO_PTR) {
- runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *((Type*)obj._type)->__reflection);
- goto throw;
- }
- ot = (const PtrType*)obj._type;
- // As an implementation detail we do not run finalizers for zero-sized objects,
- // because we use &runtime_zerobase for all such allocations.
- if(ot->__element_type != nil && ot->__element_type->__size == 0)
- return;
-	// The following check is required for cases when a user passes a pointer to a
-	// composite literal, but the compiler makes it a pointer to a global. For example:
- // var Foo = &Object{}
- // func main() {
- // runtime.SetFinalizer(Foo, nil)
- // }
- // See issue 7656.
- if((byte*)obj.data < runtime_mheap.arena_start || runtime_mheap.arena_used <= (byte*)obj.data)
- return;
- if(!runtime_mlookup(obj.data, &base, &size, nil) || obj.data != base) {
-		// As an implementation detail we allow setting finalizers for an inner byte
-		// of an object if it could have come from the tiny allocator (see mallocgc for details).
- if(ot->__element_type == nil || (ot->__element_type->kind&kindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
- runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.data);
- goto throw;
- }
- }
- if((Type*)finalizer._type != nil) {
- runtime_createfing();
- if((((Type*)finalizer._type)->kind&kindMask) != GO_FUNC)
- goto badfunc;
- ft = (const FuncType*)finalizer._type;
- if(ft->__dotdotdot || ft->__in.__count != 1)
- goto badfunc;
- fint = *(Type**)ft->__in.__values;
- if(__go_type_descriptors_equal(fint, (Type*)obj._type)) {
- // ok - same type
- } else if((fint->kind&kindMask) == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || ((Type*)obj._type)->__uncommon == nil || ((Type*)obj._type)->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj._type)->__element_type)) {
- // ok - not same type, but both pointers,
- // one or the other is unnamed, and same element type, so assignable.
- } else if((fint->kind&kindMask) == GO_INTERFACE && ((const InterfaceType*)fint)->__methods.__count == 0) {
- // ok - satisfies empty interface
- } else if((fint->kind&kindMask) == GO_INTERFACE && getitab(fint, (Type*)obj._type, true) != nil) {
- // ok - satisfies non-empty interface
- } else
- goto badfunc;
-
- ot = (const PtrType*)obj._type;
- if(!runtime_addfinalizer(obj.data, *(FuncVal**)finalizer.data, ft, ot)) {
- runtime_printf("runtime.SetFinalizer: finalizer already set\n");
- goto throw;
- }
- } else {
- // NOTE: asking to remove a finalizer when there currently isn't one set is OK.
- runtime_removefinalizer(obj.data);
- }
- return;
-
-badfunc:
- runtime_printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *((Type*)obj._type)->__reflection, *((Type*)finalizer._type)->__reflection);
-throw:
- runtime_throw("runtime.SetFinalizer");
-}
-
-func KeepAlive(x Eface) {
- USED(x);
-}