Commit 58f7dab4 by Ian Lance Taylor

runtime: copy mstats code from Go 1.7 runtime

    
    This replaces mem.go and the C runtime_ReadMemStats function with the Go
    1.7 mstats.go.
    
    The GCStats code is commented out for now.  The corresponding gccgo code
    is in runtime/mgc0.c.
    
    The variables memstats and worldsema are shared between the Go code and
    the C code, but are not exported.  To make this work, add temporary
    accessor functions acquireWorldsema, releaseWorldsema, getMstats (the
    latter known as mstats in the C code).
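    
    As an illustration, the accessor pattern looks like this (the Go side
    appears in full in the diff below; the C prototype is the one added
    to runtime.h):
    
        // Go side: unexported, but reachable from C via go:linkname.
        //go:linkname acquireWorldsema runtime.acquireWorldsema
        func acquireWorldsema() {
            semacquire(&worldsema, false)
        }
    
        // C side (runtime.h):
        //   void runtime_acquireWorldsema(void)
        //     __asm__(GOSYM_PREFIX "runtime.acquireWorldsema");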
    
    Check the preemptoff field of m when allocating and when considering
    whether to start a GC.  This works with the new stopTheWorld and
    startTheWorld functions in Go, which are essentially the Go 1.7
    versions.
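    
    In Go terms, the allocation-time check added to the C runtime_mallocgc
    (see the malloc.goc hunk below) amounts to the following sketch (the
    FlagNoInvokeGC test is omitted here for brevity):
    
        // Only yield to a pending GC if this M may be preempted:
        // not on g0, no locks held, and preemption not disabled
        // (m.preemptoff is set by stopTheWorld while it holds worldsema).
        if gcwaiting() && g != m.g0 && m.locks == 0 && m.preemptoff == "" {
            Gosched()
        }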
    
    Change the compiler to stack allocate closures when compiling the
    runtime package.  Within the runtime package, closures do not escape.
    This is similar to what the gc compiler does, except that the gc
    compiler, when compiling the runtime package, gives an error if escape
    analysis shows that a closure does escape.  I added this here because
    the Go version of ReadMemStats calls systemstack with a closure, and
    having that allocate memory was causing some tests that measure memory
    allocations to fail.
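    
    For reference, the Go 1.7 ReadMemStats is roughly:
    
        // ReadMemStats populates m with memory allocator statistics.
        func ReadMemStats(m *MemStats) {
            stopTheWorld("read mem stats")
            systemstack(func() {
                readmemstats_m(m)
            })
            startTheWorld()
        }
    
    so the closure passed to systemstack must not itself allocate.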
    
    Reviewed-on: https://go-review.googlesource.com/30972

From-SVN: r241124
parent 5b1548fd
gcc/go/gofrontend/MERGE (old gofrontend revision, then new):
6c9070324d5b7c8483bc7c17b0a8faaa1fb1ae30
681580a3afc687ba3ff9ef240c67e8630e4306e6
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
@@ -3026,6 +3026,21 @@ Parse::create_closure(Named_object* function, Enclosing_vars* enclosing_vars,
Struct_type* st = closure_var->var_value()->type()->deref()->struct_type();
Expression* cv = Expression::make_struct_composite_literal(st, initializer,
location);
// When compiling the runtime, closures do not escape. When escape
// analysis becomes the default, and applies to closures, this
// should be changed to make it an error if a closure escapes.
if (this->gogo_->compiling_runtime()
&& this->gogo_->package_name() == "runtime")
{
Temporary_statement* ctemp = Statement::make_temporary(st, cv, location);
this->gogo_->add_statement(ctemp);
Expression* ref = Expression::make_temporary_reference(ctemp, location);
Expression* addr = Expression::make_unary(OPERATOR_AND, ref, location);
addr->unary_expression()->set_does_not_escape();
return addr;
}
return Expression::make_heap_expression(cv, location);
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// Note: the MemStats struct should be kept in sync with
// struct MStats in malloc.h
// A MemStats records statistics about the memory allocator.
type MemStats struct {
// General statistics.
Alloc uint64 // bytes allocated and still in use
TotalAlloc uint64 // bytes allocated (even if freed)
Sys uint64 // bytes obtained from system (sum of XxxSys below)
Lookups uint64 // number of pointer lookups
Mallocs uint64 // number of mallocs
Frees uint64 // number of frees
// Main allocation heap statistics.
HeapAlloc uint64 // bytes allocated and still in use
HeapSys uint64 // bytes obtained from system
HeapIdle uint64 // bytes in idle spans
HeapInuse uint64 // bytes in non-idle spans
HeapReleased uint64 // bytes released to the OS
HeapObjects uint64 // total number of allocated objects
// Low-level fixed-size structure allocator statistics.
// Inuse is bytes used now.
// Sys is bytes obtained from system.
StackInuse uint64 // bootstrap stacks
StackSys uint64
MSpanInuse uint64 // mspan structures
MSpanSys uint64
MCacheInuse uint64 // mcache structures
MCacheSys uint64
BuckHashSys uint64 // profiling bucket hash table
GCSys uint64 // GC metadata
OtherSys uint64 // other system allocations
// Garbage collector statistics.
NextGC uint64 // next run in HeapAlloc time (bytes)
LastGC uint64 // last run in absolute time (ns)
PauseTotalNs uint64
PauseNs [256]uint64 // circular buffer of recent GC pause times, most recent at [(NumGC+255)%256]
PauseEnd [256]uint64 // circular buffer of recent GC pause end times
NumGC uint32
GCCPUFraction float64 // fraction of CPU time used by GC
EnableGC bool
DebugGC bool
// Per-size allocation statistics.
// 61 is NumSizeClasses in the C code.
BySize [61]struct {
Size uint32
Mallocs uint64
Frees uint64
}
}
var Sizeof_C_MStats uintptr // filled in by malloc.goc
func init() {
var memStats MemStats
if Sizeof_C_MStats != unsafe.Sizeof(memStats) {
println(Sizeof_C_MStats, unsafe.Sizeof(memStats))
panic("MStats vs MemStatsType size mismatch")
}
}
// ReadMemStats populates m with memory allocator statistics.
func ReadMemStats(m *MemStats)
// GC runs a garbage collection.
func GC()
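
(For context, a typical caller of this API — a standalone example, not
part of the patch:)

        package main

        import (
                "fmt"
                "runtime"
        )

        func main() {
                var ms runtime.MemStats
                runtime.ReadMemStats(&ms)
                fmt.Printf("heap: %d bytes in %d objects\n", ms.HeapAlloc, ms.HeapObjects)
        }
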
@@ -367,3 +367,51 @@ func typeBitsBulkBarrier(typ *_type, p, size uintptr) {}
// Here for gccgo until we port msize.go.
func roundupsize(uintptr) uintptr
// Here for gccgo until we port mgc.go.
func GC()
// Here for gccgo until we port proc.go.
var worldsema uint32 = 1
func stopTheWorldWithSema()
func startTheWorldWithSema()
// For gccgo to call from C code.
//go:linkname acquireWorldsema runtime.acquireWorldsema
func acquireWorldsema() {
semacquire(&worldsema, false)
}
// For gccgo to call from C code.
//go:linkname releaseWorldsema runtime.releaseWorldsema
func releaseWorldsema() {
semrelease(&worldsema)
}
// Here for gccgo until we port proc.go.
func stopTheWorld(reason string) {
semacquire(&worldsema, false)
getg().m.preemptoff = reason
getg().m.gcing = 1
systemstack(stopTheWorldWithSema)
}
// Here for gccgo until we port proc.go.
func startTheWorld() {
getg().m.gcing = 0
getg().m.locks++
systemstack(startTheWorldWithSema)
// worldsema must be held over startTheWorldWithSema to ensure
// gomaxprocs cannot change while worldsema is held.
semrelease(&worldsema)
getg().m.preemptoff = ""
getg().m.locks--
}
// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
func getMstats() *mstats {
return &memstats
}
@@ -146,7 +146,7 @@ runtime_SetCPUProfileRate(intgo hz)
runtime_lock(&lk);
if(hz > 0) {
if(prof == nil) {
prof = runtime_SysAlloc(sizeof *prof, &mstats.other_sys);
prof = runtime_SysAlloc(sizeof *prof, &mstats()->other_sys);
if(prof == nil) {
runtime_printf("runtime: cpu profiling cannot allocate memory\n");
runtime_unlock(&lk);
@@ -489,33 +489,33 @@ dumpmemstats(void)
int32 i;
dumpint(TagMemStats);
dumpint(mstats.alloc);
dumpint(mstats.total_alloc);
dumpint(mstats.sys);
dumpint(mstats.nlookup);
dumpint(mstats.nmalloc);
dumpint(mstats.nfree);
dumpint(mstats.heap_alloc);
dumpint(mstats.heap_sys);
dumpint(mstats.heap_idle);
dumpint(mstats.heap_inuse);
dumpint(mstats.heap_released);
dumpint(mstats.heap_objects);
dumpint(mstats.stacks_inuse);
dumpint(mstats.stacks_sys);
dumpint(mstats.mspan_inuse);
dumpint(mstats.mspan_sys);
dumpint(mstats.mcache_inuse);
dumpint(mstats.mcache_sys);
dumpint(mstats.buckhash_sys);
dumpint(mstats.gc_sys);
dumpint(mstats.other_sys);
dumpint(mstats.next_gc);
dumpint(mstats.last_gc);
dumpint(mstats.pause_total_ns);
dumpint(mstats()->alloc);
dumpint(mstats()->total_alloc);
dumpint(mstats()->sys);
dumpint(mstats()->nlookup);
dumpint(mstats()->nmalloc);
dumpint(mstats()->nfree);
dumpint(mstats()->heap_alloc);
dumpint(mstats()->heap_sys);
dumpint(mstats()->heap_idle);
dumpint(mstats()->heap_inuse);
dumpint(mstats()->heap_released);
dumpint(mstats()->heap_objects);
dumpint(mstats()->stacks_inuse);
dumpint(mstats()->stacks_sys);
dumpint(mstats()->mspan_inuse);
dumpint(mstats()->mspan_sys);
dumpint(mstats()->mcache_inuse);
dumpint(mstats()->mcache_sys);
dumpint(mstats()->buckhash_sys);
dumpint(mstats()->gc_sys);
dumpint(mstats()->other_sys);
dumpint(mstats()->next_gc);
dumpint(mstats()->last_gc);
dumpint(mstats()->pause_total_ns);
for(i = 0; i < 256; i++)
dumpint(mstats.pause_ns[i]);
dumpint(mstats.numgc);
dumpint(mstats()->pause_ns[i]);
dumpint(mstats()->numgc);
}
static void
@@ -615,11 +615,11 @@ runtime_debug_WriteHeapDump(uintptr fd)
G *g;
// Stop the world.
runtime_semacquire(&runtime_worldsema, false);
runtime_acquireWorldsema();
m = runtime_m();
m->gcing = 1;
m->locks++;
runtime_stoptheworld();
runtime_stopTheWorldWithSema();
// Update stats so we can dump them.
// As a side effect, flushes all the MCaches so the MSpan.freelist
@@ -640,8 +640,8 @@ runtime_debug_WriteHeapDump(uintptr fd)
// Start up the world again.
m->gcing = 0;
runtime_semrelease(&runtime_worldsema);
runtime_starttheworld();
runtime_releaseWorldsema();
runtime_startTheWorldWithSema();
m->locks--;
}
@@ -51,12 +51,9 @@ package runtime
// Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
MHeap runtime_mheap;
MStats mstats;
int32 runtime_checking;
extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
extern volatile intgo runtime_MemProfileRate
__asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
@@ -81,6 +78,7 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
MLink *v, *next;
byte *tiny;
bool incallback;
MStats *pmstats;
if(size == 0) {
// All 0-length allocations use this pointer.
@@ -105,7 +103,7 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
flag |= FlagNoInvokeGC;
}
if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC)) {
if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC) && m->preemptoff.len == 0) {
runtime_gosched();
m = runtime_m();
}
@@ -252,7 +250,8 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
m->locks--;
if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
pmstats = mstats();
if(!(flag & FlagNoInvokeGC) && pmstats->heap_alloc >= pmstats->next_gc)
runtime_gc(0);
if(incallback)
@@ -472,9 +471,9 @@ runtime_purgecachedstats(MCache *c)
// Protected by either heap or GC lock.
h = &runtime_mheap;
mstats.heap_alloc += (intptr)c->local_cachealloc;
mstats()->heap_alloc += (intptr)c->local_cachealloc;
c->local_cachealloc = 0;
mstats.nlookup += c->local_nlookup;
mstats()->nlookup += c->local_nlookup;
c->local_nlookup = 0;
h->largefree += c->local_largefree;
c->local_largefree = 0;
@@ -486,13 +485,6 @@ runtime_purgecachedstats(MCache *c)
}
}
extern uintptr runtime_sizeof_C_MStats
__asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
// Size of the trailing by_size array differs between Go and C,
// _NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
// sizeof_C_MStats is what C thinks about size of Go struct.
// Initialized in mallocinit because it's defined in go/runtime/mem.go.
#define MaxArena32 (2U<<30)
@@ -508,8 +500,6 @@ runtime_mallocinit(void)
uint64 i;
bool reserved;
runtime_sizeof_C_MStats = sizeof(MStats) - (_NumSizeClasses - 61) * sizeof(mstats.by_size[0]);
p = nil;
p_size = 0;
arena_size = 0;
@@ -685,7 +675,7 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
if(n <= (uintptr)(h->arena_end - h->arena_used)) {
// Keep taking from our reservation.
p = h->arena_used;
runtime_SysMap(p, n, h->arena_reserved, &mstats.heap_sys);
runtime_SysMap(p, n, h->arena_reserved, &mstats()->heap_sys);
h->arena_used += n;
runtime_MHeap_MapBits(h);
runtime_MHeap_MapSpans(h);
@@ -703,14 +693,14 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
// try to get memory at a location chosen by the OS
// and hope that it is in the range we allocated bitmap for.
p_size = ROUND(n, PageSize) + PageSize;
p = runtime_SysAlloc(p_size, &mstats.heap_sys);
p = runtime_SysAlloc(p_size, &mstats()->heap_sys);
if(p == nil)
return nil;
if(p < h->arena_start || (uintptr)(p+p_size - h->arena_start) >= MaxArena32) {
runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
p, h->arena_start, h->arena_start+MaxArena32);
runtime_SysFree(p, p_size, &mstats.heap_sys);
runtime_SysFree(p, p_size, &mstats()->heap_sys);
return nil;
}
@@ -763,7 +753,7 @@ runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
runtime_lock(&persistent);
persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
if(persistent.pos + size > persistent.end) {
persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats()->other_sys);
if(persistent.pos == nil) {
runtime_unlock(&persistent);
runtime_throw("runtime: cannot allocate memory");
@@ -773,10 +763,10 @@ runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
p = persistent.pos;
persistent.pos += size;
runtime_unlock(&persistent);
if(stat != &mstats.other_sys) {
if(stat != &mstats()->other_sys) {
// reaccount the allocation against provided stat
runtime_xadd64(stat, size);
runtime_xadd64(&mstats.other_sys, -(uint64)size);
runtime_xadd64(&mstats()->other_sys, -(uint64)size);
}
return p;
}
@@ -83,7 +83,7 @@
typedef struct MCentral MCentral;
typedef struct MHeap MHeap;
typedef struct mspan MSpan;
typedef struct MStats MStats;
typedef struct mstats MStats;
typedef struct mlink MLink;
typedef struct mtypes MTypes;
typedef struct gcstats GCStats;
@@ -216,63 +216,10 @@ void runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*
void* runtime_FixAlloc_Alloc(FixAlloc *f);
void runtime_FixAlloc_Free(FixAlloc *f, void *p);
// Statistics.
// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
struct MStats
{
// General statistics.
uint64 alloc; // bytes allocated and still in use
uint64 total_alloc; // bytes allocated (even if freed)
uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
uint64 nlookup; // number of pointer lookups
uint64 nmalloc; // number of mallocs
uint64 nfree; // number of frees
// Statistics about malloc heap.
// protected by mheap.Lock
uint64 heap_alloc; // bytes allocated and still in use
uint64 heap_sys; // bytes obtained from system
uint64 heap_idle; // bytes in idle spans
uint64 heap_inuse; // bytes in non-idle spans
uint64 heap_released; // bytes released to the OS
uint64 heap_objects; // total number of allocated objects
// Statistics about allocation of low-level fixed-size structures.
// Protected by FixAlloc locks.
uint64 stacks_inuse; // bootstrap stacks
uint64 stacks_sys;
uint64 mspan_inuse; // MSpan structures
uint64 mspan_sys;
uint64 mcache_inuse; // MCache structures
uint64 mcache_sys;
uint64 buckhash_sys; // profiling bucket hash table
uint64 gc_sys;
uint64 other_sys;
// Statistics about garbage collector.
// Protected by mheap or stopping the world during GC.
uint64 next_gc; // next GC (in heap_alloc time)
uint64 last_gc; // last GC (in absolute time)
uint64 pause_total_ns;
uint64 pause_ns[256];
uint64 pause_end[256];
uint32 numgc;
float64 gc_cpu_fraction;
bool enablegc;
bool debuggc;
// Statistics about allocation size classes.
struct {
uint32 size;
uint64 nmalloc;
uint64 nfree;
} by_size[_NumSizeClasses];
};
extern MStats mstats
__asm__ (GOSYM_PREFIX "runtime.memStats");
void runtime_updatememstats(GCStats *stats);
extern MStats *mstats(void)
__asm__ (GOSYM_PREFIX "runtime.getMstats");
void runtime_updatememstats(GCStats *stats)
__asm__ (GOSYM_PREFIX "runtime.updatememstats");
// Size classes. Computed and initialized by InitSizes.
//
@@ -9,7 +9,7 @@ runtime_SysAlloc(uintptr n)
{
void *p;
mstats.sys += n;
mstats()->sys += n;
errno = posix_memalign(&p, PageSize, n);
if (errno > 0) {
perror("posix_memalign");
@@ -29,7 +29,7 @@ runtime_SysUnused(void *v, uintptr n)
void
runtime_SysFree(void *v, uintptr n)
{
mstats.sys -= n;
mstats()->sys -= n;
free(v);
}
@@ -36,7 +36,7 @@ RecordSpan(void *vh, byte *p)
cap = 64*1024/sizeof(all[0]);
if(cap < h->nspancap*3/2)
cap = h->nspancap*3/2;
all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats()->other_sys);
if(all == nil)
runtime_throw("runtime: cannot allocate memory");
if(h->allspans) {
@@ -44,7 +44,7 @@ RecordSpan(void *vh, byte *p)
// Don't free the old array if it's referenced by sweep.
// See the comment in mgc0.c.
if(h->allspans != runtime_mheap.sweepspans)
runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats()->other_sys);
}
h->allspans = all;
h->nspancap = cap;
@@ -56,12 +56,14 @@ RecordSpan(void *vh, byte *p)
void
runtime_MHeap_Init(MHeap *h)
{
MStats *pmstats;
uint32 i;
runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
pmstats = mstats();
runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &pmstats->mspan_sys);
runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &pmstats->mcache_sys);
runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &pmstats->other_sys);
runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &pmstats->other_sys);
// h->mapcache needs no init
for(i=0; i<nelem(h->free); i++) {
runtime_MSpanList_Init(&h->free[i]);
@@ -88,7 +90,7 @@ runtime_MHeap_MapSpans(MHeap *h)
n = ROUND(n, pagesize);
if(h->spans_mapped >= n)
return;
runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats()->other_sys);
h->spans_mapped = n;
}
@@ -173,17 +175,19 @@ MHeap_Reclaim(MHeap *h, uintptr npage)
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
MStats *pmstats;
MSpan *s;
runtime_lock(h);
mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
pmstats = mstats();
pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
runtime_m()->mcache->local_cachealloc = 0;
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
mstats.heap_inuse += npage<<PageShift;
pmstats->heap_inuse += npage<<PageShift;
if(large) {
mstats.heap_objects++;
mstats.heap_alloc += npage<<PageShift;
pmstats->heap_objects++;
pmstats->heap_alloc += npage<<PageShift;
// Swept spans are at the end of lists.
if(s->npages < nelem(h->free))
runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
@@ -237,8 +241,8 @@ HaveSpan:
runtime_MSpanList_Remove(s);
runtime_atomicstore(&s->sweepgen, h->sweepgen);
s->state = MSpanInUse;
mstats.heap_idle -= s->npages<<PageShift;
mstats.heap_released -= s->npreleased<<PageShift;
mstats()->heap_idle -= s->npages<<PageShift;
mstats()->heap_released -= s->npreleased<<PageShift;
if(s->npreleased > 0)
runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
s->npreleased = 0;
@@ -326,7 +330,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
v = runtime_MHeap_SysAlloc(h, ask);
}
if(v == nil) {
runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats()->heap_sys);
return false;
}
}
@@ -386,13 +390,16 @@ runtime_MHeap_LookupMaybe(MHeap *h, void *v)
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
MStats *pmstats;
runtime_lock(h);
mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
pmstats = mstats();
pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
runtime_m()->mcache->local_cachealloc = 0;
mstats.heap_inuse -= s->npages<<PageShift;
pmstats->heap_inuse -= s->npages<<PageShift;
if(acct) {
mstats.heap_alloc -= s->npages<<PageShift;
mstats.heap_objects--;
pmstats->heap_alloc -= s->npages<<PageShift;
pmstats->heap_objects--;
}
MHeap_FreeLocked(h, s);
runtime_unlock(h);
@@ -411,7 +418,7 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
runtime_throw("MHeap_FreeLocked - invalid free");
}
mstats.heap_idle += s->npages<<PageShift;
mstats()->heap_idle += s->npages<<PageShift;
s->state = MSpanFree;
runtime_MSpanList_Remove(s);
// Stamp newly unused spans. The scavenger will use that
@@ -472,7 +479,7 @@ scavengelist(MSpan *list, uint64 now, uint64 limit)
for(s=list->next; s != list; s=s->next) {
if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
released = (s->npages - s->npreleased) << PageShift;
mstats.heap_released += released;
mstats()->heap_released += released;
sumreleased += released;
s->npreleased = s->npages;
@@ -508,8 +515,8 @@ scavenge(int32 k, uint64 now, uint64 limit)
if(sumreleased > 0)
runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
k, mstats()->heap_inuse>>20, mstats()->heap_idle>>20, mstats()->heap_sys>>20,
mstats()->heap_released>>20, (mstats()->heap_sys - mstats()->heap_released)>>20);
}
}
@@ -550,7 +557,7 @@ runtime_MHeap_Scavenger(void* dummy)
runtime_lock(h);
unixnow = runtime_unixnanotime();
if(unixnow - mstats.last_gc > forcegc) {
if(unixnow - mstats()->last_gc > forcegc) {
runtime_unlock(h);
// The scavenger can not block other goroutines,
// otherwise deadlock detector can fire spuriously.
......
@@ -90,7 +90,7 @@ stkbucket(int32 typ, uintptr size, Location *stk, int32 nstk, bool alloc)
Bucket *b;
if(buckhash == nil) {
buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0], &mstats.buckhash_sys);
buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0], &mstats()->buckhash_sys);
if(buckhash == nil)
runtime_throw("runtime: cannot allocate memory");
}
@@ -127,7 +127,7 @@ stkbucket(int32 typ, uintptr size, Location *stk, int32 nstk, bool alloc)
if(!alloc)
return nil;
b = runtime_persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats.buckhash_sys);
b = runtime_persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats()->buckhash_sys);
bucketmem += sizeof *b + nstk*sizeof stk[0];
runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
b->typ = typ;
@@ -408,11 +408,11 @@ func Stack(b Slice, all bool) (n int) {
pc = (byte*)(uintptr)runtime_getcallerpc(&b);
if(all) {
runtime_semacquire(&runtime_worldsema, false);
runtime_acquireWorldsema();
runtime_m()->gcing = 1;
runtime_stoptheworld();
enablegc = mstats.enablegc;
mstats.enablegc = false;
runtime_stopTheWorldWithSema();
enablegc = mstats()->enablegc;
mstats()->enablegc = false;
}
if(b.__count == 0)
@@ -436,9 +436,9 @@ func Stack(b Slice, all bool) (n int) {
if(all) {
runtime_m()->gcing = 0;
mstats.enablegc = enablegc;
runtime_semrelease(&runtime_worldsema);
runtime_starttheworld();
mstats()->enablegc = enablegc;
runtime_releaseWorldsema();
runtime_startTheWorldWithSema();
}
}
@@ -469,9 +469,9 @@ func GoroutineProfile(b Slice) (n int, ok bool) {
ok = false;
n = runtime_gcount();
if(n <= b.__count) {
runtime_semacquire(&runtime_worldsema, false);
runtime_acquireWorldsema();
runtime_m()->gcing = 1;
runtime_stoptheworld();
runtime_stopTheWorldWithSema();
n = runtime_gcount();
if(n <= b.__count) {
@@ -488,8 +488,8 @@ func GoroutineProfile(b Slice) (n int, ok bool) {
}
runtime_m()->gcing = 0;
runtime_semrelease(&runtime_worldsema);
runtime_starttheworld();
runtime_releaseWorldsema();
runtime_startTheWorldWithSema();
}
}
@@ -60,6 +60,7 @@ runtime_InitSizes(void)
int32 align, sizeclass, size, nextsize, n;
uint32 i;
uintptr allocsize, npages;
MStats *pmstats;
// Initialize the runtime_class_to_size table (and choose class sizes in the process).
runtime_class_to_size[0] = 0;
@@ -134,8 +135,9 @@ runtime_InitSizes(void)
}
// Copy out for statistics table.
pmstats = mstats();
for(i=0; i<nelem(runtime_class_to_size); i++)
mstats.by_size[i].size = runtime_class_to_size[i];
pmstats->by_size[i].size = runtime_class_to_size[i];
return;
dump:
@@ -459,7 +459,7 @@ allocPollDesc(void)
n = 1;
// Must be in non-GC memory because can be referenced
// only from epoll/kqueue internals.
pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats()->other_sys);
for(i = 0; i < n; i++) {
pd[i].link = pollcache.first;
pollcache.first = &pd[i];
@@ -149,7 +149,7 @@ runtime_netpoll(bool block)
if(inuse) {
if(!allocatedfds) {
prfds = runtime_SysAlloc(4 * sizeof fds, &mstats.other_sys);
prfds = runtime_SysAlloc(4 * sizeof fds, &mstats()->other_sys);
pwfds = prfds + 1;
pefds = pwfds + 1;
ptfds = pefds + 1;
@@ -239,7 +239,7 @@ runtime_netpoll(bool block)
goto retry;
if(allocatedfds) {
runtime_SysFree(prfds, 4 * sizeof fds, &mstats.other_sys);
runtime_SysFree(prfds, 4 * sizeof fds, &mstats()->other_sys);
} else {
runtime_lock(&selectlock);
inuse = false;
@@ -508,7 +508,7 @@ runtime_schedinit(void)
procresize(procs);
// Can not enable GC until all roots are registered.
// mstats.enablegc = 1;
// mstats()->enablegc = 1;
}
extern void main_init(void) __asm__ (GOSYM_PREFIX "__go_init_main");
@@ -633,7 +633,7 @@ runtime_main(void* dummy __attribute__((unused)))
// For gccgo we have to wait until after main is initialized
// to enable GC, because initializing main registers the GC
// roots.
mstats.enablegc = 1;
mstats()->enablegc = 1;
if(runtime_isarchive) {
// This is not a complete program, but is instead a
@@ -951,7 +951,7 @@ runtime_freezetheworld(void)
}
void
runtime_stoptheworld(void)
runtime_stopTheWorldWithSema(void)
{
int32 i;
uint32 s;
@@ -1001,7 +1001,7 @@ mhelpgc(void)
}
void
runtime_starttheworld(void)
runtime_startTheWorldWithSema(void)
{
P *p, *p1;
M *mp;
@@ -1045,7 +1045,7 @@ runtime_starttheworld(void)
mp = (M*)p->m;
p->m = 0;
if(mp->nextp)
runtime_throw("starttheworld: inconsistent mp->nextp");
runtime_throw("startTheWorldWithSema: inconsistent mp->nextp");
mp->nextp = (uintptr)p;
runtime_notewakeup(&mp->park);
} else {
@@ -2373,7 +2373,7 @@ runtime_malg(int32 stacksize, byte** ret_stack, uintptr* ret_stacksize)
// 32-bit mode, the Go allocation space is all of
// memory anyhow.
if(sizeof(void*) == 8) {
void *p = runtime_SysAlloc(stacksize, &mstats.other_sys);
void *p = runtime_SysAlloc(stacksize, &mstats()->other_sys);
if(p == nil)
runtime_throw("runtime: cannot allocate memory for goroutine stack");
*ret_stack = (byte*)p;
@@ -2583,13 +2583,13 @@ runtime_gomaxprocsfunc(int32 n)
}
runtime_unlock(&runtime_sched);
runtime_semacquire(&runtime_worldsema, false);
runtime_acquireWorldsema();
g->m->gcing = 1;
runtime_stoptheworld();
runtime_stopTheWorldWithSema();
newprocs = n;
g->m->gcing = 0;
runtime_semrelease(&runtime_worldsema);
runtime_starttheworld();
runtime_releaseWorldsema();
runtime_startTheWorldWithSema();
return ret;
}
@@ -448,9 +448,14 @@ int32 runtime_setmaxthreads(int32);
G* runtime_timejump(void);
void runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*));
void runtime_stoptheworld(void);
void runtime_starttheworld(void);
extern uint32 runtime_worldsema;
void runtime_stopTheWorldWithSema(void)
__asm__(GOSYM_PREFIX "runtime.stopTheWorldWithSema");
void runtime_startTheWorldWithSema(void)
__asm__(GOSYM_PREFIX "runtime.startTheWorldWithSema");
void runtime_acquireWorldsema(void)
__asm__(GOSYM_PREFIX "runtime.acquireWorldsema");
void runtime_releaseWorldsema(void)
__asm__(GOSYM_PREFIX "runtime.releaseWorldsema");
/*
* mutual exclusion locks. in the uncontended case,