Commit 1f0be9ee by Ian Lance Taylor

runtime: copy mprof code from Go 1.7 runtime

    
    Also create a gccgo version of some of the traceback code in
    traceback_gccgo.go, replacing some code currently in C.
    
    This required modifying the compiler so that when compiling the runtime
    package a slice expression does not cause a local array variable to
    escape to the heap.
    
    Reviewed-on: https://go-review.googlesource.com/31230

From-SVN: r241189
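
To make the compiler change concrete, here is a minimal, self-contained Go sketch (an illustration, not part of the commit). Runtime code such as mProf_Malloc in this patch fills a fixed-size local array and passes a slice of it to a callers-style helper; previously the gccgo frontend treated that slice expression as taking the array's address and forced the array to the heap. The helper name fill and the fake PC values below are made up for the sketch.

package main

// Illustrative only: fill stands in for runtime.callers, which writes
// program counters into the slice it is given and reports how many
// entries it filled.
func fill(buf []uintptr) int {
	for i := range buf {
		buf[i] = uintptr(i) // fake PC values for the sketch
	}
	return len(buf)
}

const maxStack = 32

func sample() int {
	// A local array sliced and handed to a helper, mirroring
	// mProf_Malloc's "var stk [maxStack]location; callers(4, stk[:])".
	// The compiler change lets this slice expression, when the runtime
	// package itself is being compiled, leave stk on the stack.
	var stk [maxStack]uintptr
	return fill(stk[:])
}

func main() {
	println(sample())
}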
parent 2045acd9
993840643e27e52cda7e86e6a775f54443ea5d07
ec3dc927da71d15cac48a13c0fb0c1f94572d0d2
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
...@@ -10308,7 +10308,7 @@ Array_index_expression::do_determine_type(const Type_context*)
// Check types of an array index.
void
Array_index_expression::do_check_types(Gogo*)
Array_index_expression::do_check_types(Gogo* gogo)
{
Numeric_constant nc;
unsigned long v;
...@@ -10427,7 +10427,18 @@ Array_index_expression::do_check_types(Gogo*)
if (!this->array_->is_addressable())
this->report_error(_("slice of unaddressable value"));
else
this->array_->address_taken(true);
{
bool escapes = true;
// When compiling the runtime, a slice operation does not
// cause local variables to escape. When escape analysis
// becomes the default, this should be changed to make it an
// error if we have a slice operation that escapes.
if (gogo->compiling_runtime() && gogo->package_name() == "runtime")
escapes = false;
this->array_->address_taken(escapes);
}
}
}
......
...@@ -478,7 +478,6 @@ runtime_files = \
runtime/go-signal.c \
runtime/go-strcmp.c \
runtime/go-strslice.c \
runtime/go-traceback.c \
runtime/go-type-complex.c \
runtime/go-type-eface.c \
runtime/go-type-float.c \
...@@ -515,7 +514,6 @@ runtime_files = \
go-iface.c \
lfstack.c \
malloc.c \
mprof.c \
netpoll.c \
rdebug.c \
reflect.c \
......
...@@ -253,17 +253,17 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
go-nanotime.lo go-now.lo go-new.lo go-nosys.lo go-panic.lo \
go-recover.lo go-reflect-call.lo go-runtime-error.lo \
go-setenv.lo go-signal.lo go-strcmp.lo go-strslice.lo \
go-traceback.lo go-type-complex.lo go-type-eface.lo \
go-type-float.lo go-type-identity.lo go-type-interface.lo \
go-type-string.lo go-typedesc-equal.lo go-unsafe-new.lo \
go-unsafe-newarray.lo go-unsafe-pointer.lo go-unsetenv.lo \
go-unwind.lo go-varargs.lo env_posix.lo heapdump.lo mcache.lo \
mcentral.lo $(am__objects_1) mfixalloc.lo mgc0.lo mheap.lo \
msize.lo $(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
runtime.lo signal_unix.lo thread.lo $(am__objects_3) yield.lo \
$(am__objects_4) go-iface.lo lfstack.lo malloc.lo mprof.lo \
netpoll.lo rdebug.lo reflect.lo runtime1.lo sigqueue.lo \
time.lo $(am__objects_5)
go-type-complex.lo go-type-eface.lo go-type-float.lo \
go-type-identity.lo go-type-interface.lo go-type-string.lo \
go-typedesc-equal.lo go-unsafe-new.lo go-unsafe-newarray.lo \
go-unsafe-pointer.lo go-unsetenv.lo go-unwind.lo go-varargs.lo \
env_posix.lo heapdump.lo mcache.lo mcentral.lo \
$(am__objects_1) mfixalloc.lo mgc0.lo mheap.lo msize.lo \
$(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
runtime.lo signal_unix.lo thread.lo $(am__objects_3) yield.lo \
$(am__objects_4) go-iface.lo lfstack.lo malloc.lo netpoll.lo \
rdebug.lo reflect.lo runtime1.lo sigqueue.lo time.lo \
$(am__objects_5)
am_libgo_llgo_la_OBJECTS = $(am__objects_6)
libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
...@@ -877,7 +877,6 @@ runtime_files = \
runtime/go-signal.c \
runtime/go-strcmp.c \
runtime/go-strslice.c \
runtime/go-traceback.c \
runtime/go-type-complex.c \
runtime/go-type-eface.c \
runtime/go-type-float.c \
...@@ -914,7 +913,6 @@ runtime_files = \
go-iface.c \
lfstack.c \
malloc.c \
mprof.c \
netpoll.c \
rdebug.c \
reflect.c \
...@@ -1593,7 +1591,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-signal.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-strcmp.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-strslice.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-traceback.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-complex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-eface.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-float.Plo@am__quote@
...@@ -1620,7 +1617,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mfixalloc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mgc0.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mheap.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mprof.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/msize.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netpoll.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netpoll_epoll.Plo@am__quote@
...@@ -1986,13 +1982,6 @@ go-strslice.lo: runtime/go-strslice.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-strslice.lo `test -f 'runtime/go-strslice.c' || echo '$(srcdir)/'`runtime/go-strslice.c
go-traceback.lo: runtime/go-traceback.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-traceback.lo -MD -MP -MF $(DEPDIR)/go-traceback.Tpo -c -o go-traceback.lo `test -f 'runtime/go-traceback.c' || echo '$(srcdir)/'`runtime/go-traceback.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-traceback.Tpo $(DEPDIR)/go-traceback.Plo
@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-traceback.c' object='go-traceback.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-traceback.lo `test -f 'runtime/go-traceback.c' || echo '$(srcdir)/'`runtime/go-traceback.c
go-type-complex.lo: runtime/go-type-complex.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-type-complex.lo -MD -MP -MF $(DEPDIR)/go-type-complex.Tpo -c -o go-type-complex.lo `test -f 'runtime/go-type-complex.c' || echo '$(srcdir)/'`runtime/go-type-complex.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-type-complex.Tpo $(DEPDIR)/go-type-complex.Plo
......
...@@ -4,18 +4,6 @@
package runtime
// Breakpoint executes a breakpoint trap.
func Breakpoint()
// LockOSThread wires the calling goroutine to its current operating system thread.
// Until the calling goroutine exits or calls UnlockOSThread, it will always
// execute in that thread, and no other goroutine can.
func LockOSThread()
// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
func UnlockOSThread()
// GOMAXPROCS sets the maximum number of CPUs that can be executing
// simultaneously and returns the previous setting. If n < 1, it does not
// change the current setting.
...@@ -36,145 +24,6 @@ func NumCgoCall() int64
// NumGoroutine returns the number of goroutines that currently exist.
func NumGoroutine() int
// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
AllocBytes, FreeBytes int64 // number of bytes allocated, freed
AllocObjects, FreeObjects int64 // number of objects allocated, freed
Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
return r.AllocObjects - r.FreeObjects
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
for i, v := range r.Stack0 {
if v == 0 {
return r.Stack0[0:i]
}
}
return r.Stack0[0:]
}
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)
// A StackRecord describes a single execution stack.
type StackRecord struct {
Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
for i, v := range r.Stack0 {
if v == 0 {
return r.Stack0[0:i]
}
}
return r.Stack0[0:]
}
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool)
// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool)
// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
// blocking until data is available. If profiling is turned off and all the profile
// data accumulated while it was on has been returned, CPUProfile returns nil.
// The caller must save the returned data before calling CPUProfile again.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// CPUProfile directly.
func CPUProfile() []byte
// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
// If hz <= 0, SetCPUProfileRate turns off profiling.
// If the profiler is on, the rate cannot be changed without first turning it off.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// SetCPUProfileRate directly.
func SetCPUProfileRate(hz int)
// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int)
// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
Count int64
Cycles int64
StackRecord
}
// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool)
// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int
// Get field tracking information. Only fields with a tag go:"track"
// are tracked. This function will add every such field that is
// referenced to the map. The keys in the map will be
......
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.
package runtime
import (
"runtime/internal/atomic"
"unsafe"
)
// Export temporarily for gccgo's C code to call:
//go:linkname mProf_Malloc runtime.mProf_Malloc
//go:linkname mProf_Free runtime.mProf_Free
//go:linkname mProf_GC runtime.mProf_GC
//go:linkname tracealloc runtime.tracealloc
//go:linkname tracefree runtime.tracefree
//go:linkname tracegc runtime.tracegc
//go:linkname iterate_memprof runtime.iterate_memprof
// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex
// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.
const (
// profile types
memProfile bucketType = 1 + iota
blockProfile
// size of bucket hash table
buckHashSize = 179999
// max depth of stack to record in bucket
maxStack = 32
)
type bucketType int
// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
type bucket struct {
next *bucket
allnext *bucket
typ bucketType // memBucket or blockBucket
hash uintptr
size uintptr
nstk uintptr
}
// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
// The following complex 3-stage scheme of stats accumulation
// is required to obtain a consistent picture of mallocs and frees
// for some point in time.
// The problem is that mallocs come in real time, while frees
// come only after a GC during concurrent sweeping. So if we would
// naively count them, we would get a skew toward mallocs.
//
// Mallocs are accounted in recent stats.
// Explicit frees are accounted in recent stats.
// GC frees are accounted in prev stats.
// After GC prev stats are added to final stats and
// recent stats are moved into prev stats.
allocs uintptr
frees uintptr
alloc_bytes uintptr
free_bytes uintptr
// changes between next-to-last GC and last GC
prev_allocs uintptr
prev_frees uintptr
prev_alloc_bytes uintptr
prev_free_bytes uintptr
// changes since last GC
recent_allocs uintptr
recent_frees uintptr
recent_alloc_bytes uintptr
recent_free_bytes uintptr
}
// A blockRecord is the bucket data for a bucket of type blockProfile,
// part of the blocking profile.
type blockRecord struct {
count int64
cycles int64
}
var (
mbuckets *bucket // memory profile buckets
bbuckets *bucket // blocking profile buckets
buckhash *[179999]*bucket
bucketmem uintptr
)
// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(location{})
switch typ {
default:
throw("invalid profile bucket type")
case memProfile:
size += unsafe.Sizeof(memRecord{})
case blockProfile:
size += unsafe.Sizeof(blockRecord{})
}
b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
bucketmem += size
b.typ = typ
b.nstk = uintptr(nstk)
return b
}
// stk returns the slice in b holding the stack.
func (b *bucket) stk() []location {
stk := (*[maxStack]location)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
return stk[:b.nstk:b.nstk]
}
// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
if b.typ != memProfile {
throw("bad use of bucket.mp")
}
data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
return (*memRecord)(data)
}
// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
if b.typ != blockProfile {
throw("bad use of bucket.bp")
}
data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
return (*blockRecord)(data)
}
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket {
if buckhash == nil {
buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
if buckhash == nil {
throw("runtime: cannot allocate memory")
}
}
// Hash stack.
var h uintptr
for _, loc := range stk {
h += loc.pc
h += h << 10
h ^= h >> 6
}
// hash in size
h += size
h += h << 10
h ^= h >> 6
// finalize
h += h << 3
h ^= h >> 11
i := int(h % buckHashSize)
for b := buckhash[i]; b != nil; b = b.next {
if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
return b
}
}
if !alloc {
return nil
}
// Create new bucket.
b := newBucket(typ, len(stk))
copy(b.stk(), stk)
b.hash = h
b.size = size
b.next = buckhash[i]
buckhash[i] = b
if typ == memProfile {
b.allnext = mbuckets
mbuckets = b
} else {
b.allnext = bbuckets
bbuckets = b
}
return b
}
func eqslice(x, y []location) bool {
if len(x) != len(y) {
return false
}
for i, xi := range x {
if xi != y[i] {
return false
}
}
return true
}
func mprof_GC() {
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
mp.allocs += mp.prev_allocs
mp.frees += mp.prev_frees
mp.alloc_bytes += mp.prev_alloc_bytes
mp.free_bytes += mp.prev_free_bytes
mp.prev_allocs = mp.recent_allocs
mp.prev_frees = mp.recent_frees
mp.prev_alloc_bytes = mp.recent_alloc_bytes
mp.prev_free_bytes = mp.recent_free_bytes
mp.recent_allocs = 0
mp.recent_frees = 0
mp.recent_alloc_bytes = 0
mp.recent_free_bytes = 0
}
}
// Record that a gc just happened: all the 'recent' statistics are now real.
func mProf_GC() {
lock(&proflock)
mprof_GC()
unlock(&proflock)
}
// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
var stk [maxStack]location
nstk := callers(4, stk[:])
lock(&proflock)
b := stkbucket(memProfile, size, stk[:nstk], true)
mp := b.mp()
mp.recent_allocs++
mp.recent_alloc_bytes += size
unlock(&proflock)
// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
// This reduces potential contention and chances of deadlocks.
// Since the object must be alive during call to mProf_Malloc,
// it's fine to do this non-atomically.
systemstack(func() {
setprofilebucket(p, b)
})
}
// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
lock(&proflock)
mp := b.mp()
mp.prev_frees++
mp.prev_free_bytes += size
unlock(&proflock)
}
var blockprofilerate uint64 // in CPU ticks
// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
var r int64
if rate <= 0 {
r = 0 // disable profiling
} else if rate == 1 {
r = 1 // profile everything
} else {
// convert ns to cycles, use float64 to prevent overflow during multiplication
r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
if r == 0 {
r = 1
}
}
atomic.Store64(&blockprofilerate, uint64(r))
}
func blockevent(cycles int64, skip int) {
if cycles <= 0 {
cycles = 1
}
rate := int64(atomic.Load64(&blockprofilerate))
if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
return
}
gp := getg()
var nstk int
var stk [maxStack]location
if gp.m.curg == nil || gp.m.curg == gp {
nstk = callers(skip, stk[:])
} else {
// FIXME: This should get a traceback of gp.m.curg.
// nstk = gcallers(gp.m.curg, skip, stk[:])
nstk = callers(skip, stk[:])
}
lock(&proflock)
b := stkbucket(blockProfile, 0, stk[:nstk], true)
b.bp().count++
b.bp().cycles += cycles
unlock(&proflock)
}
// Go interface to profile data.
// A StackRecord describes a single execution stack.
type StackRecord struct {
Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
for i, v := range r.Stack0 {
if v == 0 {
return r.Stack0[0:i]
}
}
return r.Stack0[0:]
}
// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
AllocBytes, FreeBytes int64 // number of bytes allocated, freed
AllocObjects, FreeObjects int64 // number of objects allocated, freed
Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
return r.AllocObjects - r.FreeObjects
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
for i, v := range r.Stack0 {
if v == 0 {
return r.Stack0[0:i]
}
}
return r.Stack0[0:]
}
// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
lock(&proflock)
clear := true
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
if inuseZero || mp.alloc_bytes != mp.free_bytes {
n++
}
if mp.allocs != 0 || mp.frees != 0 {
clear = false
}
}
if clear {
// Absolutely no data, suggesting that a garbage collection
// has not yet happened. In order to allow profiling when
// garbage collection is disabled from the beginning of execution,
// accumulate stats as if a GC just happened, and recount buckets.
mprof_GC()
mprof_GC()
n = 0
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
if inuseZero || mp.alloc_bytes != mp.free_bytes {
n++
}
}
}
if n <= len(p) {
ok = true
idx := 0
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
if inuseZero || mp.alloc_bytes != mp.free_bytes {
record(&p[idx], b)
idx++
}
}
}
unlock(&proflock)
return
}
// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
mp := b.mp()
r.AllocBytes = int64(mp.alloc_bytes)
r.FreeBytes = int64(mp.free_bytes)
r.AllocObjects = int64(mp.allocs)
r.FreeObjects = int64(mp.frees)
for i, loc := range b.stk() {
if i >= len(r.Stack0) {
break
}
r.Stack0[i] = loc.pc
}
for i := int(b.nstk); i < len(r.Stack0); i++ {
r.Stack0[i] = 0
}
}
func iterate_memprof(fn func(*bucket, uintptr, *location, uintptr, uintptr, uintptr)) {
lock(&proflock)
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
fn(b, b.nstk, &b.stk()[0], b.size, mp.allocs, mp.frees)
}
unlock(&proflock)
}
// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
Count int64
Cycles int64
StackRecord
}
// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
lock(&proflock)
for b := bbuckets; b != nil; b = b.allnext {
n++
}
if n <= len(p) {
ok = true
for b := bbuckets; b != nil; b = b.allnext {
bp := b.bp()
r := &p[0]
r.Count = bp.count
r.Cycles = bp.cycles
i := 0
var loc location
for i, loc = range b.stk() {
if i >= len(r.Stack0) {
break
}
r.Stack0[i] = loc.pc
}
for ; i < len(r.Stack0); i++ {
r.Stack0[i] = 0
}
p = p[1:]
}
}
unlock(&proflock)
return
}
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
first := (*m)(atomic.Loadp(unsafe.Pointer(allm())))
for mp := first; mp != nil; mp = mp.alllink {
n++
}
if n <= len(p) {
ok = true
i := 0
for mp := first; mp != nil; mp = mp.alllink {
for j := range mp.createstack {
p[i].Stack0[j] = mp.createstack[j].pc
}
i++
}
}
return
}
// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
gp := getg()
isOK := func(gp1 *g) bool {
// Checking isSystemGoroutine here makes GoroutineProfile
// consistent with both NumGoroutine and Stack.
return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1)
}
stopTheWorld("profile")
n = 1
for _, gp1 := range allgs() {
if isOK(gp1) {
n++
}
}
if n <= len(p) {
ok = true
r := p
// Save current goroutine.
saveg(gp, &r[0])
r = r[1:]
// Save other goroutines.
for _, gp1 := range allgs() {
if isOK(gp1) {
if len(r) == 0 {
// Should be impossible, but better to return a
// truncated profile than to crash the entire process.
break
}
saveg(gp1, &r[0])
r = r[1:]
}
}
}
startTheWorld()
return n, ok
}
func saveg(gp *g, r *StackRecord) {
if gp == getg() {
var locbuf [32]location
n := callers(1, locbuf[:])
for i := 0; i < n; i++ {
r.Stack0[i] = locbuf[i].pc
}
if n < len(r.Stack0) {
r.Stack0[n] = 0
}
} else {
// FIXME: Not implemented.
r.Stack0[0] = 0
}
}
// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
if all {
stopTheWorld("stack trace")
}
n := 0
if len(buf) > 0 {
gp := getg()
// Force traceback=1 to override GOTRACEBACK setting,
// so that Stack's results are consistent.
// GOTRACEBACK is only about crash dumps.
gp.m.traceback = 1
gp.writebuf = buf[0:0:len(buf)]
goroutineheader(gp)
traceback()
if all {
tracebackothers(gp)
}
gp.m.traceback = 0
n = len(gp.writebuf)
gp.writebuf = nil
}
if all {
startTheWorld()
}
return n
}
// Tracing of alloc/free/gc.
var tracelock mutex
func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
lock(&tracelock)
gp := getg()
gp.m.traceback = 2
if typ == nil {
print("tracealloc(", p, ", ", hex(size), ")\n")
} else {
print("tracealloc(", p, ", ", hex(size), ", ", *typ.string, ")\n")
}
if gp.m.curg == nil || gp == gp.m.curg {
goroutineheader(gp)
traceback()
} else {
goroutineheader(gp.m.curg)
// FIXME: Can't do traceback of other g.
}
print("\n")
gp.m.traceback = 0
unlock(&tracelock)
}
func tracefree(p unsafe.Pointer, size uintptr) {
lock(&tracelock)
gp := getg()
gp.m.traceback = 2
print("tracefree(", p, ", ", hex(size), ")\n")
goroutineheader(gp)
traceback()
print("\n")
gp.m.traceback = 0
unlock(&tracelock)
}
func tracegc() {
lock(&tracelock)
gp := getg()
gp.m.traceback = 2
print("tracegc()\n")
// running on m->g0 stack; show all non-g0 goroutines
tracebackothers(gp)
print("end tracegc\n")
print("\n")
gp.m.traceback = 0
unlock(&tracelock)
}
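
As a usage note for the exported API in the new mprof.go above, the following self-contained sketch (not part of the commit) shows the usual two-pass pattern for runtime.MemProfile: size the record slice from the returned count, retry until it fits, then read InUseBytes, InUseObjects and Stack from each record.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Two-pass pattern: ask for the record count, then retry with a
	// slice that is large enough (the count may grow in between).
	var records []runtime.MemProfileRecord
	for {
		n, ok := runtime.MemProfile(records, false)
		if ok {
			records = records[:n]
			break
		}
		records = make([]runtime.MemProfileRecord, n+50)
	}
	for _, r := range records {
		fmt.Printf("in use: %d bytes in %d objects, %d stack frames\n",
			r.InUseBytes(), r.InUseObjects(), len(r.Stack()))
	}
}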
...@@ -394,7 +394,7 @@ type g struct {
issystem bool // do not output in stack dump
isbackground bool // ignore in deadlock detector
traceback *traceback // stack traceback buffer
traceback *tracebackg // stack traceback buffer
context g_ucontext_t // saved context for setcontext
stackcontext [10]unsafe.Pointer // split-stack context
...@@ -801,21 +801,6 @@ var (
// array.
type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(unsafe.Pointer(nil))]unsafe.Pointer
// traceback is used to collect stack traces from other goroutines.
type traceback struct {
gp *g
locbuf [_TracebackMaxFrames]location
c int
}
// location is a location in the program, used for backtraces.
type location struct {
pc uintptr
filename string
function string
lineno int
}
// cgoMal tracks allocations made by _cgo_allocate
// FIXME: _cgo_allocate has been removed from gc and can probably be
// removed from gccgo too.
......
...@@ -307,11 +307,6 @@ func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
func goparkunlock(*mutex, string, byte, int)
func goready(*g, int)
// Temporary for gccgo until we port mprof.go.
var blockprofilerate uint64
func blockevent(cycles int64, skip int) {}
// Temporary hack for gccgo until we port proc.go.
//go:nosplit
func acquireSudog() *sudog {
...@@ -428,3 +423,24 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
func cpuprofAdd(stk []uintptr) {
cpuprof.add(stk)
}
// For gccgo until we port proc.go.
func Breakpoint()
func LockOSThread()
func UnlockOSThread()
func allm() *m
func allgs() []*g
//go:nosplit
func readgstatus(gp *g) uint32 {
return atomic.Load(&gp.atomicstatus)
}
// Temporary for gccgo until we port malloc.go
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer
// Temporary for gccgo until we port mheap.go
func setprofilebucket(p unsafe.Pointer, b *bucket)
// Currently in proc.c.
func tracebackothers(*g)
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Traceback support for gccgo.
// The actual traceback code is written in C.
package runtime
import (
"runtime/internal/sys"
_ "unsafe" // for go:linkname
)
// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
// These are temporary for C runtime code to call.
//go:linkname traceback runtime.traceback
//go:linkname printtrace runtime.printtrace
//go:linkname goroutineheader runtime.goroutineheader
//go:linkname printcreatedby runtime.printcreatedby
func printcreatedby(gp *g) {
// Show what created goroutine, except main goroutine (goid 1).
pc := gp.gopc
tracepc := pc // back up to CALL instruction for funcfileline.
entry := funcentry(tracepc)
if entry != 0 && tracepc > entry {
tracepc -= sys.PCQuantum
}
function, file, line := funcfileline(tracepc, -1)
if function != "" && showframe(function, gp) && gp.goid != 1 {
print("created by ", function, "\n")
print("\t", file, ":", line)
if entry != 0 && pc > entry {
print(" +", hex(pc-entry))
}
print("\n")
}
}
// tracebackg is used to collect stack traces from other goroutines.
type tracebackg struct {
gp *g
locbuf [_TracebackMaxFrames]location
c int
}
// location is a location in the program, used for backtraces.
type location struct {
pc uintptr
filename string
function string
lineno int
}
//extern runtime_callers
func c_callers(skip int32, locbuf *location, max int32, keepThunks bool) int32
// callers returns a stack trace of the current goroutine.
// The gc version of callers takes []uintptr, but we take []location.
func callers(skip int, locbuf []location) int {
n := c_callers(int32(skip), &locbuf[0], int32(len(locbuf)), false)
return int(n)
}
// traceback prints a traceback of the current goroutine.
// This differs from the gc version, which is given pc, sp, lr and g and
// can print a traceback of any goroutine.
func traceback() {
var locbuf [100]location
c := c_callers(1, &locbuf[0], int32(len(locbuf)), false)
printtrace(locbuf[:c], getg())
}
// printtrace prints a traceback from locbuf.
func printtrace(locbuf []location, gp *g) {
for i := range locbuf {
if showframe(locbuf[i].function, gp) {
print(locbuf[i].function, "\n\t", locbuf[i].filename, ":", locbuf[i].lineno)
}
}
}
// showframe returns whether to print a frame in a traceback.
// name is the function name.
func showframe(name string, gp *g) bool {
g := getg()
if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
return true
}
level, _, _ := gotraceback()
// Special case: always show runtime.gopanic frame, so that we can
// see where a panic started in the middle of a stack trace.
// See golang.org/issue/5832.
// __go_panic is the current gccgo name.
if name == "runtime.gopanic" || name == "__go_panic" {
return true
}
return level > 1 || contains(name, ".") && (!hasprefix(name, "runtime.") || isExportedRuntime(name))
}
// isExportedRuntime reports whether name is an exported runtime function.
// It is only for runtime functions, so ASCII A-Z is fine.
func isExportedRuntime(name string) bool {
const n = len("runtime.")
return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
}
var gStatusStrings = [...]string{
_Gidle: "idle",
_Grunnable: "runnable",
_Grunning: "running",
_Gsyscall: "syscall",
_Gwaiting: "waiting",
_Gdead: "dead",
_Gcopystack: "copystack",
}
func goroutineheader(gp *g) {
gpstatus := readgstatus(gp)
isScan := gpstatus&_Gscan != 0
gpstatus &^= _Gscan // drop the scan bit
// Basic string status
var status string
if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
status = gStatusStrings[gpstatus]
} else {
status = "???"
}
// Override.
if gpstatus == _Gwaiting && gp.waitreason != "" {
status = gp.waitreason
}
// approx time the G is blocked, in minutes
var waitfor int64
if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
waitfor = (nanotime() - gp.waitsince) / 60e9
}
print("goroutine ", gp.goid, " [", status)
if isScan {
print(" (scan)")
}
if waitfor >= 1 {
print(", ", waitfor, " minutes")
}
if gp.lockedm != nil {
print(", locked to thread")
}
print("]:\n")
}
// isSystemGoroutine reports whether the goroutine g must be omitted in
// stack dumps and deadlock detector.
func isSystemGoroutine(gp *g) bool {
// FIXME.
return false
}
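
A brief illustration of the traceback path that traceback_gccgo.go serves (again a sketch, not part of the commit): the exported runtime.Stack defined in mprof.go above reports how many bytes it wrote, so callers typically grow the buffer and retry until the dump fits.

package main

import (
	"fmt"
	"runtime"
)

// dumpStacks collects stack traces of all goroutines, doubling the
// buffer until runtime.Stack no longer fills it completely.
func dumpStacks() string {
	buf := make([]byte, 16*1024)
	for {
		n := runtime.Stack(buf, true)
		if n < len(buf) {
			return string(buf[:n])
		}
		buf = make([]byte, 2*len(buf))
	}
}

func main() {
	fmt.Println(dumpStacks())
}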
/* go-traceback.c -- stack backtrace for Go.
Copyright 2012 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file. */
#include "config.h"
#include "runtime.h"
/* Print a stack trace for the current goroutine. */
void
runtime_traceback ()
{
Location locbuf[100];
int32 c;
c = runtime_callers (1, locbuf, nelem (locbuf), false);
runtime_printtrace (locbuf, c, true);
}
void
runtime_printtrace (Location *locbuf, int32 c, bool current)
{
int32 i;
for (i = 0; i < c; ++i)
{
if (runtime_showframe (locbuf[i].function, current))
{
runtime_printf ("%S\n", locbuf[i].function);
runtime_printf ("\t%S:%D\n", locbuf[i].filename,
(int64) locbuf[i].lineno);
}
}
}
...@@ -303,7 +303,7 @@ struct SpecialFinalizer
};
// The described object is being heap profiled.
typedef struct Bucket Bucket; // from mprof.goc
typedef struct bucket Bucket; // from mprof.go
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
...@@ -414,7 +414,8 @@ void runtime_MHeap_Scavenger(void*);
void runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
void* runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
__asm__(GOSYM_PREFIX "runtime.persistentalloc");
int32 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void runtime_gc(int32 force);
uintptr runtime_sweepone(void);
...@@ -428,12 +429,15 @@ void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime_unmarkspan(void *v, uintptr size);
void runtime_purgecachedstats(MCache*);
void* runtime_cnew(const Type*)
__asm__(GOSYM_PREFIX "runtime.newobject");
void* runtime_cnewarray(const Type*, intgo)
__asm__(GOSYM_PREFIX "runtime.newarray");
void runtime_tracealloc(void*, uintptr, uintptr);
void runtime_tracefree(void*, uintptr);
void runtime_tracegc(void);
void runtime_tracealloc(void*, uintptr, uintptr)
__asm__ (GOSYM_PREFIX "runtime.tracealloc");
void runtime_tracefree(void*, uintptr)
__asm__ (GOSYM_PREFIX "runtime.tracefree");
void runtime_tracegc(void)
__asm__ (GOSYM_PREFIX "runtime.tracegc");
uintptr runtime_gettype(void*);
...@@ -455,10 +459,14 @@ struct Obj
uintptr ti; // type info
};
void runtime_MProf_Malloc(void*, uintptr);
void runtime_MProf_Free(Bucket*, uintptr, bool);
void runtime_MProf_GC(void);
void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr));
void runtime_MProf_Malloc(void*, uintptr)
__asm__ (GOSYM_PREFIX "runtime.mProf_Malloc");
void runtime_MProf_Free(Bucket*, uintptr, bool)
__asm__ (GOSYM_PREFIX "runtime.mProf_Free");
void runtime_MProf_GC(void)
__asm__ (GOSYM_PREFIX "runtime.mProf_GC");
void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr))
__asm__ (GOSYM_PREFIX "runtime.iterate_memprof");
int32 runtime_gcprocs(void);
void runtime_helpgc(int32 nproc);
void runtime_gchelper(void);
...@@ -467,7 +475,8 @@ G* runtime_wakefing(void);
extern bool runtime_fingwait;
extern bool runtime_fingwake;
void runtime_setprofilebucket(void *p, Bucket *b);
void runtime_setprofilebucket(void *p, Bucket *b)
__asm__ (GOSYM_PREFIX "runtime.setprofilebucket");
struct __go_func_type;
struct __go_ptr_type;
...@@ -533,7 +542,6 @@ int32 runtime_setgcpercent(int32);
#define PoisonStack ((uintptr)0x6868686868686868ULL)
struct Workbuf;
void runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
...@@ -1277,7 +1277,6 @@ markroot(ParFor *desc, uint32 i)
enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
runtime_proc_scan(&wbuf, enqueue1);
runtime_MProf_Mark(&wbuf, enqueue1);
runtime_time_scan(&wbuf, enqueue1);
runtime_netpoll_scan(&wbuf, enqueue1);
break;
......
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.
package runtime
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "defs.h"
#include "go-type.h"
#include "go-string.h"
// NOTE(rsc): Everything here could use cas if contention became an issue.
static Lock proflock;
// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.
enum { MProf, BProf }; // profile types
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
struct Bucket
{
Bucket *next; // next in hash list
Bucket *allnext; // next in list of all mbuckets/bbuckets
int32 typ;
// Generally unions can break precise GC,
// this one is fine because it does not contain pointers.
union
{
struct // typ == MProf
{
// The following complex 3-stage scheme of stats accumulation
// is required to obtain a consistent picture of mallocs and frees
// for some point in time.
// The problem is that mallocs come in real time, while frees
// come only after a GC during concurrent sweeping. So if we would
// naively count them, we would get a skew toward mallocs.
//
// Mallocs are accounted in recent stats.
// Explicit frees are accounted in recent stats.
// GC frees are accounted in prev stats.
// After GC prev stats are added to final stats and
// recent stats are moved into prev stats.
uintptr allocs;
uintptr frees;
uintptr alloc_bytes;
uintptr free_bytes;
uintptr prev_allocs; // since last but one till last gc
uintptr prev_frees;
uintptr prev_alloc_bytes;
uintptr prev_free_bytes;
uintptr recent_allocs; // since last gc till now
uintptr recent_frees;
uintptr recent_alloc_bytes;
uintptr recent_free_bytes;
};
struct // typ == BProf
{
int64 count;
int64 cycles;
};
};
uintptr hash; // hash of size + stk
uintptr size;
uintptr nstk;
Location stk[1];
};
enum {
BuckHashSize = 179999,
};
static Bucket **buckhash;
static Bucket *mbuckets; // memory profile buckets
static Bucket *bbuckets; // blocking profile buckets
static uintptr bucketmem;
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
static Bucket*
stkbucket(int32 typ, uintptr size, Location *stk, int32 nstk, bool alloc)
{
int32 i, j;
uintptr h;
Bucket *b;
if(buckhash == nil) {
buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0], &mstats()->buckhash_sys);
if(buckhash == nil)
runtime_throw("runtime: cannot allocate memory");
}
// Hash stack.
h = 0;
for(i=0; i<nstk; i++) {
h += stk[i].pc;
h += h<<10;
h ^= h>>6;
}
// hash in size
h += size;
h += h<<10;
h ^= h>>6;
// finalize
h += h<<3;
h ^= h>>11;
i = h%BuckHashSize;
for(b = buckhash[i]; b; b=b->next) {
if(b->typ == typ && b->hash == h && b->size == size && b->nstk == (uintptr)nstk) {
for(j = 0; j < nstk; j++) {
if(b->stk[j].pc != stk[j].pc ||
b->stk[j].lineno != stk[j].lineno ||
!__go_strings_equal(b->stk[j].filename, stk[j].filename))
break;
}
if (j == nstk)
return b;
}
}
if(!alloc)
return nil;
b = runtime_persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats()->buckhash_sys);
bucketmem += sizeof *b + nstk*sizeof stk[0];
runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
b->typ = typ;
b->hash = h;
b->size = size;
b->nstk = nstk;
b->next = buckhash[i];
buckhash[i] = b;
if(typ == MProf) {
b->allnext = mbuckets;
mbuckets = b;
} else {
b->allnext = bbuckets;
bbuckets = b;
}
return b;
}
static void
MProf_GC(void)
{
Bucket *b;
for(b=mbuckets; b; b=b->allnext) {
b->allocs += b->prev_allocs;
b->frees += b->prev_frees;
b->alloc_bytes += b->prev_alloc_bytes;
b->free_bytes += b->prev_free_bytes;
b->prev_allocs = b->recent_allocs;
b->prev_frees = b->recent_frees;
b->prev_alloc_bytes = b->recent_alloc_bytes;
b->prev_free_bytes = b->recent_free_bytes;
b->recent_allocs = 0;
b->recent_frees = 0;
b->recent_alloc_bytes = 0;
b->recent_free_bytes = 0;
}
}
// Record that a gc just happened: all the 'recent' statistics are now real.
void
runtime_MProf_GC(void)
{
runtime_lock(&proflock);
MProf_GC();
runtime_unlock(&proflock);
}
// Called by malloc to record a profiled block.
void
runtime_MProf_Malloc(void *p, uintptr size)
{
Location stk[32];
Bucket *b;
int32 nstk;
nstk = runtime_callers(1, stk, nelem(stk), false);
runtime_lock(&proflock);
b = stkbucket(MProf, size, stk, nstk, true);
b->recent_allocs++;
b->recent_alloc_bytes += size;
runtime_unlock(&proflock);
// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
// This reduces potential contention and chances of deadlocks.
// Since the object must be alive during call to MProf_Malloc,
// it's fine to do this non-atomically.
runtime_setprofilebucket(p, b);
}
// Called when freeing a profiled block.
void
runtime_MProf_Free(Bucket *b, uintptr size, bool freed)
{
runtime_lock(&proflock);
if(freed) {
b->recent_frees++;
b->recent_free_bytes += size;
} else {
b->prev_frees++;
b->prev_free_bytes += size;
}
runtime_unlock(&proflock);
}
int64 runtime_blockprofilerate; // in CPU ticks
void runtime_SetBlockProfileRate(intgo) __asm__ (GOSYM_PREFIX "runtime.SetBlockProfileRate");
void
runtime_SetBlockProfileRate(intgo rate)
{
int64 r;
if(rate <= 0)
r = 0; // disable profiling
else {
// convert ns to cycles, use float64 to prevent overflow during multiplication
r = (float64)rate*runtime_tickspersecond()/(1000*1000*1000);
if(r == 0)
r = 1;
}
runtime_atomicstore64((uint64*)&runtime_blockprofilerate, r);
}
void
runtime_blockevent(int64 cycles, int32 skip)
{
int32 nstk;
int64 rate;
Location stk[32];
Bucket *b;
if(cycles <= 0)
return;
rate = runtime_atomicload64((uint64*)&runtime_blockprofilerate);
if(rate <= 0 || (rate > cycles && runtime_fastrand1()%rate > cycles))
return;
nstk = runtime_callers(skip, stk, nelem(stk), false);
runtime_lock(&proflock);
b = stkbucket(BProf, 0, stk, nstk, true);
b->count++;
b->cycles += cycles;
runtime_unlock(&proflock);
}
// Go interface to profile data. (Declared in debug.go)
// Must match MemProfileRecord in debug.go.
typedef struct Record Record;
struct Record {
int64 alloc_bytes, free_bytes;
int64 alloc_objects, free_objects;
uintptr stk[32];
};
// Write b's data to r.
static void
record(Record *r, Bucket *b)
{
uint32 i;
r->alloc_bytes = b->alloc_bytes;
r->free_bytes = b->free_bytes;
r->alloc_objects = b->allocs;
r->free_objects = b->frees;
for(i=0; i<b->nstk && i<nelem(r->stk); i++)
r->stk[i] = b->stk[i].pc;
for(; i<nelem(r->stk); i++)
r->stk[i] = 0;
}
func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
Bucket *b;
Record *r;
bool clear;
runtime_lock(&proflock);
n = 0;
clear = true;
for(b=mbuckets; b; b=b->allnext) {
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
n++;
if(b->allocs != 0 || b->frees != 0)
clear = false;
}
if(clear) {
// Absolutely no data, suggesting that a garbage collection
// has not yet happened. In order to allow profiling when
// garbage collection is disabled from the beginning of execution,
// accumulate stats as if a GC just happened, and recount buckets.
MProf_GC();
MProf_GC();
n = 0;
for(b=mbuckets; b; b=b->allnext)
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
n++;
}
ok = false;
if(n <= p.__count) {
ok = true;
r = (Record*)p.__values;
for(b=mbuckets; b; b=b->allnext)
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
record(r++, b);
}
runtime_unlock(&proflock);
}
void
runtime_MProf_Mark(struct Workbuf **wbufp, void (*enqueue1)(struct Workbuf**, Obj))
{
// buckhash is not allocated via mallocgc.
enqueue1(wbufp, (Obj){(byte*)&mbuckets, sizeof mbuckets, 0});
enqueue1(wbufp, (Obj){(byte*)&bbuckets, sizeof bbuckets, 0});
}
void
runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr))
{
Bucket *b;
runtime_lock(&proflock);
for(b=mbuckets; b; b=b->allnext) {
callback(b, b->nstk, b->stk, b->size, b->allocs, b->frees);
}
runtime_unlock(&proflock);
}
// Must match BlockProfileRecord in debug.go.
typedef struct BRecord BRecord;
struct BRecord {
int64 count;
int64 cycles;
uintptr stk[32];
};
func BlockProfile(p Slice) (n int, ok bool) {
Bucket *b;
BRecord *r;
int32 i;
runtime_lock(&proflock);
n = 0;
for(b=bbuckets; b; b=b->allnext)
n++;
ok = false;
if(n <= p.__count) {
ok = true;
r = (BRecord*)p.__values;
for(b=bbuckets; b; b=b->allnext, r++) {
r->count = b->count;
r->cycles = b->cycles;
for(i=0; (uintptr)i<b->nstk && (uintptr)i<nelem(r->stk); i++)
r->stk[i] = b->stk[i].pc;
for(; (uintptr)i<nelem(r->stk); i++)
r->stk[i] = 0;
}
}
runtime_unlock(&proflock);
}
// Must match StackRecord in debug.go.
typedef struct TRecord TRecord;
struct TRecord {
uintptr stk[32];
};
func ThreadCreateProfile(p Slice) (n int, ok bool) {
TRecord *r;
M *first, *mp;
int32 i;
first = runtime_atomicloadp(&runtime_allm);
n = 0;
for(mp=first; mp; mp=mp->alllink)
n++;
ok = false;
if(n <= p.__count) {
ok = true;
r = (TRecord*)p.__values;
for(mp=first; mp; mp=mp->alllink) {
for(i = 0; (uintptr)i < nelem(r->stk); i++) {
r->stk[i] = mp->createstack[i].pc;
}
r++;
}
}
}
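// Stack formats a traceback of the calling goroutine into b; if all is true,
// the world is stopped and every other goroutine is included as well.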
func Stack(b Slice, all bool) (n int) {
byte *pc;
bool enablegc = false;
pc = (byte*)(uintptr)runtime_getcallerpc(&b);
if(all) {
runtime_acquireWorldsema();
runtime_m()->gcing = 1;
runtime_stopTheWorldWithSema();
enablegc = mstats()->enablegc;
mstats()->enablegc = false;
}
if(b.__count == 0)
n = 0;
else {
G* g = runtime_g();
g->writebuf.__values = b.__values;
g->writebuf.__count = 0;
g->writebuf.__capacity = b.__count;
USED(pc);
runtime_goroutineheader(g);
runtime_traceback();
runtime_printcreatedby(g);
if(all)
runtime_tracebackothers(g);
n = g->writebuf.__count;
g->writebuf.__values = nil;
g->writebuf.__count = 0;
g->writebuf.__capacity = 0;
}
if(all) {
runtime_m()->gcing = 0;
mstats()->enablegc = enablegc;
runtime_releaseWorldsema();
runtime_startTheWorldWithSema();
}
}
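// saveg records gp's current stack PCs into r. Only the calling goroutine can
// be walked here; other goroutines get an empty record (see FIXME below).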
static void
saveg(G *gp, TRecord *r)
{
int32 n, i;
Location locstk[nelem(r->stk)];
if(gp == runtime_g()) {
n = runtime_callers(0, locstk, nelem(r->stk), false);
for(i = 0; i < n; i++)
r->stk[i] = locstk[i].pc;
}
else {
// FIXME: Not implemented.
n = 0;
}
if((size_t)n < nelem(r->stk))
r->stk[n] = 0;
}
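// GoroutineProfile stops the world and saves a stack record for every
// goroutine that is not dead, starting with the caller.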
func GoroutineProfile(b Slice) (n int, ok bool) {
uintptr i;
TRecord *r;
G *gp;
ok = false;
n = runtime_gcount();
if(n <= b.__count) {
runtime_acquireWorldsema();
runtime_m()->gcing = 1;
runtime_stopTheWorldWithSema();
n = runtime_gcount();
if(n <= b.__count) {
G* g = runtime_g();
ok = true;
r = (TRecord*)b.__values;
saveg(g, r++);
for(i = 0; i < runtime_allglen; i++) {
gp = runtime_allg[i];
if(gp == g || gp->atomicstatus == _Gdead)
continue;
saveg(gp, r++);
}
}
runtime_m()->gcing = 0;
runtime_releaseWorldsema();
runtime_startTheWorldWithSema();
}
}
// Tracing of alloc/free/gc.
static Lock tracelock;
static const char*
typeinfoname(int32 typeinfo)
{
if(typeinfo == TypeInfo_SingleObject)
return "single object";
else if(typeinfo == TypeInfo_Array)
return "array";
else if(typeinfo == TypeInfo_Chan)
return "channel";
runtime_throw("typinfoname: unknown type info");
return nil;
}
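// runtime_tracealloc prints one line per traced allocation: the address, size,
// and kind of the object, followed by the allocating goroutine's stack.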
void
runtime_tracealloc(void *p, uintptr size, uintptr typ)
{
const char *name;
Type *type;
runtime_lock(&tracelock);
runtime_m()->traceback = 2;
type = (Type*)(typ & ~3);
name = typeinfoname(typ & 3);
if(type == nil)
runtime_printf("tracealloc(%p, %p, %s)\n", p, size, name);
else
runtime_printf("tracealloc(%p, %p, %s of %S)\n", p, size, name, *type->__reflection);
if(runtime_m()->curg == nil || runtime_g() == runtime_m()->curg) {
runtime_goroutineheader(runtime_g());
runtime_traceback();
} else {
runtime_goroutineheader(runtime_m()->curg);
runtime_traceback();
}
runtime_printf("\n");
runtime_m()->traceback = 0;
runtime_unlock(&tracelock);
}
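// runtime_tracefree prints one line per traced free: the address and size,
// plus the current goroutine's stack.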
void
runtime_tracefree(void *p, uintptr size)
{
runtime_lock(&tracelock);
runtime_m()->traceback = 2;
runtime_printf("tracefree(%p, %p)\n", p, size);
runtime_goroutineheader(runtime_g());
runtime_traceback();
runtime_printf("\n");
runtime_m()->traceback = 0;
runtime_unlock(&tracelock);
}
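// runtime_tracegc prints a traceback of every goroutine when GC tracing is
// enabled; it runs on the g0 stack, so only non-g0 goroutines are shown.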
void
runtime_tracegc(void)
{
runtime_lock(&tracelock);
runtime_m()->traceback = 2;
runtime_printf("tracegc()\n");
// running on m->g0 stack; show all non-g0 goroutines
runtime_tracebackothers(runtime_g());
runtime_printf("end tracegc\n");
runtime_printf("\n");
runtime_m()->traceback = 0;
runtime_unlock(&tracelock);
}
...@@ -658,67 +658,12 @@ runtime_main(void* dummy __attribute__((unused))) ...@@ -658,67 +658,12 @@ runtime_main(void* dummy __attribute__((unused)))
} }
void void
runtime_goroutineheader(G *gp)
{
String status;
int64 waitfor;
switch(gp->atomicstatus) {
case _Gidle:
status = runtime_gostringnocopy((const byte*)"idle");
break;
case _Grunnable:
status = runtime_gostringnocopy((const byte*)"runnable");
break;
case _Grunning:
status = runtime_gostringnocopy((const byte*)"running");
break;
case _Gsyscall:
status = runtime_gostringnocopy((const byte*)"syscall");
break;
case _Gwaiting:
if(gp->waitreason.len > 0)
status = gp->waitreason;
else
status = runtime_gostringnocopy((const byte*)"waiting");
break;
default:
status = runtime_gostringnocopy((const byte*)"???");
break;
}
// approx time the G is blocked, in minutes
waitfor = 0;
if((gp->atomicstatus == _Gwaiting || gp->atomicstatus == _Gsyscall) && gp->waitsince != 0)
waitfor = (runtime_nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
if(waitfor < 1)
runtime_printf("goroutine %D [%S]:\n", gp->goid, status);
else
runtime_printf("goroutine %D [%S, %D minutes]:\n", gp->goid, status, waitfor);
}
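// Print the "created by" line for a goroutine: the function that started it
// and the file and line of the creation site.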
void
runtime_printcreatedby(G *g)
{
if(g != nil && g->gopc != 0 && g->goid != 1) {
String fn;
String file;
intgo line;
if(__go_file_line(g->gopc - 1, -1, &fn, &file, &line)) {
runtime_printf("created by %S\n", fn);
runtime_printf("\t%S:%D\n", file, (int64) line);
}
}
}
void
runtime_tracebackothers(G * volatile me) runtime_tracebackothers(G * volatile me)
{ {
G * volatile gp; G * volatile gp;
Traceback tb; Traceback tb;
int32 traceback; int32 traceback;
Slice slice;
volatile uintptr i; volatile uintptr i;
tb.gp = me; tb.gp = me;
...@@ -739,7 +684,10 @@ runtime_tracebackothers(G * volatile me) ...@@ -739,7 +684,10 @@ runtime_tracebackothers(G * volatile me)
runtime_gogo(gp); runtime_gogo(gp);
} }
runtime_printtrace(tb.locbuf, tb.c, false); slice.__values = &tb.locbuf[0];
slice.__count = tb.c;
slice.__capacity = tb.c;
runtime_printtrace(slice, nil);
runtime_printcreatedby(gp); runtime_printcreatedby(gp);
} }
...@@ -780,7 +728,10 @@ runtime_tracebackothers(G * volatile me) ...@@ -780,7 +728,10 @@ runtime_tracebackothers(G * volatile me)
runtime_gogo(gp); runtime_gogo(gp);
} }
runtime_printtrace(tb.locbuf, tb.c, false); slice.__values = &tb.locbuf[0];
slice.__count = tb.c;
slice.__capacity = tb.c;
runtime_printtrace(slice, nil);
runtime_printcreatedby(gp); runtime_printcreatedby(gp);
} }
} }
...@@ -3597,3 +3548,28 @@ sync_runtime_doSpin() ...@@ -3597,3 +3548,28 @@ sync_runtime_doSpin()
{ {
runtime_procyield(ACTIVE_SPIN_CNT); runtime_procyield(ACTIVE_SPIN_CNT);
} }
// For Go code to look at variables, until we port proc.go.
extern M** runtime_go_allm(void)
__asm__ (GOSYM_PREFIX "runtime.allm");
M**
runtime_go_allm()
{
return &runtime_allm;
}
extern Slice runtime_go_allgs(void)
__asm__ (GOSYM_PREFIX "runtime.allgs");
Slice
runtime_go_allgs()
{
Slice s;
s.__values = runtime_allg;
s.__count = runtime_allglen;
s.__capacity = allgcap;
return s;
}
...@@ -90,18 +90,6 @@ runtime_cputicks(void) ...@@ -90,18 +90,6 @@ runtime_cputicks(void)
#endif #endif
} }
bool
runtime_showframe(String s, bool current)
{
static int32 traceback = -1;
if(current && runtime_m()->throwing > 0)
return 1;
if(traceback < 0)
traceback = runtime_gotraceback(nil);
return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
}
// Called to initialize a new m (including the bootstrap m). // Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory. // Called on the parent thread (main thread in case of bootstrap), can allocate memory.
void void
......
...@@ -89,7 +89,7 @@ typedef struct __go_interface_type InterfaceType; ...@@ -89,7 +89,7 @@ typedef struct __go_interface_type InterfaceType;
typedef struct __go_map_type MapType; typedef struct __go_map_type MapType;
typedef struct __go_channel_type ChanType; typedef struct __go_channel_type ChanType;
typedef struct traceback Traceback; typedef struct tracebackg Traceback;
typedef struct location Location; typedef struct location Location;
...@@ -261,8 +261,10 @@ enum { ...@@ -261,8 +261,10 @@ enum {
}; };
void runtime_hashinit(void); void runtime_hashinit(void);
void runtime_traceback(void); void runtime_traceback(void)
void runtime_tracebackothers(G*); __asm__ (GOSYM_PREFIX "runtime.traceback");
void runtime_tracebackothers(G*)
__asm__ (GOSYM_PREFIX "runtime.tracebackothers");
enum enum
{ {
// The maximum number of frames we print for a traceback // The maximum number of frames we print for a traceback
...@@ -325,8 +327,10 @@ void runtime_sigenable(uint32 sig); ...@@ -325,8 +327,10 @@ void runtime_sigenable(uint32 sig);
void runtime_sigdisable(uint32 sig); void runtime_sigdisable(uint32 sig);
void runtime_sigignore(uint32 sig); void runtime_sigignore(uint32 sig);
int32 runtime_gotraceback(bool *crash); int32 runtime_gotraceback(bool *crash);
void runtime_goroutineheader(G*); void runtime_goroutineheader(G*)
void runtime_printtrace(Location*, int32, bool); __asm__ (GOSYM_PREFIX "runtime.goroutineheader");
void runtime_printtrace(Slice, G*)
__asm__ (GOSYM_PREFIX "runtime.printtrace");
#define runtime_open(p, f, m) open((p), (f), (m)) #define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n)) #define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n)) #define runtime_write(d, v, n) write((d), (v), (n))
...@@ -561,8 +565,8 @@ void runtime_lockOSThread(void); ...@@ -561,8 +565,8 @@ void runtime_lockOSThread(void);
void runtime_unlockOSThread(void); void runtime_unlockOSThread(void);
bool runtime_lockedOSThread(void); bool runtime_lockedOSThread(void);
bool runtime_showframe(String, bool); void runtime_printcreatedby(G*)
void runtime_printcreatedby(G*); __asm__(GOSYM_PREFIX "runtime.printcreatedby");
uintptr runtime_memlimit(void); uintptr runtime_memlimit(void);
......