Commit 1f0be9ee by Ian Lance Taylor

runtime: copy mprof code from Go 1.7 runtime

    
    Also create a gccgo version of some of the traceback code in
    traceback_gccgo.go, replacing some code currently in C.
    
    This required modifying the compiler so that when compiling the runtime
    package a slice expression does not cause a local array variable to
    escape to the heap.
    
    Reviewed-on: https://go-review.googlesource.com/31230

From-SVN: r241189
parent 2045acd9
993840643e27e52cda7e86e6a775f54443ea5d07
ec3dc927da71d15cac48a13c0fb0c1f94572d0d2
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
......@@ -10308,7 +10308,7 @@ Array_index_expression::do_determine_type(const Type_context*)
// Check types of an array index.
void
Array_index_expression::do_check_types(Gogo*)
Array_index_expression::do_check_types(Gogo* gogo)
{
Numeric_constant nc;
unsigned long v;
......@@ -10427,7 +10427,18 @@ Array_index_expression::do_check_types(Gogo*)
if (!this->array_->is_addressable())
this->report_error(_("slice of unaddressable value"));
else
this->array_->address_taken(true);
{
bool escapes = true;
// When compiling the runtime, a slice operation does not
// cause local variables to escape. When escape analysis
// becomes the default, this should be changed to make it an
// error if we have a slice operation that escapes.
if (gogo->compiling_runtime() && gogo->package_name() == "runtime")
escapes = false;
this->array_->address_taken(escapes);
}
}
}
......
......@@ -478,7 +478,6 @@ runtime_files = \
runtime/go-signal.c \
runtime/go-strcmp.c \
runtime/go-strslice.c \
runtime/go-traceback.c \
runtime/go-type-complex.c \
runtime/go-type-eface.c \
runtime/go-type-float.c \
......@@ -515,7 +514,6 @@ runtime_files = \
go-iface.c \
lfstack.c \
malloc.c \
mprof.c \
netpoll.c \
rdebug.c \
reflect.c \
......
......@@ -253,17 +253,17 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
go-nanotime.lo go-now.lo go-new.lo go-nosys.lo go-panic.lo \
go-recover.lo go-reflect-call.lo go-runtime-error.lo \
go-setenv.lo go-signal.lo go-strcmp.lo go-strslice.lo \
go-traceback.lo go-type-complex.lo go-type-eface.lo \
go-type-float.lo go-type-identity.lo go-type-interface.lo \
go-type-string.lo go-typedesc-equal.lo go-unsafe-new.lo \
go-unsafe-newarray.lo go-unsafe-pointer.lo go-unsetenv.lo \
go-unwind.lo go-varargs.lo env_posix.lo heapdump.lo mcache.lo \
mcentral.lo $(am__objects_1) mfixalloc.lo mgc0.lo mheap.lo \
msize.lo $(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
go-type-complex.lo go-type-eface.lo go-type-float.lo \
go-type-identity.lo go-type-interface.lo go-type-string.lo \
go-typedesc-equal.lo go-unsafe-new.lo go-unsafe-newarray.lo \
go-unsafe-pointer.lo go-unsetenv.lo go-unwind.lo go-varargs.lo \
env_posix.lo heapdump.lo mcache.lo mcentral.lo \
$(am__objects_1) mfixalloc.lo mgc0.lo mheap.lo msize.lo \
$(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
runtime.lo signal_unix.lo thread.lo $(am__objects_3) yield.lo \
$(am__objects_4) go-iface.lo lfstack.lo malloc.lo mprof.lo \
netpoll.lo rdebug.lo reflect.lo runtime1.lo sigqueue.lo \
time.lo $(am__objects_5)
$(am__objects_4) go-iface.lo lfstack.lo malloc.lo netpoll.lo \
rdebug.lo reflect.lo runtime1.lo sigqueue.lo time.lo \
$(am__objects_5)
am_libgo_llgo_la_OBJECTS = $(am__objects_6)
libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
......@@ -877,7 +877,6 @@ runtime_files = \
runtime/go-signal.c \
runtime/go-strcmp.c \
runtime/go-strslice.c \
runtime/go-traceback.c \
runtime/go-type-complex.c \
runtime/go-type-eface.c \
runtime/go-type-float.c \
......@@ -914,7 +913,6 @@ runtime_files = \
go-iface.c \
lfstack.c \
malloc.c \
mprof.c \
netpoll.c \
rdebug.c \
reflect.c \
......@@ -1593,7 +1591,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-signal.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-strcmp.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-strslice.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-traceback.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-complex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-eface.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-float.Plo@am__quote@
......@@ -1620,7 +1617,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mfixalloc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mgc0.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mheap.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mprof.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/msize.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netpoll.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netpoll_epoll.Plo@am__quote@
......@@ -1986,13 +1982,6 @@ go-strslice.lo: runtime/go-strslice.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-strslice.lo `test -f 'runtime/go-strslice.c' || echo '$(srcdir)/'`runtime/go-strslice.c
go-traceback.lo: runtime/go-traceback.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-traceback.lo -MD -MP -MF $(DEPDIR)/go-traceback.Tpo -c -o go-traceback.lo `test -f 'runtime/go-traceback.c' || echo '$(srcdir)/'`runtime/go-traceback.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-traceback.Tpo $(DEPDIR)/go-traceback.Plo
@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-traceback.c' object='go-traceback.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-traceback.lo `test -f 'runtime/go-traceback.c' || echo '$(srcdir)/'`runtime/go-traceback.c
go-type-complex.lo: runtime/go-type-complex.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-type-complex.lo -MD -MP -MF $(DEPDIR)/go-type-complex.Tpo -c -o go-type-complex.lo `test -f 'runtime/go-type-complex.c' || echo '$(srcdir)/'`runtime/go-type-complex.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-type-complex.Tpo $(DEPDIR)/go-type-complex.Plo
......
......@@ -4,18 +4,6 @@
package runtime
// Breakpoint executes a breakpoint trap.
func Breakpoint()
// LockOSThread wires the calling goroutine to its current operating system thread.
// Until the calling goroutine exits or calls UnlockOSThread, it will always
// execute in that thread, and no other goroutine can.
func LockOSThread()
// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
func UnlockOSThread()
// GOMAXPROCS sets the maximum number of CPUs that can be executing
// simultaneously and returns the previous setting. If n < 1, it does not
// change the current setting.
......@@ -36,145 +24,6 @@ func NumCgoCall() int64
// NumGoroutine returns the number of goroutines that currently exist.
func NumGoroutine() int
// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
AllocBytes, FreeBytes int64 // number of bytes allocated, freed
AllocObjects, FreeObjects int64 // number of objects allocated, freed
Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 {
	// Live bytes are everything allocated at this site minus
	// everything already freed.
	return r.AllocBytes - r.FreeBytes
}
// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 { return r.AllocObjects - r.FreeObjects }
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.  The trace ends at the first zero entry;
// if no entry is zero the whole array is returned.
func (r *MemProfileRecord) Stack() []uintptr {
	trace := r.Stack0[:]
	for i, pc := range trace {
		if pc == 0 {
			return trace[:i]
		}
	}
	return trace
}
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)
// A StackRecord describes a single execution stack.
type StackRecord struct {
Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.  Entries after the first zero are unused.
func (r *StackRecord) Stack() []uintptr {
	end := len(r.Stack0)
	for i, pc := range r.Stack0 {
		if pc == 0 {
			end = i
			break
		}
	}
	return r.Stack0[:end]
}
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool)
// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool)
// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
// blocking until data is available. If profiling is turned off and all the profile
// data accumulated while it was on has been returned, CPUProfile returns nil.
// The caller must save the returned data before calling CPUProfile again.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// CPUProfile directly.
func CPUProfile() []byte
// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
// If hz <= 0, SetCPUProfileRate turns off profiling.
// If the profiler is on, the rate cannot be changed without first turning it off.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// SetCPUProfileRate directly.
func SetCPUProfileRate(hz int)
// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int)
// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
Count int64
Cycles int64
StackRecord
}
// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool)
// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int
// Get field tracking information. Only fields with a tag go:"track"
// are tracked. This function will add every such field that is
// referenced to the map. The keys in the map will be
......
......@@ -394,7 +394,7 @@ type g struct {
issystem bool // do not output in stack dump
isbackground bool // ignore in deadlock detector
traceback *traceback // stack traceback buffer
traceback *tracebackg // stack traceback buffer
context g_ucontext_t // saved context for setcontext
stackcontext [10]unsafe.Pointer // split-stack context
......@@ -801,21 +801,6 @@ var (
// array.
type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(unsafe.Pointer(nil))]unsafe.Pointer
// traceback is used to collect stack traces from other goroutines.
type traceback struct {
gp *g
locbuf [_TracebackMaxFrames]location
c int
}
// location is a location in the program, used for backtraces.
type location struct {
pc uintptr
filename string
function string
lineno int
}
// cgoMal tracks allocations made by _cgo_allocate
// FIXME: _cgo_allocate has been removed from gc and can probably be
// removed from gccgo too.
......
......@@ -307,11 +307,6 @@ func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
func goparkunlock(*mutex, string, byte, int)
func goready(*g, int)
// Temporary for gccgo until we port mprof.go.
var blockprofilerate uint64
func blockevent(cycles int64, skip int) {}
// Temporary hack for gccgo until we port proc.go.
//go:nosplit
func acquireSudog() *sudog {
......@@ -428,3 +423,24 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
func cpuprofAdd(stk []uintptr) {
cpuprof.add(stk)
}
// For gccgo until we port proc.go.
func Breakpoint()
func LockOSThread()
func UnlockOSThread()
func allm() *m
func allgs() []*g
//go:nosplit
func readgstatus(gp *g) uint32 {
return atomic.Load(&gp.atomicstatus)
}
// Temporary for gccgo until we port malloc.go
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer
// Temporary for gccgo until we port mheap.go
func setprofilebucket(p unsafe.Pointer, b *bucket)
// Currently in proc.c.
func tracebackothers(*g)
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Traceback support for gccgo.
// The actual traceback code is written in C.
package runtime
import (
"runtime/internal/sys"
_ "unsafe" // for go:linkname
)
// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
// These are temporary for C runtime code to call.
//go:linkname traceback runtime.traceback
//go:linkname printtrace runtime.printtrace
//go:linkname goroutineheader runtime.goroutineheader
//go:linkname printcreatedby runtime.printcreatedby
// printcreatedby prints the "created by" line of a traceback for gp:
// the function containing the go statement that started the goroutine
// and its file:line, plus the PC offset within that function.  The
// main goroutine (goid 1) and frames rejected by showframe are skipped.
func printcreatedby(gp *g) {
	// Show what created goroutine, except main goroutine (goid 1).
	pc := gp.gopc
	tracepc := pc // back up to CALL instruction for funcfileline.
	entry := funcentry(tracepc)
	if entry != 0 && tracepc > entry {
		// Step back one instruction so the reported location is the
		// call site rather than the return address.
		tracepc -= sys.PCQuantum
	}
	function, file, line := funcfileline(tracepc, -1)
	if function != "" && showframe(function, gp) && gp.goid != 1 {
		print("created by ", function, "\n")
		print("\t", file, ":", line)
		if entry != 0 && pc > entry {
			// Offset of the creation PC within its function.
			print(" +", hex(pc-entry))
		}
		print("\n")
	}
}
// tracebackg is used to collect stack traces from other goroutines.
type tracebackg struct {
	gp     *g                            // goroutine whose stack is being collected
	locbuf [_TracebackMaxFrames]location // collected stack frames
	c      int                           // number of valid entries in locbuf
}
// location is a location in the program, used for backtraces.
type location struct {
	pc       uintptr // program counter
	filename string  // source file name
	function string  // function name
	lineno   int     // line number within filename
}
//extern runtime_callers
func c_callers(skip int32, locbuf *location, max int32, keepThunks bool) int32

// callers fills locbuf with a stack trace of the current goroutine,
// skipping the innermost skip frames, and reports how many entries
// were stored.  The gc version of callers takes []uintptr, but the
// gccgo version records full []location values.
func callers(skip int, locbuf []location) int {
	return int(c_callers(int32(skip), &locbuf[0], int32(len(locbuf)), false))
}
// traceback prints a traceback of the current goroutine.
// This differs from the gc version, which is given pc, sp, lr and g and
// can print a traceback of any goroutine.
func traceback() {
	// Skip one frame so the trace starts at our caller.
	var frames [100]location
	n := c_callers(1, &frames[0], int32(len(frames)), false)
	printtrace(frames[:n], getg())
}
// printtrace prints a traceback from locbuf: for each frame accepted
// by showframe it prints the function name followed by an indented
// file:line on the next line.
func printtrace(locbuf []location, gp *g) {
	for i := range locbuf {
		if showframe(locbuf[i].function, gp) {
			// Terminate each frame with a newline.  The C code this
			// replaces printed "\t%S:%D\n"; without the trailing
			// newline the next frame's function name would be glued
			// onto the same output line.
			print(locbuf[i].function, "\n\t", locbuf[i].filename, ":", locbuf[i].lineno, "\n")
		}
	}
}
// showframe returns whether to print a frame in a traceback.
// name is the function name.
func showframe(name string, gp *g) bool {
	g := getg()
	// While this thread is throwing, always show frames of the
	// crashing goroutine and of the goroutine that caught the signal.
	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
		return true
	}
	// level is the GOTRACEBACK setting.
	level, _, _ := gotraceback()

	// Special case: always show runtime.gopanic frame, so that we can
	// see where a panic started in the middle of a stack trace.
	// See golang.org/issue/5832.
	// __go_panic is the current gccgo name.
	if name == "runtime.gopanic" || name == "__go_panic" {
		return true
	}
	// Otherwise show everything when level > 1; at lower levels hide
	// names without a dot (non-Go symbols) and unexported
	// runtime-internal functions.
	return level > 1 || contains(name, ".") && (!hasprefix(name, "runtime.") || isExportedRuntime(name))
}
// isExportedRuntime reports whether name names an exported function in
// the runtime package, i.e. has the "runtime." prefix followed by an
// upper-case letter.  It is only used for runtime functions, so the
// ASCII A-Z check is sufficient.
func isExportedRuntime(name string) bool {
	const prefix = "runtime."
	if len(name) <= len(prefix) || name[:len(prefix)] != prefix {
		return false
	}
	first := name[len(prefix)]
	return first >= 'A' && first <= 'Z'
}
// gStatusStrings maps a goroutine status constant (_Gidle etc.) to the
// human-readable label printed by goroutineheader.
var gStatusStrings = [...]string{
	_Gidle:      "idle",
	_Grunnable:  "runnable",
	_Grunning:   "running",
	_Gsyscall:   "syscall",
	_Gwaiting:   "waiting",
	_Gdead:      "dead",
	_Gcopystack: "copystack",
}
// goroutineheader prints the header line of a traceback for gp, e.g.
// "goroutine 5 [chan receive, 2 minutes, locked to thread]:".
func goroutineheader(gp *g) {
	gpstatus := readgstatus(gp)

	isScan := gpstatus&_Gscan != 0
	gpstatus &^= _Gscan // drop the scan bit

	// Basic string status
	// (0 <= gpstatus is always true for a uint32; the comparison is
	// kept to mirror the gc runtime this was copied from.)
	var status string
	if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
		status = gStatusStrings[gpstatus]
	} else {
		status = "???"
	}

	// Override.
	if gpstatus == _Gwaiting && gp.waitreason != "" {
		status = gp.waitreason
	}

	// approx time the G is blocked, in minutes
	var waitfor int64
	if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
		waitfor = (nanotime() - gp.waitsince) / 60e9
	}
	print("goroutine ", gp.goid, " [", status)
	if isScan {
		print(" (scan)")
	}
	if waitfor >= 1 {
		print(", ", waitfor, " minutes")
	}
	if gp.lockedm != nil {
		print(", locked to thread")
	}
	print("]:\n")
}
// isSystemGoroutine reports whether the goroutine g must be omitted in
// stack dumps and deadlock detector.
//
// FIXME: always reports false for now; gccgo does not yet track
// system goroutines the way the gc runtime does.
func isSystemGoroutine(gp *g) bool {
	_ = gp
	return false
}
/* go-traceback.c -- stack backtrace for Go.
Copyright 2012 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file. */
#include "config.h"
#include "runtime.h"
/* Print a stack trace for the current goroutine.  Collects up to 100
   frames starting one level above this call (so the trace begins at
   our caller) and prints them with runtime_printtrace.  */

void
runtime_traceback ()
{
  Location locbuf[100];
  int32 c;

  c = runtime_callers (1, locbuf, nelem (locbuf), false);
  runtime_printtrace (locbuf, c, true);
}
/* Print the C frames collected in LOCBUF (C valid entries), one per
   frame: the function name, then an indented file:line.  CURRENT says
   whether this is the currently running goroutine, which
   runtime_showframe uses when deciding which frames to suppress.  */

void
runtime_printtrace (Location *locbuf, int32 c, bool current)
{
  int32 i;

  for (i = 0; i < c; ++i)
    {
      if (runtime_showframe (locbuf[i].function, current))
	{
	  runtime_printf ("%S\n", locbuf[i].function);
	  runtime_printf ("\t%S:%D\n", locbuf[i].filename,
			  (int64) locbuf[i].lineno);
	}
    }
}
......@@ -303,7 +303,7 @@ struct SpecialFinalizer
};
// The described object is being heap profiled.
typedef struct Bucket Bucket; // from mprof.goc
typedef struct bucket Bucket; // from mprof.go
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
......@@ -414,7 +414,8 @@ void runtime_MHeap_Scavenger(void*);
void runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
void* runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
__asm__(GOSYM_PREFIX "runtime.persistentalloc");
int32 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void runtime_gc(int32 force);
uintptr runtime_sweepone(void);
......@@ -428,12 +429,15 @@ void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime_unmarkspan(void *v, uintptr size);
void runtime_purgecachedstats(MCache*);
void* runtime_cnew(const Type*)
__asm__(GOSYM_PREFIX "runtime.newobject");
__asm__(GOSYM_PREFIX "runtime.newobject");
void* runtime_cnewarray(const Type*, intgo)
__asm__(GOSYM_PREFIX "runtime.newarray");
void runtime_tracealloc(void*, uintptr, uintptr);
void runtime_tracefree(void*, uintptr);
void runtime_tracegc(void);
__asm__(GOSYM_PREFIX "runtime.newarray");
void runtime_tracealloc(void*, uintptr, uintptr)
__asm__ (GOSYM_PREFIX "runtime.tracealloc");
void runtime_tracefree(void*, uintptr)
__asm__ (GOSYM_PREFIX "runtime.tracefree");
void runtime_tracegc(void)
__asm__ (GOSYM_PREFIX "runtime.tracegc");
uintptr runtime_gettype(void*);
......@@ -455,10 +459,14 @@ struct Obj
uintptr ti; // type info
};
void runtime_MProf_Malloc(void*, uintptr);
void runtime_MProf_Free(Bucket*, uintptr, bool);
void runtime_MProf_GC(void);
void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr));
void runtime_MProf_Malloc(void*, uintptr)
__asm__ (GOSYM_PREFIX "runtime.mProf_Malloc");
void runtime_MProf_Free(Bucket*, uintptr, bool)
__asm__ (GOSYM_PREFIX "runtime.mProf_Free");
void runtime_MProf_GC(void)
__asm__ (GOSYM_PREFIX "runtime.mProf_GC");
void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr))
__asm__ (GOSYM_PREFIX "runtime.iterate_memprof");
int32 runtime_gcprocs(void);
void runtime_helpgc(int32 nproc);
void runtime_gchelper(void);
......@@ -467,7 +475,8 @@ G* runtime_wakefing(void);
extern bool runtime_fingwait;
extern bool runtime_fingwake;
void runtime_setprofilebucket(void *p, Bucket *b);
void runtime_setprofilebucket(void *p, Bucket *b)
__asm__ (GOSYM_PREFIX "runtime.setprofilebucket");
struct __go_func_type;
struct __go_ptr_type;
......@@ -533,7 +542,6 @@ int32 runtime_setgcpercent(int32);
#define PoisonStack ((uintptr)0x6868686868686868ULL)
struct Workbuf;
void runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
......@@ -1277,7 +1277,6 @@ markroot(ParFor *desc, uint32 i)
enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
runtime_proc_scan(&wbuf, enqueue1);
runtime_MProf_Mark(&wbuf, enqueue1);
runtime_time_scan(&wbuf, enqueue1);
runtime_netpoll_scan(&wbuf, enqueue1);
break;
......
......@@ -658,67 +658,12 @@ runtime_main(void* dummy __attribute__((unused)))
}
/* Print the header line of a traceback for GP, e.g.
   "goroutine 7 [chan receive, 3 minutes]:".  */
void
runtime_goroutineheader(G *gp)
{
	String status;
	int64 waitfor;

	/* Map the goroutine status to a human-readable label.  */
	switch(gp->atomicstatus) {
	case _Gidle:
		status = runtime_gostringnocopy((const byte*)"idle");
		break;
	case _Grunnable:
		status = runtime_gostringnocopy((const byte*)"runnable");
		break;
	case _Grunning:
		status = runtime_gostringnocopy((const byte*)"running");
		break;
	case _Gsyscall:
		status = runtime_gostringnocopy((const byte*)"syscall");
		break;
	case _Gwaiting:
		/* Prefer the specific wait reason when one was recorded.  */
		if(gp->waitreason.len > 0)
			status = gp->waitreason;
		else
			status = runtime_gostringnocopy((const byte*)"waiting");
		break;
	default:
		status = runtime_gostringnocopy((const byte*)"???");
		break;
	}

	// approx time the G is blocked, in minutes
	waitfor = 0;
	if((gp->atomicstatus == _Gwaiting || gp->atomicstatus == _Gsyscall) && gp->waitsince != 0)
		waitfor = (runtime_nanotime() - gp->waitsince) / (60LL*1000*1000*1000);

	if(waitfor < 1)
		runtime_printf("goroutine %D [%S]:\n", gp->goid, status);
	else
		runtime_printf("goroutine %D [%S, %D minutes]:\n", gp->goid, status, waitfor);
}
/* Print the "created by" line of a traceback for G: the function
   containing the go statement that started the goroutine and its
   file:line.  The main goroutine (goid 1) and goroutines with no
   recorded creation PC are skipped.  */
void
runtime_printcreatedby(G *g)
{
	if(g != nil && g->gopc != 0 && g->goid != 1) {
		String fn;
		String file;
		intgo line;

		/* gopc - 1 backs up into the call instruction so the
		   reported line is the call site, not the return address.  */
		if(__go_file_line(g->gopc - 1, -1, &fn, &file, &line)) {
			runtime_printf("created by %S\n", fn);
			runtime_printf("\t%S:%D\n", file, (int64) line);
		}
	}
}
void
runtime_tracebackothers(G * volatile me)
{
G * volatile gp;
Traceback tb;
int32 traceback;
Slice slice;
volatile uintptr i;
tb.gp = me;
......@@ -739,7 +684,10 @@ runtime_tracebackothers(G * volatile me)
runtime_gogo(gp);
}
runtime_printtrace(tb.locbuf, tb.c, false);
slice.__values = &tb.locbuf[0];
slice.__count = tb.c;
slice.__capacity = tb.c;
runtime_printtrace(slice, nil);
runtime_printcreatedby(gp);
}
......@@ -780,7 +728,10 @@ runtime_tracebackothers(G * volatile me)
runtime_gogo(gp);
}
runtime_printtrace(tb.locbuf, tb.c, false);
slice.__values = &tb.locbuf[0];
slice.__count = tb.c;
slice.__capacity = tb.c;
runtime_printtrace(slice, nil);
runtime_printcreatedby(gp);
}
}
......@@ -3597,3 +3548,28 @@ sync_runtime_doSpin()
{
runtime_procyield(ACTIVE_SPIN_CNT);
}
// For Go code to look at variables, until we port proc.go.

/* Exported to Go as runtime.allm: returns the address of the global
   M list head so Go code can walk the chain of Ms.  */
extern M** runtime_go_allm(void)
     __asm__ (GOSYM_PREFIX "runtime.allm");

M**
runtime_go_allm()
{
	return &runtime_allm;
}
/* Exported to Go as runtime.allgs: returns the global goroutine list
   as a Go slice backed by the runtime_allg array (length
   runtime_allglen, capacity allgcap).  */
extern Slice runtime_go_allgs(void)
     __asm__ (GOSYM_PREFIX "runtime.allgs");

Slice
runtime_go_allgs()
{
	Slice s;

	s.__values = runtime_allg;
	s.__count = runtime_allglen;
	s.__capacity = allgcap;
	return s;
}
......@@ -90,18 +90,6 @@ runtime_cputicks(void)
#endif
}
/* Report whether to print the frame named S in a traceback.  CURRENT
   says whether the frame belongs to the currently running goroutine.  */
bool
runtime_showframe(String s, bool current)
{
	static int32 traceback = -1;

	/* Always show frames of the current goroutine while its thread
	   is throwing.  */
	if(current && runtime_m()->throwing > 0)
		return 1;
	/* Cache the GOTRACEBACK level on first use.  */
	if(traceback < 0)
		traceback = runtime_gotraceback(nil);
	/* At the default level, hide names without a dot (non-Go
	   symbols) and runtime-internal functions.  */
	return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
}
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
void
......
......@@ -89,7 +89,7 @@ typedef struct __go_interface_type InterfaceType;
typedef struct __go_map_type MapType;
typedef struct __go_channel_type ChanType;
typedef struct traceback Traceback;
typedef struct tracebackg Traceback;
typedef struct location Location;
......@@ -261,8 +261,10 @@ enum {
};
void runtime_hashinit(void);
void runtime_traceback(void);
void runtime_tracebackothers(G*);
void runtime_traceback(void)
__asm__ (GOSYM_PREFIX "runtime.traceback");
void runtime_tracebackothers(G*)
__asm__ (GOSYM_PREFIX "runtime.tracebackothers");
enum
{
// The maximum number of frames we print for a traceback
......@@ -325,8 +327,10 @@ void runtime_sigenable(uint32 sig);
void runtime_sigdisable(uint32 sig);
void runtime_sigignore(uint32 sig);
int32 runtime_gotraceback(bool *crash);
void runtime_goroutineheader(G*);
void runtime_printtrace(Location*, int32, bool);
void runtime_goroutineheader(G*)
__asm__ (GOSYM_PREFIX "runtime.goroutineheader");
void runtime_printtrace(Slice, G*)
__asm__ (GOSYM_PREFIX "runtime.printtrace");
#define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
......@@ -561,8 +565,8 @@ void runtime_lockOSThread(void);
void runtime_unlockOSThread(void);
bool runtime_lockedOSThread(void);
bool runtime_showframe(String, bool);
void runtime_printcreatedby(G*);
void runtime_printcreatedby(G*)
__asm__(GOSYM_PREFIX "runtime.printcreatedby");
uintptr runtime_memlimit(void);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment