Commit 75791bab by Ian Lance Taylor

runtime: use -fgo-c-header to build C header file

    
    Use the new -fgo-c-header option to build a header file for the Go
    runtime code in libgo/go/runtime, and use the new header file in the C
    runtime code in libgo/runtime.  This will ensure that the Go code and C
    code share the same data structures as we convert the runtime from C to
    Go.
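    
    As a rough illustration of what that sharing means in practice (a
    hand-written sketch, not the actual -fgo-c-header output, whose
    exact type spellings may differ), a small Go runtime type such as
    mcachelist in the new mcache.go below corresponds to a C declaration
    that the C runtime can pick up from the generated runtime.inc:
    
        /* Sketch only: hypothetical C counterpart of the Go type
           mcachelist.  The real declaration is emitted into runtime.inc
           by the compiler and may spell the integer types differently.  */
        #include <stdint.h>
        
        struct mlink
        {
          struct mlink *next;
        };
        
        struct mcachelist
        {
          struct mlink *list;
          uint32_t nlist;
        };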
    
    The new file libgo/go/runtime/runtime2.go is copied from the Go 1.7
    release, and then edited to remove unnecessary data structures and
    modify others for use with libgo.
    
    The new file libgo/go/runtime/mcache.go is an initial version of the
    same file in the Go 1.7 release, and will be replaced by the Go 1.7
    file when we convert to the new memory allocator.
    
    The new file libgo/go/runtime/type.go describes the gccgo version of the
    reflection data structures, and replaces the Go 1.7 runtime file which
    describes the gc version of those structures.
    
    Using the new header file means changing a number of struct fields to
    use Go naming conventions (that is, no underscores) and renaming
    constants to have a leading underscore so that they are not exported
    from the Go package.  These names were updated in the C code.
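    
    For example (an illustrative sketch only; the struct is a
    hypothetical stand-in for the runtime's G and the constant value is
    made up), a check that previously read g->status == Gsyscall in the
    C code is now written with the renamed field and the unexported
    constant, as in the go-cgo.c change below:
    
        /* Sketch of the renaming pattern only; the real declarations
           come from the generated runtime.inc.  */
        enum { _Gsyscall = 3 };            /* was Gsyscall */
        
        struct g_sketch
        {
          unsigned int atomicstatus;       /* was status */
          _Bool isforeign;                 /* was is_foreign */
        };
        
        int
        in_syscall (struct g_sketch *gp)
        {
          return gp->atomicstatus == _Gsyscall;
        }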
    
    The C code was also changed to drop the thread-local variable m, as was
    done some time ago in the gc sources.  Now the m field is always
    accessed using g->m, where g is the single remaining thread-local
    variable.  This in turn required some adjustments to set g->m correctly
    in all cases.
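    
    A minimal sketch of the access-pattern change, using hypothetical
    stand-ins for the real G and M structures (which now come from
    runtime.inc):
    
        /* Illustrative only: the old C code kept a thread-local `m` and
           wrote m->ncgo; after this change the field is always reached
           through g->m, with g the only thread-local left.  */
        #include <stdio.h>
        
        typedef struct M M;
        typedef struct G G;
        
        struct M { int ncgo; };     /* hypothetical counter, like m->ncgo */
        struct G { M *m; };         /* each g records the m it runs on */
        
        static __thread G *g;       /* the single remaining TLS variable */
        
        int
        main (void)
        {
          M m0 = { 0 };
          G g0 = { &m0 };
          g = &g0;
          ++g->m->ncgo;             /* previously: ++m->ncgo */
          printf ("ncgo = %d\n", g->m->ncgo);
          return 0;
        }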
    
    Also pass the new -fgo-compiling-runtime option when compiling the
    runtime package, although that option doesn't do anything yet.
    
    Reviewed-on: https://go-review.googlesource.com/28051

From-SVN: r239872
9c91e7eeb404b5b639cd6e80e2a38da948bb35ec
394486a1cec9bbb81216311ed153179d9fe1c2c5
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
......@@ -595,6 +595,16 @@ s-version: Makefile
$(SHELL) $(srcdir)/mvifdiff.sh version.go.tmp version.go
$(STAMP) $@
runtime_sysinfo.go: s-runtime_sysinfo; @true
s-runtime_sysinfo: sysinfo.go
rm -f tmp-runtime_sysinfo.go
echo 'package runtime' > tmp-runtime_sysinfo.go
echo >> tmp-runtime_sysinfo.go
grep 'const _sizeof_ucontext_t ' sysinfo.go >> tmp-runtime_sysinfo.go
grep 'type _sigset_t ' sysinfo.go >> tmp-runtime_sysinfo.go
$(SHELL) $(srcdir)/mvifdiff.sh tmp-runtime_sysinfo.go runtime_sysinfo.go
$(STAMP) $@
noinst_DATA = zstdpkglist.go
# Generate the list of go std packages that were included in libgo
......@@ -877,6 +887,13 @@ libgolibbegin_a_CFLAGS = $(AM_CFLAGS) -fPIC
libnetgo_a_SOURCES =
libnetgo_a_LIBADD = netgo.o
# Make sure runtime.inc is built before compiling any .c file.
$(libgo_la_OBJECTS): runtime.inc
$(libgo_llgo_la_OBJECTS): runtime.inc
$(libgobegin_a_OBJECTS): runtime.inc
$(libgobegin_llgo_a_OBJECTS): runtime.inc
$(libgolibbegin_a_OBJECTS): runtime.inc
LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS))
GOCFLAGS = $(CFLAGS)
......@@ -904,7 +921,7 @@ BUILDDEPS = \
BUILDPACKAGE = \
$(MKDIR_P) $(@D); \
files=`echo $^ | sed -e 's/[^ ]*\.gox//g' -e 's/[^ ]*\.dep//'`; \
$(LTGOCOMPILE) -I . -c -fgo-pkgpath=`echo $@ | sed -e 's/.lo$$//' -e 's/-go$$//'` -o $@ $$files
$(LTGOCOMPILE) -I . -c -fgo-pkgpath=`echo $@ | sed -e 's/.lo$$//' -e 's/-go$$//'` $($(subst -,_,$(subst .,_,$(subst /,_,$@)))_GOCFLAGS) -o $@ $$files
# Build deps for netgo.o.
BUILDNETGODEPS = \
......@@ -1006,7 +1023,7 @@ bytes.lo.dep: $(srcdir)/go/bytes/*.go
$(BUILDDEPS)
bytes.lo:
$(BUILDPACKAGE)
bytes/index.lo: go/bytes/indexbyte.c
bytes/index.lo: go/bytes/indexbyte.c runtime.inc
@$(MKDIR_P) bytes
$(LTCOMPILE) -c -o bytes/index.lo $(srcdir)/go/bytes/indexbyte.c
bytes/check: $(CHECK_DEPS)
......@@ -1191,7 +1208,7 @@ reflect-go.lo:
$(BUILDPACKAGE)
reflect/check: $(CHECK_DEPS)
@$(CHECK)
reflect/makefunc_ffi_c.lo: go/reflect/makefunc_ffi_c.c
reflect/makefunc_ffi_c.lo: go/reflect/makefunc_ffi_c.c runtime.inc
@$(MKDIR_P) reflect
$(LTCOMPILE) -c -o $@ $<
.PHONY: reflect/check
......@@ -1205,13 +1222,18 @@ regexp/check: $(CHECK_DEPS)
@$(CHECK)
.PHONY: regexp/check
extra_go_files_runtime = version.go
extra_go_files_runtime = runtime_sysinfo.go version.go
@go_include@ runtime-go.lo.dep
runtime-go.lo.dep: $(srcdir)/go/runtime/*.go
runtime-go.lo.dep: $(srcdir)/go/runtime/*.go $(extra_go_files_runtime)
$(BUILDDEPS)
runtime_go_lo_GOCFLAGS = -fgo-c-header=runtime.inc.tmp -fgo-compiling-runtime
runtime-go.lo:
$(BUILDPACKAGE)
runtime.inc: s-runtime-inc; @true
s-runtime-inc: runtime-go.lo
$(SHELL) $(srcdir)/mvifdiff.sh runtime.inc.tmp runtime.inc
$(STAMP) $@
runtime/check: $(CHECK_DEPS)
@$(CHECK)
.PHONY: runtime/check
......@@ -1239,7 +1261,7 @@ strings.lo.dep: $(srcdir)/go/strings/*.go
$(BUILDDEPS)
strings.lo:
$(BUILDPACKAGE)
strings/index.lo: go/strings/indexbyte.c
strings/index.lo: go/strings/indexbyte.c runtime.inc
@$(MKDIR_P) strings
$(LTCOMPILE) -c -o strings/index.lo $(srcdir)/go/strings/indexbyte.c
strings/check: $(CHECK_DEPS)
......@@ -2062,7 +2084,7 @@ log/syslog.lo.dep: $(srcdir)/go/log/syslog/*.go
$(BUILDDEPS)
log/syslog.lo:
$(BUILDPACKAGE)
log/syslog/syslog_c.lo: go/log/syslog/syslog_c.c log/syslog.lo
log/syslog/syslog_c.lo: go/log/syslog/syslog_c.c runtime.inc log/syslog.lo
@$(MKDIR_P) log/syslog
$(LTCOMPILE) -c -o $@ $(srcdir)/go/log/syslog/syslog_c.c
log/syslog/check: $(CHECK_DEPS)
......@@ -2348,7 +2370,7 @@ sync/atomic.lo.dep: $(srcdir)/go/sync/atomic/*.go
$(BUILDDEPS)
sync/atomic.lo:
$(BUILDPACKAGE)
sync/atomic_c.lo: go/sync/atomic/atomic.c sync/atomic.lo
sync/atomic_c.lo: go/sync/atomic/atomic.c runtime.inc sync/atomic.lo
$(LTCOMPILE) -c -o $@ $(srcdir)/go/sync/atomic/atomic.c
sync/atomic/check: $(CHECK_DEPS)
@$(CHECK)
......@@ -2427,17 +2449,17 @@ unicode/utf8/check: $(CHECK_DEPS)
.PHONY: unicode/utf8/check
@go_include@ syscall.lo.dep
syscall.lo.dep: $(srcdir)/go/syscall/*.go
syscall.lo.dep: $(srcdir)/go/syscall/*.go $(extra_go_files_syscall)
$(BUILDDEPS)
syscall.lo:
$(BUILDPACKAGE)
syscall/errno.lo: go/syscall/errno.c
syscall/errno.lo: go/syscall/errno.c runtime.inc
@$(MKDIR_P) syscall
$(LTCOMPILE) -c -o $@ $<
syscall/signame.lo: go/syscall/signame.c
syscall/signame.lo: go/syscall/signame.c runtime.inc
@$(MKDIR_P) syscall
$(LTCOMPILE) -c -o $@ $<
syscall/wait.lo: go/syscall/wait.c
syscall/wait.lo: go/syscall/wait.c runtime.inc
@$(MKDIR_P) syscall
$(LTCOMPILE) -c -o $@ $<
syscall/check: $(CHECK_DEPS)
......
......@@ -1162,7 +1162,7 @@ BUILDDEPS = \
BUILDPACKAGE = \
$(MKDIR_P) $(@D); \
files=`echo $^ | sed -e 's/[^ ]*\.gox//g' -e 's/[^ ]*\.dep//'`; \
$(LTGOCOMPILE) -I . -c -fgo-pkgpath=`echo $@ | sed -e 's/.lo$$//' -e 's/-go$$//'` -o $@ $$files
$(LTGOCOMPILE) -I . -c -fgo-pkgpath=`echo $@ | sed -e 's/.lo$$//' -e 's/-go$$//'` $($(subst -,_,$(subst .,_,$(subst /,_,$@)))_GOCFLAGS) -o $@ $$files
# Build deps for netgo.o.
......@@ -1235,7 +1235,8 @@ CHECK_DEPS = $(toolexeclibgo_DATA) $(toolexeclibgoarchive_DATA) \
@HAVE_STAT_TIMESPEC_FALSE@@LIBGO_IS_SOLARIS_TRUE@matchargs_os =
@HAVE_STAT_TIMESPEC_TRUE@@LIBGO_IS_SOLARIS_TRUE@matchargs_os = --tag=solaristag
@LIBGO_IS_SOLARIS_FALSE@matchargs_os =
extra_go_files_runtime = version.go
extra_go_files_runtime = runtime_sysinfo.go version.go
runtime_go_lo_GOCFLAGS = -fgo-c-header=runtime.inc.tmp -fgo-compiling-runtime
@LIBGO_IS_BSD_TRUE@golang_org_x_net_route_lo = \
@LIBGO_IS_BSD_TRUE@ golang_org/x/net/route/route.lo
......@@ -3570,6 +3571,16 @@ s-version: Makefile
$(SHELL) $(srcdir)/mvifdiff.sh version.go.tmp version.go
$(STAMP) $@
runtime_sysinfo.go: s-runtime_sysinfo; @true
s-runtime_sysinfo: sysinfo.go
rm -f tmp-runtime_sysinfo.go
echo 'package runtime' > tmp-runtime_sysinfo.go
echo >> tmp-runtime_sysinfo.go
grep 'const _sizeof_ucontext_t ' sysinfo.go >> tmp-runtime_sysinfo.go
grep 'type _sigset_t ' sysinfo.go >> tmp-runtime_sysinfo.go
$(SHELL) $(srcdir)/mvifdiff.sh tmp-runtime_sysinfo.go runtime_sysinfo.go
$(STAMP) $@
# Generate the list of go std packages that were included in libgo
zstdpkglist.go: s-zstdpkglist; @true
s-zstdpkglist: Makefile
......@@ -3639,6 +3650,13 @@ s-epoll: Makefile
$(SHELL) $(srcdir)/mvifdiff.sh epoll.go.tmp epoll.go
$(STAMP) $@
# Make sure runtime.inc is built before compiling any .c file.
$(libgo_la_OBJECTS): runtime.inc
$(libgo_llgo_la_OBJECTS): runtime.inc
$(libgobegin_a_OBJECTS): runtime.inc
$(libgobegin_llgo_a_OBJECTS): runtime.inc
$(libgolibbegin_a_OBJECTS): runtime.inc
@go_include@ bufio.lo.dep
bufio.lo.dep: $(srcdir)/go/bufio/*.go
$(BUILDDEPS)
......@@ -3653,7 +3671,7 @@ bytes.lo.dep: $(srcdir)/go/bytes/*.go
$(BUILDDEPS)
bytes.lo:
$(BUILDPACKAGE)
bytes/index.lo: go/bytes/indexbyte.c
bytes/index.lo: go/bytes/indexbyte.c runtime.inc
@$(MKDIR_P) bytes
$(LTCOMPILE) -c -o bytes/index.lo $(srcdir)/go/bytes/indexbyte.c
bytes/check: $(CHECK_DEPS)
......@@ -3828,7 +3846,7 @@ reflect-go.lo:
$(BUILDPACKAGE)
reflect/check: $(CHECK_DEPS)
@$(CHECK)
reflect/makefunc_ffi_c.lo: go/reflect/makefunc_ffi_c.c
reflect/makefunc_ffi_c.lo: go/reflect/makefunc_ffi_c.c runtime.inc
@$(MKDIR_P) reflect
$(LTCOMPILE) -c -o $@ $<
.PHONY: reflect/check
......@@ -3843,10 +3861,14 @@ regexp/check: $(CHECK_DEPS)
.PHONY: regexp/check
@go_include@ runtime-go.lo.dep
runtime-go.lo.dep: $(srcdir)/go/runtime/*.go
runtime-go.lo.dep: $(srcdir)/go/runtime/*.go $(extra_go_files_runtime)
$(BUILDDEPS)
runtime-go.lo:
$(BUILDPACKAGE)
runtime.inc: s-runtime-inc; @true
s-runtime-inc: runtime-go.lo
$(SHELL) $(srcdir)/mvifdiff.sh runtime.inc.tmp runtime.inc
$(STAMP) $@
runtime/check: $(CHECK_DEPS)
@$(CHECK)
.PHONY: runtime/check
......@@ -3874,7 +3896,7 @@ strings.lo.dep: $(srcdir)/go/strings/*.go
$(BUILDDEPS)
strings.lo:
$(BUILDPACKAGE)
strings/index.lo: go/strings/indexbyte.c
strings/index.lo: go/strings/indexbyte.c runtime.inc
@$(MKDIR_P) strings
$(LTCOMPILE) -c -o strings/index.lo $(srcdir)/go/strings/indexbyte.c
strings/check: $(CHECK_DEPS)
......@@ -4688,7 +4710,7 @@ log/syslog.lo.dep: $(srcdir)/go/log/syslog/*.go
$(BUILDDEPS)
log/syslog.lo:
$(BUILDPACKAGE)
log/syslog/syslog_c.lo: go/log/syslog/syslog_c.c log/syslog.lo
log/syslog/syslog_c.lo: go/log/syslog/syslog_c.c runtime.inc log/syslog.lo
@$(MKDIR_P) log/syslog
$(LTCOMPILE) -c -o $@ $(srcdir)/go/log/syslog/syslog_c.c
log/syslog/check: $(CHECK_DEPS)
......@@ -4970,7 +4992,7 @@ sync/atomic.lo.dep: $(srcdir)/go/sync/atomic/*.go
$(BUILDDEPS)
sync/atomic.lo:
$(BUILDPACKAGE)
sync/atomic_c.lo: go/sync/atomic/atomic.c sync/atomic.lo
sync/atomic_c.lo: go/sync/atomic/atomic.c runtime.inc sync/atomic.lo
$(LTCOMPILE) -c -o $@ $(srcdir)/go/sync/atomic/atomic.c
sync/atomic/check: $(CHECK_DEPS)
@$(CHECK)
......@@ -5049,17 +5071,17 @@ unicode/utf8/check: $(CHECK_DEPS)
.PHONY: unicode/utf8/check
@go_include@ syscall.lo.dep
syscall.lo.dep: $(srcdir)/go/syscall/*.go
syscall.lo.dep: $(srcdir)/go/syscall/*.go $(extra_go_files_syscall)
$(BUILDDEPS)
syscall.lo:
$(BUILDPACKAGE)
syscall/errno.lo: go/syscall/errno.c
syscall/errno.lo: go/syscall/errno.c runtime.inc
@$(MKDIR_P) syscall
$(LTCOMPILE) -c -o $@ $<
syscall/signame.lo: go/syscall/signame.c
syscall/signame.lo: go/syscall/signame.c runtime.inc
@$(MKDIR_P) syscall
$(LTCOMPILE) -c -o $@ $<
syscall/wait.lo: go/syscall/wait.c
syscall/wait.lo: go/syscall/wait.c runtime.inc
@$(MKDIR_P) syscall
$(LTCOMPILE) -c -o $@ $<
syscall/check: $(CHECK_DEPS)
......
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// This is a temporary mcache.go for gccgo.
// At some point it will be replaced by the one in the gc runtime package.
import "unsafe"
const (
// Computed constant. The definition of MaxSmallSize and the
// algorithm in msize.go produces some number of different allocation
// size classes. NumSizeClasses is that number. It's needed here
// because there are static arrays of this length; when msize runs its
// size choosing algorithm it double-checks that NumSizeClasses agrees.
_NumSizeClasses = 67
)
type mcachelist struct {
list *mlink
nlist uint32
}
// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
type mcache struct {
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
next_sample int32 // trigger heap sample after allocating this many bytes
local_cachealloc uintptr // bytes allocated (or freed) from cache since last lock of heap
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.go.
// tiny points to the beginning of the current tiny block, or
// nil if there is no current tiny block.
//
// tiny is a heap pointer. Since mcache is in non-GC'd memory,
// we handle it by clearing it in releaseAll during mark
// termination.
tiny unsafe.Pointer
tinysize uintptr
// The rest is not accessed on every malloc.
alloc [_NumSizeClasses]*mspan // spans to allocate from
free [_NumSizeClasses]mcachelist // lists of explicitly freed objects
// Local allocator stats, flushed during GC.
local_nlookup uintptr // number of pointer lookups
local_largefree uintptr // bytes freed for large objects (>maxsmallsize)
local_nlargefree uintptr // number of frees for large objects (>maxsmallsize)
local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
}
type mtypes struct {
compression byte
data uintptr
}
type special struct {
next *special
offset uint16
kind byte
}
type mspan struct {
next *mspan // next span in list, or nil if none
prev *mspan // previous span's next field, or list head's first field if none
start uintptr
npages uintptr // number of pages in span
freelist *mlink
// sweep generation:
// if sweepgen == h->sweepgen - 2, the span needs sweeping
// if sweepgen == h->sweepgen - 1, the span is currently being swept
// if sweepgen == h->sweepgen, the span is swept and ready to use
// h->sweepgen is incremented by 2 after every GC
sweepgen uint32
ref uint16
sizeclass uint8 // size class
incache bool // being used by an mcache
state uint8 // mspaninuse etc
needzero uint8 // needs to be zeroed before allocation
elemsize uintptr // computed from sizeclass or from npages
unusedsince int64 // first time spotted by gc in mspanfree state
npreleased uintptr // number of pages released to the os
limit uintptr // end of data in span
types mtypes
speciallock mutex // guards specials list
specials *special // linked list of special records sorted by offset.
freebuf *mlink
}
type mlink struct {
next *mlink
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Runtime type representation.
package runtime
import "unsafe"
type _type struct {
kind uint8
align int8
fieldAlign uint8
_ uint8
size uintptr
hash uint32
hashfn func(unsafe.Pointer, uintptr) uintptr
equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool
gc unsafe.Pointer
string *string
*uncommonType
ptrToThis *_type
}
type method struct {
name *string
pkgPath *string
mtyp *_type
typ *_type
tfn unsafe.Pointer
}
type uncommonType struct {
name *string
pkgPath *string
methods []method
}
type imethod struct {
name *string
pkgPath *string
typ *_type
}
type interfaceType struct {
typ _type
methods []imethod
}
type mapType struct {
typ _type
key *_type
elem *_type
}
type arrayType struct {
typ _type
elem *_type
slice *_type
len uintptr
}
type chanType struct {
typ _type
elem *_type
dir uintptr
}
type slicetype struct {
typ _type
elem *_type
}
type functype struct {
typ _type
dotdotdot bool
in []*_type
out []*_type
}
type ptrtype struct {
typ _type
elem *_type
}
type structfield struct {
name *string // nil for embedded fields
pkgPath *string // nil for exported Names; otherwise import path
typ *_type // type of field
tag *string // nil if no tag
offset uintptr // byte offset of field within struct
}
type structtype struct {
typ _type
fields []structfield
}
......@@ -29,6 +29,7 @@ cat > sysinfo.c <<EOF
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <ucontext.h>
#include <netinet/in.h>
/* <netinet/tcp.h> needs u_char/u_short, but <sys/bsd_types> is only
included by <netinet/in.h> if _SGIAPI (i.e. _SGI_SOURCE
......
......@@ -36,7 +36,6 @@ void
syscall_cgocall ()
{
M* m;
G* g;
if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
runtime_newextram ();
......@@ -45,8 +44,7 @@ syscall_cgocall ()
m = runtime_m ();
++m->ncgocall;
g = runtime_g ();
++g->ncgo;
++m->ncgo;
runtime_entersyscall ();
}
......@@ -59,18 +57,18 @@ syscall_cgocalldone ()
g = runtime_g ();
__go_assert (g != NULL);
--g->ncgo;
if (g->ncgo == 0)
--g->m->ncgo;
if (g->m->ncgo == 0)
{
/* We are going back to Go, and we are not in a recursive call.
Let the garbage collector clean up any unreferenced
memory. */
g->cgomal = NULL;
g->m->cgomal = NULL;
}
/* If we are invoked because the C function called _cgo_panic, then
_cgo_panic will already have exited syscall mode. */
if (g->status == Gsyscall)
if (g->atomicstatus == _Gsyscall)
runtime_exitsyscall ();
runtime_unlockOSThread();
......@@ -93,7 +91,7 @@ syscall_cgocallback ()
runtime_exitsyscall ();
if (runtime_g ()->ncgo == 0)
if (runtime_m ()->ncgo == 0)
{
/* The C call to Go came from a thread not currently running any
Go. In the case of -buildmode=c-archive or c-shared, this
......@@ -119,7 +117,7 @@ syscall_cgocallbackdone ()
runtime_entersyscall ();
mp = runtime_m ();
if (mp->dropextram && runtime_g ()->ncgo == 0)
if (mp->dropextram && mp->ncgo == 0)
{
mp->dropextram = false;
runtime_dropm ();
......@@ -133,16 +131,16 @@ void *
alloc_saved (size_t n)
{
void *ret;
G *g;
M *m;
CgoMal *c;
ret = __go_alloc (n);
g = runtime_g ();
m = runtime_m ();
c = (CgoMal *) __go_alloc (sizeof (CgoMal));
c->next = g->cgomal;
c->next = m->cgomal;
c->alloc = ret;
g->cgomal = c;
m->cgomal = c;
return ret;
}
......
......@@ -9,7 +9,6 @@
#include "runtime.h"
#include "go-alloc.h"
#include "go-panic.h"
#include "go-defer.h"
/* This function is called each time we need to defer a call. */
......@@ -17,19 +16,19 @@ void
__go_defer (_Bool *frame, void (*pfn) (void *), void *arg)
{
G *g;
struct __go_defer_stack *n;
Defer *n;
g = runtime_g ();
n = runtime_newdefer ();
n->__next = g->defer;
n->__frame = frame;
n->__panic = g->panic;
n->__pfn = pfn;
n->__arg = arg;
n->__retaddr = NULL;
n->__makefunc_can_recover = 0;
n->__special = 0;
g->defer = n;
n->next = g->_defer;
n->frame = frame;
n->_panic = g->_panic;
n->pfn = (uintptr) pfn;
n->arg = arg;
n->retaddr = 0;
n->makefunccanrecover = 0;
n->special = 0;
g->_defer = n;
}
/* This function is called when we want to undefer the stack. */
......@@ -40,19 +39,19 @@ __go_undefer (_Bool *frame)
G *g;
g = runtime_g ();
while (g->defer != NULL && g->defer->__frame == frame)
while (g->_defer != NULL && g->_defer->frame == frame)
{
struct __go_defer_stack *d;
Defer *d;
void (*pfn) (void *);
d = g->defer;
pfn = d->__pfn;
d->__pfn = NULL;
d = g->_defer;
pfn = (void (*) (void *)) d->pfn;
d->pfn = 0;
if (pfn != NULL)
(*pfn) (d->__arg);
(*pfn) (d->arg);
g->defer = d->__next;
g->_defer = d->next;
/* This may be called by a cgo callback routine to defer the
call to syscall.CgocallBackDone, in which case we will not
......@@ -79,7 +78,7 @@ __go_set_defer_retaddr (void *retaddr)
G *g;
g = runtime_g ();
if (g->defer != NULL)
g->defer->__retaddr = __builtin_extract_return_addr (retaddr);
if (g->_defer != NULL)
g->_defer->retaddr = (uintptr) __builtin_extract_return_addr (retaddr);
return 0;
}
/* go-defer.h -- the defer stack.
Copyright 2010 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file. */
struct __go_panic_stack;
/* The defer stack is a list of these structures. */
struct __go_defer_stack
{
/* The next entry in the stack. */
struct __go_defer_stack *__next;
/* The stack variable for the function which called this defer
statement. This is set to 1 if we are returning from that
function, 0 if we are panicing through it. */
_Bool *__frame;
/* The value of the panic stack when this function is deferred.
This function can not recover this value from the panic stack.
This can happen if a deferred function has a defer statement
itself. */
struct __go_panic_stack *__panic;
/* The function to call. */
void (*__pfn) (void *);
/* The argument to pass to the function. */
void *__arg;
/* The return address that a recover thunk matches against. This is
set by __go_set_defer_retaddr which is called by the thunks
created by defer statements. */
const void *__retaddr;
/* Set to true if a function created by reflect.MakeFunc is
permitted to recover. The return address of such a function
function will be somewhere in libffi, so __retaddr is not
useful. */
_Bool __makefunc_can_recover;
/* Set to true if this defer stack entry is not part of the defer
pool. */
_Bool __special;
};
......@@ -8,7 +8,6 @@
#include "runtime.h"
#include "go-panic.h"
#include "go-defer.h"
/* This is called when a call to recover is deferred. That is,
something like
......@@ -82,7 +81,7 @@ __go_deferred_recover ()
G *g;
g = runtime_g ();
if (g->defer == NULL || g->defer->__panic != g->panic)
if (g->_defer == NULL || g->_defer->_panic != g->_panic)
{
struct __go_empty_interface ret;
......
......@@ -11,23 +11,22 @@
#include "arch.h"
#include "malloc.h"
#include "go-alloc.h"
#include "go-defer.h"
#include "go-panic.h"
#include "interface.h"
/* Print the panic stack. This is used when there is no recover. */
static void
__printpanics (struct __go_panic_stack *p)
__printpanics (Panic *p)
{
if (p->__next != NULL)
if (p->next != NULL)
{
__printpanics (p->__next);
__printpanics (p->next);
runtime_printf ("\t");
}
runtime_printf ("panic: ");
runtime_printany (p->__arg);
if (p->__was_recovered)
runtime_printany (p->arg);
if (p->recovered)
runtime_printf (" [recovered]");
runtime_printf ("\n");
}
......@@ -39,39 +38,39 @@ void
__go_panic (struct __go_empty_interface arg)
{
G *g;
struct __go_panic_stack *n;
Panic *n;
g = runtime_g ();
n = (struct __go_panic_stack *) __go_alloc (sizeof (struct __go_panic_stack));
n->__arg = arg;
n->__next = g->panic;
g->panic = n;
n = (Panic *) __go_alloc (sizeof (Panic));
n->arg = arg;
n->next = g->_panic;
g->_panic = n;
/* Run all the defer functions. */
while (1)
{
struct __go_defer_stack *d;
Defer *d;
void (*pfn) (void *);
d = g->defer;
d = g->_defer;
if (d == NULL)
break;
pfn = d->__pfn;
d->__pfn = NULL;
pfn = (void (*) (void *)) d->pfn;
d->pfn = 0;
if (pfn != NULL)
{
(*pfn) (d->__arg);
(*pfn) (d->arg);
if (n->__was_recovered)
if (n->recovered)
{
/* Some defer function called recover. That means that
we should stop running this panic. */
g->panic = n->__next;
g->_panic = n->next;
__go_free (n);
/* Now unwind the stack by throwing an exception. The
......@@ -91,10 +90,10 @@ __go_panic (struct __go_empty_interface arg)
it did not call recover, we know that we are not
returning from the calling function--we are panicing
through it. */
*d->__frame = 0;
*d->frame = 0;
}
g->defer = d->__next;
g->_defer = d->next;
/* This may be called by a cgo callback routine to defer the
call to syscall.CgocallBackDone, in which case we will not
......@@ -107,6 +106,6 @@ __go_panic (struct __go_empty_interface arg)
/* The panic was not recovered. */
runtime_startpanic ();
__printpanics (g->panic);
__printpanics (g->_panic);
runtime_dopanic (0);
}
......@@ -11,25 +11,6 @@
struct String;
struct __go_type_descriptor;
struct __go_defer_stack;
/* The stack of panic calls. */
struct __go_panic_stack
{
/* The next entry in the stack. */
struct __go_panic_stack *__next;
/* The value associated with this panic. */
struct __go_empty_interface __arg;
/* Whether this panic has been recovered. */
_Bool __was_recovered;
/* Whether this panic was pushed on the stack because of an
exception thrown in some other language. */
_Bool __is_foreign;
};
extern void __go_panic (struct __go_empty_interface)
__attribute__ ((noreturn));
......@@ -42,8 +23,8 @@ extern _Bool __go_can_recover (void *);
extern void __go_makefunc_can_recover (void *retaddr);
struct Location;
extern void __go_makefunc_ffi_can_recover (struct Location *, int);
struct location;
extern void __go_makefunc_ffi_can_recover (struct location *, int);
extern void __go_makefunc_returning (void);
......
......@@ -7,33 +7,32 @@
#include "runtime.h"
#include "interface.h"
#include "go-panic.h"
#include "go-defer.h"
/* If the top of the defer stack can be recovered, then return it.
Otherwise return NULL. */
static struct __go_defer_stack *
static Defer *
current_defer ()
{
G *g;
struct __go_defer_stack *d;
Defer *d;
g = runtime_g ();
d = g->defer;
d = g->_defer;
if (d == NULL)
return NULL;
/* The panic which would be recovered is the one on the top of the
panic stack. We do not want to recover it if that panic was on
the top of the panic stack when this function was deferred. */
if (d->__panic == g->panic)
if (d->_panic == g->_panic)
return NULL;
/* The deferred thunk will call _go_set_defer_retaddr. If this has
not happened, then we have not been called via defer, and we can
not recover. */
if (d->__retaddr == NULL)
if (d->retaddr == 0)
return NULL;
return d;
......@@ -48,7 +47,7 @@ current_defer ()
_Bool
__go_can_recover (void *retaddr)
{
struct __go_defer_stack *d;
Defer *d;
const char* ret;
const char* dret;
Location locs[16];
......@@ -64,7 +63,7 @@ __go_can_recover (void *retaddr)
ret = (const char *) __builtin_extract_return_addr (retaddr);
dret = (const char *) d->__retaddr;
dret = (const char *) (uintptr) d->retaddr;
if (ret <= dret && ret + 16 >= dret)
return 1;
......@@ -111,7 +110,7 @@ __go_can_recover (void *retaddr)
/* If the function calling recover was created by reflect.MakeFunc,
then __go_makefunc_can_recover or __go_makefunc_ffi_can_recover
will have set the __makefunc_can_recover field. */
if (!d->__makefunc_can_recover)
if (!d->makefunccanrecover)
return 0;
/* We look up the stack, ignoring libffi functions and functions in
......@@ -178,7 +177,7 @@ __go_can_recover (void *retaddr)
void
__go_makefunc_can_recover (void *retaddr)
{
struct __go_defer_stack *d;
Defer *d;
d = current_defer ();
if (d == NULL)
......@@ -186,24 +185,24 @@ __go_makefunc_can_recover (void *retaddr)
/* If we are already in a call stack of MakeFunc functions, there is
nothing we can usefully check here. */
if (d->__makefunc_can_recover)
if (d->makefunccanrecover)
return;
if (__go_can_recover (retaddr))
d->__makefunc_can_recover = 1;
d->makefunccanrecover = 1;
}
/* This function is called when code is about to enter a function
created by the libffi version of reflect.MakeFunc. This function
is passed the names of the callers of the libffi code that called
the stub. It uses to decide whether it is permitted to call
recover, and sets d->__makefunc_can_recover so that __go_recover
can make the same decision. */
recover, and sets d->makefunccanrecover so that __go_recover can
make the same decision. */
void
__go_makefunc_ffi_can_recover (struct Location *loc, int n)
__go_makefunc_ffi_can_recover (struct location *loc, int n)
{
struct __go_defer_stack *d;
Defer *d;
const byte *name;
intgo len;
......@@ -213,7 +212,7 @@ __go_makefunc_ffi_can_recover (struct Location *loc, int n)
/* If we are already in a call stack of MakeFunc functions, there is
nothing we can usefully check here. */
if (d->__makefunc_can_recover)
if (d->makefunccanrecover)
return;
/* LOC points to the caller of our caller. That will be a thunk.
......@@ -228,26 +227,26 @@ __go_makefunc_ffi_can_recover (struct Location *loc, int n)
if (len > 4
&& __builtin_strchr ((const char *) name, '.') == NULL
&& __builtin_strncmp ((const char *) name, "__go_", 4) == 0)
d->__makefunc_can_recover = 1;
d->makefunccanrecover = 1;
}
/* This function is called when code is about to exit a function
created by reflect.MakeFunc. It is called by the function stub
used by MakeFunc. It clears the __makefunc_can_recover field.
It's OK to always clear this field, because __go_can_recover will
only be called by a stub created for a function that calls recover.
That stub will not call a function created by reflect.MakeFunc, so
by the time we get here any caller higher up on the call stack no
used by MakeFunc. It clears the makefunccanrecover field. It's OK
to always clear this field, because __go_can_recover will only be
called by a stub created for a function that calls recover. That
stub will not call a function created by reflect.MakeFunc, so by
the time we get here any caller higher up on the call stack no
longer needs the information. */
void
__go_makefunc_returning (void)
{
struct __go_defer_stack *d;
Defer *d;
d = runtime_g ()->defer;
d = runtime_g ()->_defer;
if (d != NULL)
d->__makefunc_can_recover = 0;
d->makefunccanrecover = 0;
}
/* This is only called when it is valid for the caller to recover the
......@@ -257,11 +256,11 @@ struct __go_empty_interface
__go_recover ()
{
G *g;
struct __go_panic_stack *p;
Panic *p;
g = runtime_g ();
if (g->panic == NULL || g->panic->__was_recovered)
if (g->_panic == NULL || g->_panic->recovered)
{
struct __go_empty_interface ret;
......@@ -269,7 +268,7 @@ __go_recover ()
ret.__object = NULL;
return ret;
}
p = g->panic;
p->__was_recovered = 1;
return p->__arg;
p = g->_panic;
p->recovered = 1;
return p->arg;
}
......@@ -26,11 +26,11 @@ extern void __splitstack_setcontext(void *context[10]);
#endif
#define N SigNotify
#define K SigKill
#define T SigThrow
#define P SigPanic
#define D SigDefault
#define N _SigNotify
#define K _SigKill
#define T _SigThrow
#define P _SigPanic
#define D _SigDefault
/* Signal actions. This collects the sigtab tables for several
different targets from the master library. SIGKILL and SIGSTOP are
......@@ -182,14 +182,14 @@ runtime_sighandler (int sig, Siginfo *info,
#ifdef SA_SIGINFO
notify = info != NULL && info->si_code == SI_USER;
#endif
if (notify || (t->flags & SigNotify) != 0)
if (notify || (t->flags & _SigNotify) != 0)
{
if (__go_sigsend (sig))
return;
}
if ((t->flags & SigKill) != 0)
if ((t->flags & _SigKill) != 0)
runtime_exit (2);
if ((t->flags & SigThrow) == 0)
if ((t->flags & _SigThrow) == 0)
return;
runtime_startpanic ();
......@@ -320,7 +320,7 @@ sig_panic_info_handler (int sig, Siginfo *info, void *context)
#endif
}
/* All signals with SigPanic should be in cases above, and this
/* All signals with _SigPanic should be in cases above, and this
handler should only be invoked for those signals. */
__builtin_unreachable ();
}
......@@ -365,7 +365,7 @@ sig_panic_handler (int sig)
#endif
}
/* All signals with SigPanic should be in cases above, and this
/* All signals with _SigPanic should be in cases above, and this
handler should only be invoked for those signals. */
__builtin_unreachable ();
}
......@@ -406,7 +406,7 @@ sig_tramp_info (int sig, Siginfo *info, void *context)
/* We are running on the signal stack. Set the split stack
context so that the stack guards are checked correctly. */
#ifdef USING_SPLIT_STACK
__splitstack_setcontext (&mp->gsignal->stack_context[0]);
__splitstack_setcontext (&mp->gsignal->stackcontext[0]);
#endif
}
......@@ -451,7 +451,7 @@ runtime_setsig (int32 i, GoSighandler *fn, bool restart)
t = &runtime_sigtab[i];
if ((t->flags & SigPanic) == 0)
if ((t->flags & _SigPanic) == 0)
{
#ifdef SA_SIGINFO
sa.sa_flags = SA_ONSTACK | SA_SIGINFO;
......
......@@ -15,7 +15,6 @@
#include "runtime.h"
#include "go-alloc.h"
#include "go-defer.h"
#include "go-panic.h"
/* The code for a Go exception. */
......@@ -57,43 +56,42 @@ __go_check_defer (_Bool *frame)
/* Some other language has thrown an exception. We know there
are no defer handlers, so there is nothing to do. */
}
else if (g->is_foreign)
else if (g->isforeign)
{
struct __go_panic_stack *n;
_Bool was_recovered;
Panic *n;
_Bool recovered;
/* Some other language has thrown an exception. We need to run
the local defer handlers. If they call recover, we stop
unwinding the stack here. */
n = ((struct __go_panic_stack *)
__go_alloc (sizeof (struct __go_panic_stack)));
n = (Panic *) __go_alloc (sizeof (Panic));
n->__arg.__type_descriptor = NULL;
n->__arg.__object = NULL;
n->__was_recovered = 0;
n->__is_foreign = 1;
n->__next = g->panic;
g->panic = n;
n->arg.__type_descriptor = NULL;
n->arg.__object = NULL;
n->recovered = 0;
n->isforeign = 1;
n->next = g->_panic;
g->_panic = n;
while (1)
{
struct __go_defer_stack *d;
Defer *d;
void (*pfn) (void *);
d = g->defer;
if (d == NULL || d->__frame != frame || d->__pfn == NULL)
d = g->_defer;
if (d == NULL || d->frame != frame || d->pfn == 0)
break;
pfn = d->__pfn;
g->defer = d->__next;
pfn = (void (*) (void *)) d->pfn;
g->_defer = d->next;
(*pfn) (d->__arg);
(*pfn) (d->arg);
if (runtime_m () != NULL)
runtime_freedefer (d);
if (n->__was_recovered)
if (n->recovered)
{
/* The recover function caught the panic thrown by some
other language. */
......@@ -101,11 +99,11 @@ __go_check_defer (_Bool *frame)
}
}
was_recovered = n->__was_recovered;
g->panic = n->__next;
recovered = n->recovered;
g->_panic = n->next;
__go_free (n);
if (was_recovered)
if (recovered)
{
/* Just return and continue executing Go code. */
*frame = 1;
......@@ -115,17 +113,17 @@ __go_check_defer (_Bool *frame)
/* We are panicing through this function. */
*frame = 0;
}
else if (g->defer != NULL
&& g->defer->__pfn == NULL
&& g->defer->__frame == frame)
else if (g->_defer != NULL
&& g->_defer->pfn == 0
&& g->_defer->frame == frame)
{
struct __go_defer_stack *d;
Defer *d;
/* This is the defer function which called recover. Simply
return to stop the stack unwind, and let the Go code continue
to execute. */
d = g->defer;
g->defer = d->__next;
d = g->_defer;
g->_defer = d->next;
if (runtime_m () != NULL)
runtime_freedefer (d);
......@@ -432,7 +430,7 @@ PERSONALITY_FUNCTION (int version,
else
{
g->exception = ue_header;
g->is_foreign = is_foreign;
g->isforeign = is_foreign;
}
_Unwind_SetGR (context, __builtin_eh_return_data_regno (0),
......
......@@ -14,7 +14,6 @@
#include "malloc.h"
#include "mgc0.h"
#include "go-type.h"
#include "go-defer.h"
#include "go-panic.h"
#define hash __hash
......@@ -265,15 +264,15 @@ dumpgoroutine(G *gp)
dumpint((uintptr)0);
dumpint(gp->goid);
dumpint(gp->gopc);
dumpint(gp->status);
dumpint(gp->atomicstatus);
dumpbool(gp->issystem);
dumpbool(gp->isbackground);
dumpint(gp->waitsince);
dumpcstr((const int8 *)gp->waitreason);
dumpstr(gp->waitreason);
dumpint((uintptr)0);
dumpint((uintptr)gp->m);
dumpint((uintptr)gp->defer);
dumpint((uintptr)gp->panic);
dumpint((uintptr)gp->_defer);
dumpint((uintptr)gp->_panic);
// dump stack
// child.args.n = -1;
......@@ -285,24 +284,24 @@ dumpgoroutine(G *gp)
// runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);
// dump defer & panic records
for(d = gp->defer; d != nil; d = d->__next) {
for(d = gp->_defer; d != nil; d = d->next) {
dumpint(TagDefer);
dumpint((uintptr)d);
dumpint((uintptr)gp);
dumpint((uintptr)d->__arg);
dumpint((uintptr)d->__frame);
dumpint((uintptr)d->__pfn);
dumpint((uintptr)d->arg);
dumpint((uintptr)d->frame);
dumpint((uintptr)d->pfn);
dumpint((uintptr)0);
dumpint((uintptr)d->__next);
dumpint((uintptr)d->next);
}
for (p = gp->panic; p != nil; p = p->__next) {
for (p = gp->_panic; p != nil; p = p->next) {
dumpint(TagPanic);
dumpint((uintptr)p);
dumpint((uintptr)gp);
dumpint((uintptr)p->__arg.__type_descriptor);
dumpint((uintptr)p->__arg.__object);
dumpint((uintptr)p->arg.__type_descriptor);
dumpint((uintptr)p->arg.__object);
dumpint((uintptr)0);
dumpint((uintptr)p->__next);
dumpint((uintptr)p->next);
}
}
......@@ -315,15 +314,15 @@ dumpgs(void)
// goroutines & stacks
for(i = 0; i < runtime_allglen; i++) {
gp = runtime_allg[i];
switch(gp->status){
switch(gp->atomicstatus){
default:
runtime_printf("unexpected G.status %d\n", gp->status);
runtime_printf("unexpected G.status %d\n", gp->atomicstatus);
runtime_throw("mark - bad status");
case Gdead:
case _Gdead:
break;
case Grunnable:
case Gsyscall:
case Gwaiting:
case _Grunnable:
case _Gsyscall:
case _Gwaiting:
dumpgoroutine(gp);
break;
}
......@@ -602,7 +601,7 @@ mdump(G *gp)
flush();
gp->param = nil;
gp->status = Grunning;
gp->atomicstatus = _Grunning;
runtime_gogo(gp);
}
......@@ -632,8 +631,8 @@ runtime_debug_WriteHeapDump(uintptr fd)
// Call dump routine on M stack.
g = runtime_g();
g->status = Gwaiting;
g->waitreason = "dumping heap";
g->atomicstatus = _Gwaiting;
g->waitreason = runtime_gostringnocopy((const byte*)"dumping heap");
runtime_mcall(mdump);
// Reset dump file.
......
......@@ -73,7 +73,7 @@ unlocked:
// for this lock, chained through m->nextwaitm.
// Queue this M.
for(;;) {
m->nextwaitm = (void*)(v&~LOCKED);
m->nextwaitm = v&~LOCKED;
if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
break;
v = (uintptr)runtime_atomicloadp((void**)&l->key);
......@@ -104,7 +104,7 @@ runtime_unlock(Lock *l)
// Other M's are waiting for the lock.
// Dequeue an M.
mp = (void*)(v&~LOCKED);
if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
if(runtime_cas(&l->key, v, mp->nextwaitm)) {
// Dequeued an M. Wake it.
runtime_semawakeup(mp);
break;
......
......@@ -92,11 +92,11 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
return &runtime_zerobase;
}
m = runtime_m();
g = runtime_g();
m = g->m;
incallback = false;
if(m->mcache == nil && g->ncgo > 0) {
if(m->mcache == nil && m->ncgo > 0) {
// For gccgo this case can occur when a cgo or SWIG function
// has an interface return type and the function
// returns a non-pointer, so memory allocation occurs
......@@ -165,11 +165,11 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
tiny = (byte*)ROUND((uintptr)tiny, 4);
else if((size&1) == 0)
tiny = (byte*)ROUND((uintptr)tiny, 2);
size1 = size + (tiny - c->tiny);
size1 = size + (tiny - (byte*)c->tiny);
if(size1 <= tinysize) {
// The object fits into existing tiny block.
v = (MLink*)tiny;
c->tiny += size1;
c->tiny = (byte*)c->tiny + size1;
c->tinysize -= size1;
m->mallocing = 0;
m->locks--;
......@@ -281,7 +281,7 @@ largealloc(uint32 flag, uintptr *sizep)
s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, !(flag & FlagNoZero));
if(s == nil)
runtime_throw("out of memory");
s->limit = (byte*)(s->start<<PageShift) + size;
s->limit = (uintptr)((byte*)(s->start<<PageShift) + size);
*sizep = npages<<PageShift;
v = (void*)(s->start << PageShift);
// setup for mark sweep
......@@ -475,7 +475,7 @@ runtime_purgecachedstats(MCache *c)
// Protected by either heap or GC lock.
h = &runtime_mheap;
mstats.heap_alloc += c->local_cachealloc;
mstats.heap_alloc += (intptr)c->local_cachealloc;
c->local_cachealloc = 0;
mstats.nlookup += c->local_nlookup;
c->local_nlookup = 0;
......@@ -493,7 +493,7 @@ extern uintptr runtime_sizeof_C_MStats
__asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
// Size of the trailing by_size array differs between Go and C,
// NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
// _NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
// sizeof_C_MStats is what C thinks about size of Go struct.
// Initialized in mallocinit because it's defined in go/runtime/mem.go.
......@@ -511,7 +511,7 @@ runtime_mallocinit(void)
uint64 i;
bool reserved;
runtime_sizeof_C_MStats = sizeof(MStats) - (NumSizeClasses - 61) * sizeof(mstats.by_size[0]);
runtime_sizeof_C_MStats = sizeof(MStats) - (_NumSizeClasses - 61) * sizeof(mstats.by_size[0]);
p = nil;
p_size = 0;
......
......@@ -82,11 +82,11 @@
typedef struct MCentral MCentral;
typedef struct MHeap MHeap;
typedef struct MSpan MSpan;
typedef struct mspan MSpan;
typedef struct MStats MStats;
typedef struct MLink MLink;
typedef struct MTypes MTypes;
typedef struct GCStats GCStats;
typedef struct mlink MLink;
typedef struct mtypes MTypes;
typedef struct gcstats GCStats;
enum
{
......@@ -100,10 +100,10 @@ enum
{
// Computed constant. The definition of MaxSmallSize and the
// algorithm in msize.c produce some number of different allocation
// size classes. NumSizeClasses is that number. It's needed here
// size classes. _NumSizeClasses is that number. It's needed here
// because there are static arrays of this length; when msize runs its
// size choosing algorithm it double-checks that NumSizeClasses agrees.
NumSizeClasses = 67,
// _NumSizeClasses is defined in runtime2.go as 67.
// Tunable constants.
MaxSmallSize = 32<<10,
......@@ -148,13 +148,6 @@ enum
#else
#define MaxMem ((uintptr)-1)
#endif
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
struct MLink
{
MLink *next;
};
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
......@@ -274,7 +267,7 @@ struct MStats
uint32 size;
uint64 nmalloc;
uint64 nfree;
} by_size[NumSizeClasses];
} by_size[_NumSizeClasses];
};
extern MStats mstats
......@@ -284,7 +277,7 @@ void runtime_updatememstats(GCStats *stats);
// Size classes. Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
// 1 <= sizeclass < NumSizeClasses, for n.
// 1 <= sizeclass < _NumSizeClasses, for n.
// Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
......@@ -293,41 +286,14 @@ void runtime_updatememstats(GCStats *stats);
int32 runtime_SizeToClass(int32);
uintptr runtime_roundupsize(uintptr);
extern int32 runtime_class_to_size[NumSizeClasses];
extern int32 runtime_class_to_allocnpages[NumSizeClasses];
extern int32 runtime_class_to_size[_NumSizeClasses];
extern int32 runtime_class_to_allocnpages[_NumSizeClasses];
extern int8 runtime_size_to_class8[1024/8 + 1];
extern int8 runtime_size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern void runtime_InitSizes(void);
typedef struct MCacheList MCacheList;
struct MCacheList
{
MLink *list;
uint32 nlist;
};
// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
struct MCache
{
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
int32 next_sample; // trigger heap sample after allocating this many bytes
intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.goc.
byte* tiny;
uintptr tinysize;
// The rest is not accessed on every malloc.
MSpan* alloc[NumSizeClasses]; // spans to allocate from
MCacheList free[NumSizeClasses];// lists of explicitly freed objects
// Local allocator stats, flushed during GC.
uintptr local_nlookup; // number of pointer lookups
uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize)
uintptr local_nlargefree; // number of frees for large objects (>MaxSmallSize)
uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
};
typedef struct mcachelist MCacheList;
MSpan* runtime_MCache_Refill(MCache *c, int32 sizeclass);
void runtime_MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size);
......@@ -364,11 +330,6 @@ enum
MTypes_Words = 2,
MTypes_Bytes = 3,
};
struct MTypes
{
byte compression; // one of MTypes_*
uintptr data;
};
enum
{
......@@ -380,13 +341,7 @@ enum
// if that happens.
};
typedef struct Special Special;
struct Special
{
Special* next; // linked list in span
uint16 offset; // span offset of object
byte kind; // kind of Special
};
typedef struct special Special;
// The described object has a finalizer set for it.
typedef struct SpecialFinalizer SpecialFinalizer;
......@@ -415,33 +370,6 @@ enum
MSpanListHead,
MSpanDead,
};
struct MSpan
{
MSpan *next; // in a span linked list
MSpan *prev; // in a span linked list
PageID start; // starting page number
uintptr npages; // number of pages in span
MLink *freelist; // list of free objects
// sweep generation:
// if sweepgen == h->sweepgen - 2, the span needs sweeping
// if sweepgen == h->sweepgen - 1, the span is currently being swept
// if sweepgen == h->sweepgen, the span is swept and ready to use
// h->sweepgen is incremented by 2 after every GC
uint32 sweepgen;
uint16 ref; // capacity - number of objects in freelist
uint8 sizeclass; // size class
bool incache; // being used by an MCache
uint8 state; // MSpanInUse etc
uint8 needzero; // needs to be zeroed before allocation
uintptr elemsize; // computed from sizeclass or from npages
int64 unusedsince; // First time spotted by GC in MSpanFree state
uintptr npreleased; // number of pages released to the OS
byte *limit; // end of data in span
MTypes types; // types of allocated objects in this span
Lock specialLock; // guards specials list
Special *specials; // linked list of special records sorted by offset.
MLink *freebuf; // objects freed explicitly, not incorporated into freelist yet
};
void runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
void runtime_MSpan_EnsureSwept(MSpan *span);
......@@ -509,7 +437,7 @@ struct MHeap
struct {
MCentral;
byte pad[64];
} central[NumSizeClasses];
} central[_NumSizeClasses];
FixAlloc spanalloc; // allocator for Span*
FixAlloc cachealloc; // allocator for MCache*
......@@ -520,7 +448,7 @@ struct MHeap
// Malloc stats.
uint64 largefree; // bytes freed for large objects (>MaxSmallSize)
uint64 nlargefree; // number of frees for large objects (>MaxSmallSize)
uint64 nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
uint64 nsmallfree[_NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
};
extern MHeap runtime_mheap;
......
......@@ -27,7 +27,7 @@ runtime_allocmcache(void)
c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
runtime_unlock(&runtime_mheap);
runtime_memclr((byte*)c, sizeof(*c));
for(i = 0; i < NumSizeClasses; i++)
for(i = 0; i < _NumSizeClasses; i++)
c->alloc[i] = &emptymspan;
// Set first allocation sample size.
......@@ -115,7 +115,7 @@ runtime_MCache_ReleaseAll(MCache *c)
MSpan *s;
MCacheList *l;
for(i=0; i<NumSizeClasses; i++) {
for(i=0; i<_NumSizeClasses; i++) {
s = c->alloc[i];
if(s != &emptymspan) {
runtime_MCentral_UncacheSpan(&runtime_mheap.central[i], s);
......
......@@ -272,7 +272,7 @@ MCentral_Grow(MCentral *c)
// Carve span into sequence of blocks.
tailp = &s->freelist;
p = (byte*)(s->start << PageShift);
s->limit = p + size*n;
s->limit = (uintptr)(p + size*n);
for(i=0; i<n; i++) {
v = (MLink*)p;
*tailp = v;
......
......@@ -321,7 +321,7 @@ markonly(const void *obj)
x = k;
x -= (uintptr)runtime_mheap.arena_start>>PageShift;
s = runtime_mheap.spans[x];
if(s == nil || k < s->start || (const byte*)obj >= s->limit || s->state != MSpanInUse)
if(s == nil || k < s->start || (uintptr)obj >= s->limit || s->state != MSpanInUse)
return false;
p = (byte*)((uintptr)s->start<<PageShift);
if(s->sizeclass == 0) {
......@@ -517,7 +517,7 @@ flushptrbuf(Scanbuf *sbuf)
x = k;
x -= (uintptr)arena_start>>PageShift;
s = runtime_mheap.spans[x];
if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse)
if(s == nil || k < s->start || (uintptr)obj >= s->limit || s->state != MSpanInUse)
continue;
p = (byte*)((uintptr)s->start<<PageShift);
if(s->sizeclass == 0) {
......@@ -651,8 +651,8 @@ static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
static uintptr chanProg[2] = {0, GC_CHAN};
// Local variables of a program fragment or loop
typedef struct Frame Frame;
struct Frame {
typedef struct GCFrame GCFrame;
struct GCFrame {
uintptr count, elemsize, b;
const uintptr *loop_or_ret;
};
......@@ -731,7 +731,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
const Type *t, *et;
Slice *sliceptr;
String *stringptr;
Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
GCFrame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
BufferList *scanbuffers;
Scanbuf sbuf;
Eface *eface;
......@@ -1057,7 +1057,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
// Stack push.
*stack_ptr-- = stack_top;
stack_top = (Frame){count, elemsize, i, pc};
stack_top = (GCFrame){count, elemsize, i, pc};
continue;
case GC_ARRAY_NEXT:
......@@ -1074,7 +1074,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_CALL:
// Stack push.
*stack_ptr-- = stack_top;
stack_top = (Frame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
stack_top = (GCFrame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
pc = (const uintptr*)((const byte*)pc + *(const int32*)(pc+2)); // target of the CALL instruction
continue;
......@@ -1357,7 +1357,7 @@ markroot(ParFor *desc, uint32 i)
gp = runtime_allg[i - RootCount];
// remember when we've first observed the G blocked
// needed only to output in traceback
if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince == 0)
if((gp->atomicstatus == _Gwaiting || gp->atomicstatus == _Gsyscall) && gp->waitsince == 0)
gp->waitsince = work.tstart;
addstackroots(gp, &wbuf);
break;
......@@ -1472,17 +1472,17 @@ handoff(Workbuf *b)
static void
addstackroots(G *gp, Workbuf **wbufp)
{
switch(gp->status){
switch(gp->atomicstatus){
default:
runtime_printf("unexpected G.status %d (goroutine %p %D)\n", gp->status, gp, gp->goid);
runtime_printf("unexpected G.status %d (goroutine %p %D)\n", gp->atomicstatus, gp, gp->goid);
runtime_throw("mark - bad status");
case Gdead:
case _Gdead:
return;
case Grunning:
case _Grunning:
runtime_throw("mark - world not stopped");
case Grunnable:
case Gsyscall:
case Gwaiting:
case _Grunnable:
case _Gsyscall:
case _Gwaiting:
break;
}
......@@ -1512,12 +1512,12 @@ addstackroots(G *gp, Workbuf **wbufp)
// the system call instead, since that won't change underfoot.
if(gp->gcstack != nil) {
sp = gp->gcstack;
spsize = gp->gcstack_size;
next_segment = gp->gcnext_segment;
next_sp = gp->gcnext_sp;
initial_sp = gp->gcinitial_sp;
spsize = gp->gcstacksize;
next_segment = gp->gcnextsegment;
next_sp = gp->gcnextsp;
initial_sp = gp->gcinitialsp;
} else {
sp = __splitstack_find_context(&gp->stack_context[0],
sp = __splitstack_find_context(&gp->stackcontext[0],
&spsize, &next_segment,
&next_sp, &initial_sp);
}
......@@ -1543,11 +1543,11 @@ addstackroots(G *gp, Workbuf **wbufp)
} else {
// Scanning another goroutine's stack.
// The goroutine is usually asleep (the world is stopped).
bottom = (byte*)gp->gcnext_sp;
bottom = (byte*)gp->gcnextsp;
if(bottom == nil)
return;
}
top = (byte*)gp->gcinitial_sp + gp->gcstack_size;
top = (byte*)gp->gcinitialsp + gp->gcstacksize;
if(top > bottom)
enqueue1(wbufp, (Obj){bottom, top - bottom, 0});
else
......@@ -2186,8 +2186,8 @@ runtime_gc(int32 force)
// switch to g0, call gc(&a), then switch back
g = runtime_g();
g->param = &a;
g->status = Gwaiting;
g->waitreason = "garbage collection";
g->atomicstatus = _Gwaiting;
g->waitreason = runtime_gostringnocopy((const byte*)"garbage collection");
runtime_mcall(mgc);
m = runtime_m();
}
......@@ -2214,7 +2214,7 @@ mgc(G *gp)
{
gc(gp->param);
gp->param = nil;
gp->status = Grunning;
gp->atomicstatus = _Grunning;
runtime_gogo(gp);
}
......@@ -2404,7 +2404,7 @@ runtime_ReadMemStats(MStats *stats)
runtime_stoptheworld();
runtime_updatememstats(nil);
// Size of the trailing by_size array differs between Go and C,
// NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
// _NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
runtime_memmove(stats, &mstats, runtime_sizeof_C_MStats);
m->gcing = 0;
m->locks++;
......
......@@ -176,7 +176,7 @@ runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool n
MSpan *s;
runtime_lock(h);
mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
runtime_m()->mcache->local_cachealloc = 0;
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
......@@ -377,7 +377,7 @@ runtime_MHeap_LookupMaybe(MHeap *h, void *v)
q = p;
q -= (uintptr)h->arena_start >> PageShift;
s = h->spans[q];
if(s == nil || p < s->start || (byte*)v >= s->limit || s->state != MSpanInUse)
if(s == nil || p < s->start || (uintptr)v >= s->limit || s->state != MSpanInUse)
return nil;
return s;
}
......@@ -387,7 +387,7 @@ void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
runtime_lock(h);
mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
runtime_m()->mcache->local_cachealloc = 0;
mstats.heap_inuse -= s->npages<<PageShift;
if(acct) {
......@@ -597,7 +597,7 @@ runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
span->unusedsince = 0;
span->npreleased = 0;
span->types.compression = MTypes_Empty;
span->specialLock.key = 0;
span->speciallock.key = 0;
span->specials = nil;
span->needzero = 0;
span->freebuf = nil;
......@@ -681,13 +681,13 @@ addspecial(void *p, Special *s)
offset = (uintptr)p - (span->start << PageShift);
kind = s->kind;
runtime_lock(&span->specialLock);
runtime_lock(&span->speciallock);
// Find splice point, check for existing record.
t = &span->specials;
while((x = *t) != nil) {
if(offset == x->offset && kind == x->kind) {
runtime_unlock(&span->specialLock);
runtime_unlock(&span->speciallock);
runtime_m()->locks--;
return false; // already exists
}
......@@ -699,7 +699,7 @@ addspecial(void *p, Special *s)
s->offset = offset;
s->next = x;
*t = s;
runtime_unlock(&span->specialLock);
runtime_unlock(&span->speciallock);
runtime_m()->locks--;
return true;
}
......@@ -725,20 +725,20 @@ removespecial(void *p, byte kind)
offset = (uintptr)p - (span->start << PageShift);
runtime_lock(&span->specialLock);
runtime_lock(&span->speciallock);
t = &span->specials;
while((s = *t) != nil) {
// This function is used for finalizers only, so we don't check for
// "interior" specials (p must be exactly equal to s->offset).
if(offset == s->offset && kind == s->kind) {
*t = s->next;
runtime_unlock(&span->specialLock);
runtime_unlock(&span->speciallock);
runtime_m()->locks--;
return s;
}
t = &s->next;
}
runtime_unlock(&span->specialLock);
runtime_unlock(&span->speciallock);
runtime_m()->locks--;
return nil;
}
......@@ -838,7 +838,7 @@ runtime_freeallspecials(MSpan *span, void *p, uintptr size)
// this is required to not cause deadlock between span->specialLock and proflock
list = nil;
offset = (uintptr)p - (span->start << PageShift);
runtime_lock(&span->specialLock);
runtime_lock(&span->speciallock);
t = &span->specials;
while((s = *t) != nil) {
if(offset + size <= s->offset)
......@@ -850,7 +850,7 @@ runtime_freeallspecials(MSpan *span, void *p, uintptr size)
} else
t = &s->next;
}
runtime_unlock(&span->specialLock);
runtime_unlock(&span->speciallock);
while(list != nil) {
s = list;
......@@ -908,7 +908,7 @@ runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
// Allocate a new span for the first half.
t = runtime_FixAlloc_Alloc(&h->spanalloc);
runtime_MSpan_Init(t, s->start, npages/2);
t->limit = (byte*)((t->start + npages/2) << PageShift);
t->limit = (uintptr)((t->start + npages/2) << PageShift);
t->state = MSpanInUse;
t->elemsize = npages << (PageShift - 1);
t->sweepgen = s->sweepgen;
......
......@@ -479,7 +479,7 @@ func GoroutineProfile(b Slice) (n int, ok bool) {
saveg(g, r++);
for(i = 0; i < runtime_allglen; i++) {
gp = runtime_allg[i];
if(gp == g || gp->status == Gdead)
if(gp == g || gp->atomicstatus == _Gdead)
continue;
saveg(gp, r++);
}
......
......@@ -29,8 +29,8 @@
#include "arch.h"
#include "malloc.h"
int32 runtime_class_to_size[NumSizeClasses];
int32 runtime_class_to_allocnpages[NumSizeClasses];
int32 runtime_class_to_size[_NumSizeClasses];
int32 runtime_class_to_allocnpages[_NumSizeClasses];
// The SizeToClass lookup is implemented using two arrays,
// one mapping sizes <= 1024 to their class and one mapping
......@@ -101,14 +101,14 @@ runtime_InitSizes(void)
runtime_class_to_size[sizeclass] = size;
sizeclass++;
}
if(sizeclass != NumSizeClasses) {
runtime_printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
runtime_throw("InitSizes - bad NumSizeClasses");
if(sizeclass != _NumSizeClasses) {
runtime_printf("sizeclass=%d _NumSizeClasses=%d\n", sizeclass, _NumSizeClasses);
runtime_throw("InitSizes - bad _NumSizeClasses");
}
// Initialize the size_to_class tables.
nextsize = 0;
for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
for (sizeclass = 1; sizeclass < _NumSizeClasses; sizeclass++) {
for(; nextsize < 1024 && nextsize <= runtime_class_to_size[sizeclass]; nextsize+=8)
runtime_size_to_class8[nextsize/8] = sizeclass;
if(nextsize >= 1024)
......@@ -120,7 +120,7 @@ runtime_InitSizes(void)
if(0) {
for(n=0; n < MaxSmallSize; n++) {
sizeclass = runtime_SizeToClass(n);
if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime_class_to_size[sizeclass] < n) {
if(sizeclass < 1 || sizeclass >= _NumSizeClasses || runtime_class_to_size[sizeclass] < n) {
runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]);
runtime_printf("incorrect SizeToClass");
goto dump;
......@@ -140,9 +140,9 @@ runtime_InitSizes(void)
dump:
if(1){
runtime_printf("NumSizeClasses=%d\n", NumSizeClasses);
runtime_printf("NumSizeClasses=%d\n", _NumSizeClasses);
runtime_printf("runtime_class_to_size:");
for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++)
for(sizeclass=0; sizeclass<_NumSizeClasses; sizeclass++)
runtime_printf(" %d", runtime_class_to_size[sizeclass]);
runtime_printf("\n\n");
runtime_printf("size_to_class8:");
......
......@@ -301,11 +301,11 @@ runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
if(mode == 'w' || mode == 'r'+'w')
wg = netpollunblock(pd, 'w', true);
if(rg) {
rg->schedlink = *gpp;
rg->schedlink = (uintptr)*gpp;
*gpp = rg;
}
if(wg) {
wg->schedlink = *gpp;
wg->schedlink = (uintptr)*gpp;
*gpp = wg;
}
}
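/*
 * Editor's note, not part of this commit: with the generated runtime.inc,
 * link fields such as g->schedlink are plain uintptr on the C side, hence
 * the explicit casts added in this hunk.  Illustrative helpers with
 * hypothetical names, assuming the surrounding runtime.h declarations:
 */
static void
setschedlink_sketch(G *gp, G *next)
{
	gp->schedlink = (uintptr)next;   // store a G* into the uintptr field
}

static G*
getschedlink_sketch(G *gp)
{
	return (G*)gp->schedlink;        // convert back when following the link
}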
......
......@@ -4,7 +4,6 @@
#include "runtime.h"
#include "malloc.h"
#include "go-defer.h"
#include "go-panic.h"
// Code related to defer, panic and recover.
......@@ -21,10 +20,10 @@ runtime_newdefer()
P *p;
d = nil;
p = runtime_m()->p;
p = (P*)runtime_m()->p;
d = p->deferpool;
if(d)
p->deferpool = d->__next;
p->deferpool = d->next;
if(d == nil) {
// deferpool is empty
d = runtime_malloc(sizeof(Defer));
......@@ -39,10 +38,10 @@ runtime_freedefer(Defer *d)
{
P *p;
if(d->__special)
if(d->special)
return;
p = runtime_m()->p;
d->__next = p->deferpool;
p = (P*)runtime_m()->p;
d->next = p->deferpool;
p->deferpool = d;
// No need to wipe out pointers in argp/pc/fn/args,
// because we empty the pool before GC.
......@@ -58,14 +57,14 @@ __go_rundefer(void)
Defer *d;
g = runtime_g();
while((d = g->defer) != nil) {
while((d = g->_defer) != nil) {
void (*pfn)(void*);
g->defer = d->__next;
pfn = d->__pfn;
d->__pfn = nil;
g->_defer = d->next;
pfn = (void (*) (void *))d->pfn;
d->pfn = 0;
if (pfn != nil)
(*pfn)(d->__arg);
(*pfn)(d->arg);
runtime_freedefer(d);
}
}
......@@ -171,7 +170,7 @@ runtime_canpanic(G *gp)
return false;
if(m->locks-m->softfloat != 0 || m->mallocing != 0 || m->throwing != 0 || m->gcing != 0 || m->dying != 0)
return false;
if(gp->status != Grunning)
if(gp->atomicstatus != _Grunning)
return false;
#ifdef GOOS_windows
if(m->libcallsp != 0)
......
......@@ -272,7 +272,8 @@ runtime_tickspersecond(void)
void
runtime_mpreinit(M *mp)
{
mp->gsignal = runtime_malg(32*1024, &mp->gsignalstack, &mp->gsignalstacksize); // OS X wants >=8K, Linux >=2K
mp->gsignal = runtime_malg(32*1024, (byte**)&mp->gsignalstack, &mp->gsignalstacksize); // OS X wants >=8K, Linux >=2K
mp->gsignal->m = mp;
}
// Called to initialize a new m (including the bootstrap m).
......
......@@ -56,24 +56,24 @@ typedef uintptr uintreg;
typedef uint8 bool;
typedef uint8 byte;
typedef struct Func Func;
typedef struct G G;
typedef struct Lock Lock;
typedef struct M M;
typedef struct P P;
typedef struct Note Note;
typedef struct g G;
typedef struct mutex Lock;
typedef struct m M;
typedef struct p P;
typedef struct note Note;
typedef struct String String;
typedef struct FuncVal FuncVal;
typedef struct SigTab SigTab;
typedef struct MCache MCache;
typedef struct mcache MCache;
typedef struct FixAlloc FixAlloc;
typedef struct Hchan Hchan;
typedef struct Timers Timers;
typedef struct Timer Timer;
typedef struct GCStats GCStats;
typedef struct gcstats GCStats;
typedef struct LFNode LFNode;
typedef struct ParFor ParFor;
typedef struct ParForThread ParForThread;
typedef struct CgoMal CgoMal;
typedef struct cgoMal CgoMal;
typedef struct PollDesc PollDesc;
typedef struct DebugVars DebugVars;
......@@ -81,8 +81,8 @@ typedef struct __go_open_array Slice;
typedef struct __go_interface Iface;
typedef struct __go_empty_interface Eface;
typedef struct __go_type_descriptor Type;
typedef struct __go_defer_stack Defer;
typedef struct __go_panic_stack Panic;
typedef struct _defer Defer;
typedef struct _panic Panic;
typedef struct __go_ptr_type PtrType;
typedef struct __go_func_type FuncType;
......@@ -90,9 +90,26 @@ typedef struct __go_interface_type InterfaceType;
typedef struct __go_map_type MapType;
typedef struct __go_channel_type ChanType;
typedef struct Traceback Traceback;
typedef struct traceback Traceback;
typedef struct Location Location;
typedef struct location Location;
struct String
{
const byte* str;
intgo len;
};
struct FuncVal
{
void (*fn)(void);
// variable-size, fn-specific data here
};
#include "array.h"
#include "interface.h"
#include "runtime.inc"
/*
* Per-CPU declaration.
......@@ -103,33 +120,6 @@ extern G* runtime_g(void);
extern M runtime_m0;
extern G runtime_g0;
/*
* defined constants
*/
enum
{
// G status
//
// If you add to this list, add to the list
// of "okay during garbage collection" status
// in mgc0.c too.
Gidle,
Grunnable,
Grunning,
Gsyscall,
Gwaiting,
Gmoribund_unused, // currently unused, but hardcoded in gdb scripts
Gdead,
};
enum
{
// P status
Pidle,
Prunning,
Psyscall,
Pgcstop,
Pdead,
};
enum
{
true = 1,
......@@ -146,184 +136,6 @@ enum
// Global <-> per-M stack segment cache transfer batch size.
StackCacheBatch = 16,
};
/*
* structures
*/
struct Lock
{
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
// Used to be a union, but unions break precise GC.
uintptr key;
};
struct Note
{
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
// Used to be a union, but unions break precise GC.
uintptr key;
};
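/*
 * Editor's note, not part of this commit: a sketch of the dual use of the
 * key word described in the comments above.  Locked_sketch and the helper
 * names are illustrative, not taken from the source.
 */
enum { Locked_sketch = 1 };   // assumed: low bit marks "held" in the sema-based build

static uint32*
futexword_sketch(Lock *l)
{
	return (uint32*)&l->key;                          // futex build: 32-bit futex state
}

static M*
semawaiters_sketch(Lock *l)
{
	return (M*)(l->key & ~(uintptr)Locked_sketch);    // sema build: head of waiting-M list
}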
struct String
{
const byte* str;
intgo len;
};
struct FuncVal
{
void (*fn)(void);
// variable-size, fn-specific data here
};
struct GCStats
{
// the struct must consist of only uint64's,
// because it is casted to uint64[].
uint64 nhandoff;
uint64 nhandoffcnt;
uint64 nprocyield;
uint64 nosyield;
uint64 nsleep;
};
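/*
 * Editor's note, not part of this commit: the all-uint64 layout matters
 * because per-M stats can be folded into a running total by walking the
 * struct as a flat uint64 array, roughly as below (hypothetical helper).
 */
static void
addgcstats_sketch(GCStats *sum, GCStats *per_m)
{
	uint64 *src, *dst;
	uintptr i;

	src = (uint64*)per_m;
	dst = (uint64*)sum;
	for(i = 0; i < sizeof(GCStats)/sizeof(uint64); i++)
		dst[i] += src[i];
	runtime_memclr((byte*)per_m, sizeof(GCStats));    // reset the per-M counters
}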
// A location in the program, used for backtraces.
struct Location
{
uintptr pc;
String filename;
String function;
intgo lineno;
};
struct G
{
Defer* defer;
Panic* panic;
void* exception; // current exception being thrown
bool is_foreign; // whether current exception from other language
void *gcstack; // if status==Gsyscall, gcstack = stackbase to use during gc
size_t gcstack_size;
void* gcnext_segment;
void* gcnext_sp;
void* gcinitial_sp;
ucontext_t gcregs;
byte* entry; // initial function
void* param; // passed parameter on wakeup
bool fromgogo; // reached from gogo
int16 status;
uint32 selgen; // valid sudog pointer
int64 goid;
int64 waitsince; // approx time when the G become blocked
const char* waitreason; // if status==Gwaiting
G* schedlink;
bool ispanic;
bool issystem; // do not output in stack dump
bool isbackground; // ignore in deadlock detector
bool paniconfault; // panic (instead of crash) on unexpected fault address
M* m; // for debuggers, but offset not hard-coded
M* lockedm;
int32 sig;
int32 writenbuf;
byte* writebuf;
uintptr sigcode0;
uintptr sigcode1;
// uintptr sigpc;
uintptr gopc; // pc of go statement that created this goroutine
int32 ncgo;
CgoMal* cgomal;
Traceback* traceback;
ucontext_t context;
void* stack_context[10];
};
struct M
{
G* g0; // goroutine with scheduling stack
G* gsignal; // signal-handling G
byte* gsignalstack;
size_t gsignalstacksize;
void (*mstartfn)(void);
G* curg; // current running goroutine
G* caughtsig; // goroutine running during fatal signal
P* p; // attached P for executing Go code (nil if not executing Go code)
P* nextp;
int32 id;
int32 mallocing;
int32 throwing;
int32 gcing;
int32 locks;
int32 softfloat;
int32 dying;
int32 profilehz;
int32 helpgc;
bool spinning; // M is out of work and is actively looking for work
bool blocked; // M is blocked on a Note
uint32 fastrand;
uint64 ncgocall; // number of cgo calls in total
int32 ncgo; // number of cgo calls currently in progress
CgoMal* cgomal;
Note park;
M* alllink; // on allm
M* schedlink;
MCache *mcache;
G* lockedg;
Location createstack[32]; // Stack that created this thread.
uint32 locked; // tracking for LockOSThread
M* nextwaitm; // next M waiting for lock
uintptr waitsema; // semaphore for parking on locks
uint32 waitsemacount;
uint32 waitsemalock;
GCStats gcstats;
bool needextram;
bool dropextram; // for gccgo: drop after call is done.
uint8 traceback;
bool (*waitunlockf)(G*, void*);
void* waitlock;
uintptr end[];
};
struct P
{
Lock;
int32 id;
uint32 status; // one of Pidle/Prunning/...
P* link;
uint32 schedtick; // incremented on every scheduler call
uint32 syscalltick; // incremented on every system call
M* m; // back-link to associated M (nil if idle)
MCache* mcache;
Defer* deferpool; // pool of available Defer structs (see panic.c)
// Cache of goroutine ids, amortizes accesses to runtime_sched.goidgen.
uint64 goidcache;
uint64 goidcacheend;
// Queue of runnable goroutines.
uint32 runqhead;
uint32 runqtail;
G* runq[256];
// Available G's (status == Gdead)
G* gfree;
int32 gfreecnt;
byte pad[64];
};
// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
enum
{
LockExternal = 1,
LockInternal = 2,
};
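/*
 * Editor's note, not part of this commit: a minimal sketch of how the
 * m->locked word packs the two counts described above (hypothetical helpers).
 */
static void
lockcount_sketch(M *mp, bool user)
{
	if(user)
		mp->locked |= LockExternal;    // boolean: a user-level LockOSThread is active
	else
		mp->locked += LockInternal;    // counter: internal nesting depth, in the upper bits
}

static void
unlockcount_sketch(M *mp, bool user)
{
	if(user)
		mp->locked &= ~(uint32)LockExternal;
	else
		mp->locked -= LockInternal;
}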
struct SigTab
{
......@@ -331,16 +143,6 @@ struct SigTab
int32 flags;
void* fwdsig;
};
enum
{
SigNotify = 1<<0, // let signal.Notify have signal, even if from kernel
SigKill = 1<<1, // if signal.Notify doesn't take it, exit quietly
SigThrow = 1<<2, // if signal.Notify doesn't take it, exit loudly
SigPanic = 1<<3, // if the signal is from the kernel, panic
SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
SigHandling = 1<<5, // our signal handler is registered
SigGoExit = 1<<6, // cause all runtime procs to exit (only used on Plan 9).
};
// Layout of in-memory per-function information prepared by linker
// See http://golang.org/s/go12symtab.
......@@ -438,14 +240,6 @@ struct ParFor
uint64 nsleep;
};
// Track memory allocated by code not written in Go during a cgo call,
// so that the garbage collector can see them.
struct CgoMal
{
CgoMal *next;
void *alloc;
};
// Holds variables parsed from GODEBUG env var.
struct DebugVars
{
......@@ -565,7 +359,7 @@ void runtime_ready(G*);
String runtime_getenv(const char*);
int32 runtime_atoi(const byte*, intgo);
void* runtime_mstart(void*);
G* runtime_malg(int32, byte**, size_t*);
G* runtime_malg(int32, byte**, uintptr*);
void runtime_mpreinit(M*);
void runtime_minit(void);
void runtime_unminit(void);
......@@ -604,7 +398,7 @@ int32 runtime_round2(int32 x); // round x up to a power of 2.
#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
void runtime_setmg(M*, G*);
void runtime_setg(G*);
void runtime_newextram(void);
#define runtime_exit(s) exit(s)
#define runtime_breakpoint() __builtin_trap()
......
......@@ -65,7 +65,7 @@ func sync.runtime_procPin() (p int) {
mp = runtime_m();
// Disable preemption.
mp->locks++;
p = mp->p->id;
p = ((P*)mp->p)->id;
}
func sync.runtime_procUnpin() {
......@@ -78,7 +78,7 @@ func sync_atomic.runtime_procPin() (p int) {
mp = runtime_m();
// Disable preemption.
mp->locks++;
p = mp->p->id;
p = ((P*)mp->p)->id;
}
func sync_atomic.runtime_procUnpin() {
......
......@@ -26,7 +26,7 @@ runtime_initsig(bool preinit)
// First call: basic setup.
for(i = 0; runtime_sigtab[i].sig != -1; i++) {
t = &runtime_sigtab[i];
if((t->flags == 0) || (t->flags & SigDefault))
if((t->flags == 0) || (t->flags & _SigDefault))
continue;
t->fwdsig = runtime_getsig(i);
......@@ -42,10 +42,10 @@ runtime_initsig(bool preinit)
}
}
if(runtime_isarchive && (t->flags&SigPanic) == 0)
if(runtime_isarchive && (t->flags&_SigPanic) == 0)
continue;
t->flags |= SigHandling;
t->flags |= _SigHandling;
runtime_setsig(i, runtime_sighandler, true);
}
}
......@@ -67,8 +67,8 @@ runtime_sigenable(uint32 sig)
if(t == nil)
return;
if((t->flags & SigNotify) && !(t->flags & SigHandling)) {
t->flags |= SigHandling;
if((t->flags & _SigNotify) && !(t->flags & _SigHandling)) {
t->flags |= _SigHandling;
t->fwdsig = runtime_getsig(i);
runtime_setsig(i, runtime_sighandler, true);
}
......@@ -92,7 +92,7 @@ runtime_sigdisable(uint32 sig)
return;
if((sig == SIGHUP || sig == SIGINT) && t->fwdsig == GO_SIG_IGN) {
t->flags &= ~SigHandling;
t->flags &= ~_SigHandling;
runtime_setsig(i, t->fwdsig, true);
}
}
......@@ -114,8 +114,8 @@ runtime_sigignore(uint32 sig)
if(t == nil)
return;
if((t->flags & SigNotify) != 0) {
t->flags &= ~SigHandling;
if((t->flags & _SigNotify) != 0) {
t->flags &= ~_SigHandling;
runtime_setsig(i, GO_SIG_IGN, true);
}
}
......