Commit 6fcb740a by Ian Lance Taylor

runtime: copy more scheduler code from Go 1.7 runtime

    
    I looked at a diff of proc.go between Go 1.7 and gccgo, and copied
    over all the easy stuff.
    
    Reviewed-on: https://go-review.googlesource.com/35090

From-SVN: r244291
parent c16880ef
-f439989e483b7c2eada6ddcf6e730a791cce603f
+d3725d876496f2cca3d6ce538e98b58c85d90bfb
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
@@ -44,7 +44,7 @@ func NumCPU() int
 // NumCgoCall returns the number of cgo calls made by the current process.
 func NumCgoCall() int64 {
     var n int64
-    for mp := (*m)(atomic.Loadp(unsafe.Pointer(allm()))); mp != nil; mp = mp.alllink {
+    for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
         n += int64(mp.ncgocall)
     }
     return n
...
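A note on the new form: allm is the head of an intrusive singly linked list of all m's, and it is published with an atomic store precisely so that readers like NumCgoCall can walk it without holding the scheduler lock. Below is a minimal stand-alone sketch of that discipline; the names node, head, push, and sum are illustrative, not the runtime's.

```go
package main

import (
    "fmt"
    "sync/atomic"
    "unsafe"
)

// node stands in for an m; link mirrors m.alllink.
type node struct {
    ncgocall int64
    link     *node
}

// head stands in for allm; it is only ever written with an atomic
// publish, so readers may traverse without any lock.
var head unsafe.Pointer // *node

func push(n *node) {
    for {
        old := atomic.LoadPointer(&head)
        n.link = (*node)(old) // set the link before publishing the node
        if atomic.CompareAndSwapPointer(&head, old, unsafe.Pointer(n)) {
            return
        }
    }
}

// sum mirrors the NumCgoCall loop: an atomic load of the head, then a
// plain walk of links that never change once published.
func sum() int64 {
    var n int64
    for mp := (*node)(atomic.LoadPointer(&head)); mp != nil; mp = mp.link {
        n += mp.ncgocall
    }
    return n
}

func main() {
    push(&node{ncgocall: 1})
    push(&node{ncgocall: 2})
    fmt.Println(sum()) // 3
}
```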
@@ -24,8 +24,7 @@ import (
 var Entersyscall = entersyscall
 var Exitsyscall = exitsyscall
-// var LockedOSThread = lockedOSThread
+var LockedOSThread = lockedOSThread
 // var Xadduintptr = xadduintptr
...
@@ -521,7 +521,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
 // Most clients should use the runtime/pprof package instead
 // of calling ThreadCreateProfile directly.
 func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
-    first := (*m)(atomic.Loadp(unsafe.Pointer(allm())))
+    first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
     for mp := first; mp != nil; mp = mp.alllink {
         n++
     }
...
@@ -11,6 +11,13 @@ import (
 // Temporary for C code to call:
 //go:linkname minit runtime.minit

+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+    mp.gsignal = malg(true, true, &mp.gsignalstack, &mp.gsignalstacksize)
+    mp.gsignal.m = mp
+}
+
 // minit is called to initialize a new m (including the bootstrap m).
 // Called on the new thread, cannot allocate memory.
 func minit() {
...
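The two comments spell out a deliberate split: mpreinit runs on the creating thread, where allocating (here malg for the signal goroutine) is still safe, while minit later runs on the new thread itself and must not allocate. A rough stand-alone sketch of that two-phase setup follows; worker, prepare, and onThread are hypothetical names, not runtime API.

```go
package main

import "fmt"

// worker stands in for an m: some of its state must exist before the
// thread that will use it starts running.
type worker struct {
    signalStack []byte
    ready       chan struct{}
}

// prepare plays the role of mpreinit: it runs on the parent, so it is
// free to allocate.
func prepare() *worker {
    return &worker{
        signalStack: make([]byte, 32<<10), // allocation is safe here
        ready:       make(chan struct{}),
    }
}

// onThread plays the role of minit: it runs on the new thread of control
// and only wires up state that prepare already allocated.
func (w *worker) onThread() {
    _ = w.signalStack // consume preallocated state; no allocation needed
    close(w.ready)
}

func main() {
    w := prepare()
    go w.onThread()
    <-w.ready
    fmt.Println("worker ready with", len(w.signalStack), "byte signal stack")
}
```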
@@ -755,10 +755,8 @@ const _TracebackMaxFrames = 100
 var (
     // emptystring string
     allglen uintptr
-    // allm *m
+    allm *m
     allp [_MaxGomaxprocs + 1]*p
     gomaxprocs int32
     panicking uint32
...
@@ -234,20 +234,6 @@ func newobject(*_type) unsafe.Pointer
 // For gccgo unless and until we port malloc.go.
 func newarray(*_type, int) unsafe.Pointer

-// funcPC returns the entry PC of the function f.
-// It assumes that f is a func value. Otherwise the behavior is undefined.
-// For gccgo here unless and until we port proc.go.
-// Note that this differs from the gc implementation; the gc implementation
-// adds sys.PtrSize to the address of the interface value, but GCC's
-// alias analysis decides that that can not be a reference to the second
-// field of the interface, and in some cases it drops the initialization
-// of the second field as a dead store.
-//go:nosplit
-func funcPC(f interface{}) uintptr {
-    i := (*iface)(unsafe.Pointer(&f))
-    return **(**uintptr)(i.data)
-}
-
 // For gccgo, to communicate from the C code to the Go code.
 //go:linkname setIsCgo runtime.setIsCgo
 func setIsCgo() {
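The deleted comment records a real layout subtlety: an interface value is two words, and gc's funcPC goes through the address of the second word, while the gccgo variant dereferenced the data pointer itself so GCC's alias analysis could not drop the store. The sketch below shows the gc-style extraction; it depends on unexported layout details of the gc toolchain and is illustrative only.

```go
package main

import (
    "fmt"
    "reflect"
    "unsafe"
)

func hello() {}

// iface mirrors the two-word interface layout: type word, then data word.
type iface struct {
    tab  unsafe.Pointer
    data unsafe.Pointer
}

// funcPCgc follows gc's approach: take the address of the interface's
// second word; the data word holds a pointer to a funcval whose first
// word is the entry PC.
func funcPCgc(f interface{}) uintptr {
    i := (*iface)(unsafe.Pointer(&f))
    return **(**uintptr)(unsafe.Pointer(&i.data))
}

func main() {
    // Both expressions should report the entry PC of hello on gc.
    fmt.Printf("%#x %#x\n", funcPCgc(hello), reflect.ValueOf(hello).Pointer())
}
```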
@@ -352,56 +338,6 @@ func exitsyscall(int32)
 func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
 func goparkunlock(*mutex, string, byte, int)

-// Temporary hack for gccgo until we port proc.go.
-//go:nosplit
-func acquireSudog() *sudog {
-    mp := acquirem()
-    pp := mp.p.ptr()
-    if len(pp.sudogcache) == 0 {
-        pp.sudogcache = append(pp.sudogcache, new(sudog))
-    }
-    n := len(pp.sudogcache)
-    s := pp.sudogcache[n-1]
-    pp.sudogcache[n-1] = nil
-    pp.sudogcache = pp.sudogcache[:n-1]
-    if s.elem != nil {
-        throw("acquireSudog: found s.elem != nil in cache")
-    }
-    releasem(mp)
-    return s
-}
-
-// Temporary hack for gccgo until we port proc.go.
-//go:nosplit
-func releaseSudog(s *sudog) {
-    if s.elem != nil {
-        throw("runtime: sudog with non-nil elem")
-    }
-    if s.selectdone != nil {
-        throw("runtime: sudog with non-nil selectdone")
-    }
-    if s.next != nil {
-        throw("runtime: sudog with non-nil next")
-    }
-    if s.prev != nil {
-        throw("runtime: sudog with non-nil prev")
-    }
-    if s.waitlink != nil {
-        throw("runtime: sudog with non-nil waitlink")
-    }
-    if s.c != nil {
-        throw("runtime: sudog with non-nil c")
-    }
-    gp := getg()
-    if gp.param != nil {
-        throw("runtime: releaseSudog with non-nil gp.param")
-    }
-    mp := acquirem() // avoid rescheduling to another P
-    pp := mp.p.ptr()
-    pp.sudogcache = append(pp.sudogcache, s)
-    releasem(mp)
-}
-
 // Temporary hack for gccgo until we port the garbage collector.
 func typeBitsBulkBarrier(typ *_type, p, size uintptr) {}
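The invariants these removed helpers enforce are the point: a sudog returned to the cache must hold no stale references, and the acquirem/releasem pair pins the goroutine so the P cannot change mid-operation. A simplified, single-goroutine sketch of the cache discipline (pinning elided; the p and sudog types below are stand-ins):

```go
package main

import "fmt"

type sudog struct {
    elem interface{} // must be nil whenever the sudog sits in a cache
}

type p struct {
    sudogcache []*sudog
}

// acquire takes a sudog from the P-local cache, allocating one on a miss.
func acquire(pp *p) *sudog {
    if len(pp.sudogcache) == 0 {
        pp.sudogcache = append(pp.sudogcache, new(sudog))
    }
    n := len(pp.sudogcache)
    s := pp.sudogcache[n-1]
    pp.sudogcache[n-1] = nil // drop the cache's reference
    pp.sudogcache = pp.sudogcache[:n-1]
    if s.elem != nil {
        panic("acquire: found s.elem != nil in cache")
    }
    return s
}

// release checks the no-stale-state invariant and returns the sudog.
func release(pp *p, s *sudog) {
    if s.elem != nil {
        panic("release: sudog with non-nil elem")
    }
    pp.sudogcache = append(pp.sudogcache, s)
}

func main() {
    pp := &p{}
    s := acquire(pp)
    release(pp, s)
    fmt.Println("cached sudogs:", len(pp.sudogcache)) // 1
}
```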
@@ -450,7 +386,6 @@ func LockOSThread()
 func UnlockOSThread()
 func lockOSThread()
 func unlockOSThread()
-func allm() *m

 // Temporary for gccgo until we port malloc.go
 func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer
@@ -466,14 +401,6 @@ func setGCPercent(in int32) (out int32) {
     return setgcpercent(in)
 }

-// Temporary for gccgo until we port proc.go.
-func setmaxthreads(int) int
-
-//go:linkname setMaxThreads runtime_debug.setMaxThreads
-func setMaxThreads(in int) (out int) {
-    return setmaxthreads(in)
-}
-
 // Temporary for gccgo until we port atomic_pointer.go.
 //go:nosplit
 func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
@@ -495,7 +422,6 @@ func getZerobase() *uintptr {
 // Temporary for gccgo until we port proc.go.
 func sigprof()
-func mcount() int32
 func goexit1()

 // Get signal trampoline, written in C.
@@ -549,6 +475,12 @@ func getallg(i int) *g {
     return allgs[i]
 }

+// Temporary for gccgo until we port the garbage collector.
+//go:linkname getallm runtime.getallm
+func getallm() *m {
+    return allm
+}
+
 // Throw and rethrow an exception.
 func throwException()
 func rethrowException()
@@ -577,21 +509,6 @@ var work struct {
     }
 }

-// gcount is temporary for gccgo until more of proc.go is ported.
-// This is a copy of the C function we used to use.
-func gcount() int32 {
-    n := int32(0)
-    lock(&allglock)
-    for _, gp := range allgs {
-        s := readgstatus(gp)
-        if s == _Grunnable || s == _Grunning || s == _Gsyscall || s == _Gwaiting {
-            n++
-        }
-    }
-    unlock(&allglock)
-    return n
-}
-
 // Temporary for gccgo until we port mgc.go.
 var gcBlackenEnabled uint32
...
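The removed gcount shows the counting convention the ported code keeps: take allglock for a consistent snapshot and count only goroutines in live states. A stand-alone sketch of the same logic, with stand-in status values:

```go
package main

import (
    "fmt"
    "sync"
)

type status int

const (
    gDead status = iota
    gRunnable
    gRunning
    gSyscall
    gWaiting
)

type gRec struct{ status status }

var (
    allglock sync.Mutex
    allgs    []*gRec
)

// gcount locks the list for a consistent snapshot and counts only
// goroutines in live states, mirroring the removed function above.
func gcount() int32 {
    n := int32(0)
    allglock.Lock()
    for _, gp := range allgs {
        switch gp.status {
        case gRunnable, gRunning, gSyscall, gWaiting:
            n++
        }
    }
    allglock.Unlock()
    return n
}

func main() {
    allgs = []*gRec{{gRunning}, {gDead}, {gWaiting}}
    fmt.Println(gcount()) // 2
}
```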
@@ -474,7 +474,7 @@ dumpms(void)
 {
     M *mp;

-    for(mp = runtime_allm; mp != nil; mp = mp->alllink) {
+    for(mp = runtime_getallm(); mp != nil; mp = mp->alllink) {
         dumpint(TagOSThread);
         dumpint((uintptr)mp);
         dumpint(mp->id);
...
@@ -1279,7 +1279,6 @@ markroot(ParFor *desc, uint32 i)
         // For gccgo we use this for all the other global roots.
         enqueue1(&wbuf, (Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
         enqueue1(&wbuf, (Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
-        enqueue1(&wbuf, (Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
         enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
         enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
         break;
@@ -2002,7 +2001,7 @@ runtime_updatememstats(GCStats *stats)
     if(stats)
         runtime_memclr((byte*)stats, sizeof(*stats));
     stacks_inuse = 0;
-    for(mp=runtime_allm; mp; mp=mp->alllink) {
+    for(mp=runtime_getallm(); mp; mp=mp->alllink) {
         //stacks_inuse += mp->stackinuse*FixedStack;
         if(stats) {
             src = (uint64*)&mp->gcstats;
...
@@ -376,7 +376,6 @@ Sched* runtime_sched;
 M runtime_m0;
 G runtime_g0; // idle goroutine for m0
 G* runtime_lastg;
-M* runtime_allm;
 P** runtime_allp;
 int8* runtime_goos;
 int32 runtime_ncpu;
@@ -385,18 +384,17 @@ bool runtime_precisestack;
 bool runtime_isarchive;

 void* runtime_mstart(void*);
-static void mcommoninit(M*);
 static void exitsyscall0(G*);
 static void park0(G*);
 static void goexit0(G*);
-static void gfput(P*, G*);
-static G* gfget(P*);
 static bool exitsyscallfast(void);

 extern void setncpu(int32)
   __asm__(GOSYM_PREFIX "runtime.setncpu");
 extern void allgadd(G*)
   __asm__(GOSYM_PREFIX "runtime.allgadd");
+extern void mcommoninit(M*)
+  __asm__(GOSYM_PREFIX "runtime.mcommoninit");
 extern void stopm(void)
   __asm__(GOSYM_PREFIX "runtime.stopm");
 extern void handoffp(P*)
@@ -409,6 +407,10 @@ extern void schedule(void)
   __asm__(GOSYM_PREFIX "runtime.schedule");
 extern void execute(G*, bool)
   __asm__(GOSYM_PREFIX "runtime.execute");
+extern void gfput(P*, G*)
+  __asm__(GOSYM_PREFIX "runtime.gfput");
+extern G* gfget(P*)
+  __asm__(GOSYM_PREFIX "runtime.gfget");
 extern void procresize(int32)
   __asm__(GOSYM_PREFIX "runtime.procresize");
 extern void acquirep(P*)
@@ -620,16 +622,6 @@ void getTraceback(G* me, G* gp)
     }
 }

-static void
-checkmcount(void)
-{
-    // sched lock is held
-    if(runtime_sched->mcount > runtime_sched->maxmcount) {
-        runtime_printf("runtime: program exceeds %d-thread limit\n", runtime_sched->maxmcount);
-        runtime_throw("thread exhaustion");
-    }
-}
-
 // Do a stack trace of gp, and then restore the context to
 // gp->dotraceback.
@@ -649,30 +641,6 @@ gtraceback(G* gp)
     runtime_gogo(traceback->gp);
 }

-static void
-mcommoninit(M *mp)
-{
-    // If there is no mcache runtime_callers() will crash,
-    // and we are most likely in sysmon thread so the stack is senseless anyway.
-    if(g->m->mcache)
-        runtime_callers(1, mp->createstack, nelem(mp->createstack), false);
-
-    mp->fastrand = 0x49f6428aUL + mp->id + runtime_cputicks();
-
-    runtime_lock(&runtime_sched->lock);
-    mp->id = runtime_sched->mcount++;
-    checkmcount();
-    runtime_mpreinit(mp);
-
-    // Add to runtime_allm so garbage collector doesn't free m
-    // when it is just in a register or thread-local storage.
-    mp->alllink = runtime_allm;
-    // runtime_NumCgoCall() iterates over allm w/o schedlock,
-    // so we need to publish it safely.
-    runtime_atomicstorep(&runtime_allm, mp);
-    runtime_unlock(&runtime_sched->lock);
-}
-
 // Called to start an M.
 void*
 runtime_mstart(void* mp)
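The one subtle line in the removed C is runtime_atomicstorep: the new m is linked in under the scheduler lock, but the list head must still be published atomically because runtime_NumCgoCall walks allm without that lock. A sketch of that writer side, pairing with the reader sketch shown earlier (worker, register, and allw are hypothetical names):

```go
package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "unsafe"
)

type worker struct {
    id      int
    alllink *worker
}

var (
    schedLock sync.Mutex
    nworkers  int
    allw      unsafe.Pointer // *worker head, read by lock-free walkers
)

// register mirrors mcommoninit: the id assignment is serialized by the
// lock, while the list head is stored atomically so that lock-free
// readers always see a fully initialized node.
func register(w *worker) {
    schedLock.Lock()
    w.id = nworkers
    nworkers++
    w.alllink = (*worker)(atomic.LoadPointer(&allw))
    atomic.StorePointer(&allw, unsafe.Pointer(w)) // safe publish
    schedLock.Unlock()
}

func main() {
    register(&worker{})
    register(&worker{})
    for w := (*worker)(atomic.LoadPointer(&allw)); w != nil; w = w.alllink {
        fmt.Println("worker", w.id)
    }
}
```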
@@ -1332,33 +1300,6 @@ syscall_exitsyscall()
     runtime_exitsyscall(0);
 }

-// Called from syscall package before fork.
-void syscall_runtime_BeforeFork(void)
-  __asm__(GOSYM_PREFIX "syscall.runtime_BeforeFork");
-
-void
-syscall_runtime_BeforeFork(void)
-{
-    // Fork can hang if preempted with signals frequently enough (see issue 5517).
-    // Ensure that we stay on the same M where we disable profiling.
-    runtime_m()->locks++;
-
-    if(runtime_m()->profilehz != 0)
-        runtime_resetcpuprofiler(0);
-}
-
-// Called from syscall package after fork in parent.
-void syscall_runtime_AfterFork(void)
-  __asm__(GOSYM_PREFIX "syscall.runtime_AfterFork");
-
-void
-syscall_runtime_AfterFork(void)
-{
-    int32 hz;
-
-    hz = runtime_sched->profilehz;
-    if(hz != 0)
-        runtime_resetcpuprofiler(hz);
-    runtime_m()->locks--;
-}
-
 // Allocate a new g, with a stack big enough for stacksize bytes.
 G*
 runtime_malg(bool allocatestack, bool signalstack, byte** ret_stack, uintptr* ret_stacksize)
@@ -1480,55 +1421,6 @@ __go_go(void (*fn)(void*), void* arg)
     return newg;
 }

-// Put on gfree list.
-// If local list is too long, transfer a batch to the global list.
-static void
-gfput(P *p, G *gp)
-{
-    gp->schedlink = (uintptr)p->gfree;
-    p->gfree = gp;
-    p->gfreecnt++;
-    if(p->gfreecnt >= 64) {
-        runtime_lock(&runtime_sched->gflock);
-        while(p->gfreecnt >= 32) {
-            p->gfreecnt--;
-            gp = p->gfree;
-            p->gfree = (G*)gp->schedlink;
-            gp->schedlink = (uintptr)runtime_sched->gfree;
-            runtime_sched->gfree = gp;
-        }
-        runtime_unlock(&runtime_sched->gflock);
-    }
-}
-
-// Get from gfree list.
-// If local list is empty, grab a batch from global list.
-static G*
-gfget(P *p)
-{
-    G *gp;
-
-retry:
-    gp = p->gfree;
-    if(gp == nil && runtime_sched->gfree) {
-        runtime_lock(&runtime_sched->gflock);
-        while(p->gfreecnt < 32 && runtime_sched->gfree) {
-            p->gfreecnt++;
-            gp = runtime_sched->gfree;
-            runtime_sched->gfree = (G*)gp->schedlink;
-            gp->schedlink = (uintptr)p->gfree;
-            p->gfree = gp;
-        }
-        runtime_unlock(&runtime_sched->gflock);
-        goto retry;
-    }
-    if(gp) {
-        p->gfree = (G*)gp->schedlink;
-        p->gfreecnt--;
-    }
-    return gp;
-}
-
 void
 runtime_Breakpoint(void)
 {
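The policy being deleted here (and recreated on the Go side) is a two-level free list: frees go to the P-local list, spill to the global list in a batch once the local count reaches 64 (draining down to 32), and an empty local list refills from the global list in batches of up to 32. A self-contained sketch of just that policy, with stand-in types:

```go
package main

import (
    "fmt"
    "sync"
)

type g struct{ next *g }

type sched struct {
    mu    sync.Mutex
    gfree *g
}

type p struct {
    gfree    *g
    gfreecnt int
}

// gfput frees to the local list, spilling a batch to the global list
// once the local count reaches 64 (and draining it down to 32).
func (pp *p) gfput(s *sched, gp *g) {
    gp.next = pp.gfree
    pp.gfree = gp
    pp.gfreecnt++
    if pp.gfreecnt >= 64 {
        s.mu.Lock()
        for pp.gfreecnt >= 32 {
            gp = pp.gfree
            pp.gfree = gp.next
            pp.gfreecnt--
            gp.next = s.gfree
            s.gfree = gp
        }
        s.mu.Unlock()
    }
}

// gfget takes from the local list, refilling up to 32 entries from the
// global list when the local list is empty.
func (pp *p) gfget(s *sched) *g {
    if pp.gfree == nil && s.gfree != nil {
        s.mu.Lock()
        for pp.gfreecnt < 32 && s.gfree != nil {
            gp := s.gfree
            s.gfree = gp.next
            gp.next = pp.gfree
            pp.gfree = gp
            pp.gfreecnt++
        }
        s.mu.Unlock()
    }
    gp := pp.gfree
    if gp != nil {
        pp.gfree = gp.next
        pp.gfreecnt--
    }
    return gp
}

func main() {
    s, pp := &sched{}, &p{}
    for i := 0; i < 100; i++ {
        pp.gfput(s, &g{})
    }
    fmt.Println("local after spills:", pp.gfreecnt) // batches went global
    pp.gfree, pp.gfreecnt = nil, 0                  // simulate an empty P
    fmt.Println("refilled from global:", pp.gfget(s) != nil)
}
```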
@@ -1543,74 +1435,6 @@ runtime_Gosched(void)
     runtime_gosched();
 }

-// lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
-// after they modify m->locked. Do not allow preemption during this call,
-// or else the m might be different in this function than in the caller.
-static void
-lockOSThread(void)
-{
-    g->m->lockedg = g;
-    g->lockedm = g->m;
-}
-
-void runtime_LockOSThread(void) __asm__ (GOSYM_PREFIX "runtime.LockOSThread");
-void
-runtime_LockOSThread(void)
-{
-    g->m->locked |= _LockExternal;
-    lockOSThread();
-}
-
-void
-runtime_lockOSThread(void)
-{
-    g->m->locked += _LockInternal;
-    lockOSThread();
-}
-
-// unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
-// after they update m->locked. Do not allow preemption during this call,
-// or else the m might be different in this function than in the caller.
-static void
-unlockOSThread(void)
-{
-    if(g->m->locked != 0)
-        return;
-    g->m->lockedg = nil;
-    g->lockedm = nil;
-}
-
-void runtime_UnlockOSThread(void) __asm__ (GOSYM_PREFIX "runtime.UnlockOSThread");
-void
-runtime_UnlockOSThread(void)
-{
-    g->m->locked &= ~_LockExternal;
-    unlockOSThread();
-}
-
-void
-runtime_unlockOSThread(void)
-{
-    if(g->m->locked < _LockInternal)
-        runtime_throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
-    g->m->locked -= _LockInternal;
-    unlockOSThread();
-}
-
-bool
-runtime_lockedOSThread(void)
-{
-    return g->lockedm != nil && g->m->lockedg != nil;
-}
-
-int32
-runtime_mcount(void)
-{
-    return runtime_sched->mcount;
-}
-
 static struct {
     uint32 lock;
     int32 hz;
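The removed code packs all LockOSThread bookkeeping into one m->locked word: _LockExternal is a single bit for the user-visible call, _LockInternal counts nested runtime-internal locks, and the goroutine stays wired to its thread until the word returns to zero. A small sketch of that accounting; the constants mirror the removed code, and the m type is a stand-in.

```go
package main

import "fmt"

const (
    _LockExternal = 1 // single bit: user called LockOSThread
    _LockInternal = 2 // counted in units of 2: runtime-internal locks nest
)

type m struct {
    locked uint32
}

// External lock is a bit, not a count: repeated calls are idempotent.
func (mp *m) lockExternal() { mp.locked |= _LockExternal }

// Internal locks nest, so each call adds to the counter part of the word.
func (mp *m) lockInternal() { mp.locked += _LockInternal }

func (mp *m) unlockExternal() { mp.locked &^= _LockExternal }

func (mp *m) unlockInternal() {
    if mp.locked < _LockInternal {
        panic("runtime: internal error: misuse of lockOSThread/unlockOSThread")
    }
    mp.locked -= _LockInternal
}

// wired reports whether the goroutine must stay on this thread.
func (mp *m) wired() bool { return mp.locked != 0 }

func main() {
    var mp m
    mp.lockExternal()
    mp.lockInternal()
    mp.lockInternal()
    mp.unlockInternal()
    mp.unlockInternal()
    fmt.Println("wired after internal unlocks:", mp.wired()) // true: external bit set
    mp.unlockExternal()
    fmt.Println("wired at end:", mp.wired()) // false
}
```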
@@ -1719,71 +1543,6 @@ runtime_setcpuprofilerate_m(int32 hz)
     g->m->locks--;
 }

-intgo
-runtime_setmaxthreads(intgo in)
-{
-    intgo out;
-
-    runtime_lock(&runtime_sched->lock);
-    out = (intgo)runtime_sched->maxmcount;
-    runtime_sched->maxmcount = (int32)in;
-    checkmcount();
-    runtime_unlock(&runtime_sched->lock);
-    return out;
-}
-
-static intgo
-procPin()
-{
-    M *mp;
-
-    mp = runtime_m();
-    mp->locks++;
-    return (intgo)(((P*)mp->p)->id);
-}
-
-static void
-procUnpin()
-{
-    runtime_m()->locks--;
-}
-
-intgo sync_runtime_procPin(void)
-  __asm__ (GOSYM_PREFIX "sync.runtime_procPin");
-
-intgo
-sync_runtime_procPin()
-{
-    return procPin();
-}
-
-void sync_runtime_procUnpin(void)
-  __asm__ (GOSYM_PREFIX "sync.runtime_procUnpin");
-
-void
-sync_runtime_procUnpin()
-{
-    procUnpin();
-}
-
-intgo sync_atomic_runtime_procPin(void)
-  __asm__ (GOSYM_PREFIX "sync_atomic.runtime_procPin");
-
-intgo
-sync_atomic_runtime_procPin()
-{
-    return procPin();
-}
-
-void sync_atomic_runtime_procUnpin(void)
-  __asm__ (GOSYM_PREFIX "sync_atomic.runtime_procUnpin");
-
-void
-sync_atomic_runtime_procUnpin()
-{
-    procUnpin();
-}
-
 // Return whether we are waiting for a GC. This gc toolchain uses
 // preemption instead.
 bool
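procPin/procUnpin, removed above, are the hooks the sync and sync/atomic packages use to pin a goroutine to its current P: incrementing m->locks disables preemption, so the returned P id stays valid until the matching unpin. A structural sketch with hypothetical stand-in types pT and mT:

```go
package main

import "fmt"

type pT struct{ id int }

type mT struct {
    locks int
    p     *pT
}

// procPin pins the calling goroutine by bumping the preemption-disable
// counter, then returns the current P's id, which stays valid while pinned.
func procPin(mp *mT) int {
    mp.locks++
    return mp.p.id
}

// procUnpin re-enables preemption.
func procUnpin(mp *mT) {
    mp.locks--
}

func main() {
    mp := &mT{p: &pT{id: 3}}
    id := procPin(mp)
    // ... safely use per-P state slot id here ...
    procUnpin(mp)
    fmt.Println("pinned P", id, "locks now", mp.locks)
}
```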
@@ -1802,17 +1561,6 @@ os_beforeExit()
 {
 }

-// For Go code to look at variables, until we port proc.go.
-extern M* runtime_go_allm(void)
-  __asm__ (GOSYM_PREFIX "runtime.allm");
-
-M*
-runtime_go_allm()
-{
-    return runtime_allm;
-}
-
 intgo NumCPU(void) __asm__ (GOSYM_PREFIX "runtime.NumCPU");
 intgo
...
@@ -237,7 +237,8 @@ extern G* runtime_getallg(intgo)
 extern uintptr runtime_getallglen(void)
   __asm__(GOSYM_PREFIX "runtime.getallglen");
 extern G* runtime_lastg;
-extern M* runtime_allm;
+extern M* runtime_getallm(void)
+  __asm__(GOSYM_PREFIX "runtime.getallm");
 extern P** runtime_allp;
 extern Sched* runtime_sched;
 extern uint32 runtime_panicking(void)
@@ -301,7 +302,6 @@ int32 runtime_atoi(const byte*, intgo);
 void* runtime_mstart(void*);
 G* runtime_malg(bool, bool, byte**, uintptr*)
   __asm__(GOSYM_PREFIX "runtime.malg");
-void runtime_mpreinit(M*);
 void runtime_minit(void)
   __asm__ (GOSYM_PREFIX "runtime.minit");
 void runtime_signalstack(byte*, uintptr)
@@ -313,8 +313,6 @@ void runtime_freemcache(MCache*)
 void runtime_mallocinit(void);
 void runtime_mprofinit(void);
 #define runtime_getcallersp(p) __builtin_frame_address(0)
-int32 runtime_mcount(void)
-  __asm__ (GOSYM_PREFIX "runtime.mcount");
 void runtime_mcall(void(*)(G*));
 uint32 runtime_fastrand1(void) __asm__ (GOSYM_PREFIX "runtime.fastrand1");
 int32 runtime_timediv(int64, int32, int32*)
@@ -394,8 +392,6 @@ void runtime_crash(void)
 void runtime_parsedebugvars(void)
   __asm__(GOSYM_PREFIX "runtime.parsedebugvars");
 void _rt0_go(void);
-intgo runtime_setmaxthreads(intgo)
-  __asm__ (GOSYM_PREFIX "runtime.setmaxthreads");
 G* runtime_timejump(void);
 void runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*));
@@ -522,8 +518,6 @@ void runtime_lockOSThread(void)
   __asm__(GOSYM_PREFIX "runtime.lockOSThread");
 void runtime_unlockOSThread(void)
   __asm__(GOSYM_PREFIX "runtime.unlockOSThread");
-bool runtime_lockedOSThread(void)
-  __asm__(GOSYM_PREFIX "runtime.lockedOSThread");
 void runtime_printcreatedby(G*)
   __asm__(GOSYM_PREFIX "runtime.printcreatedby");
...
@@ -95,15 +95,6 @@ runtime_cputicks(void)
 #endif
 }

-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-void
-runtime_mpreinit(M *mp)
-{
-    mp->gsignal = runtime_malg(true, true, (byte**)&mp->gsignalstack, &mp->gsignalstacksize);
-    mp->gsignal->m = mp;
-}
-
 void
 runtime_signalstack(byte *p, uintptr n)
 {
...