Commit a4ad1c7a, authored Dec 17, 2010 by Ian Lance Taylor
Rework locking code to split stack much less.
From-SVN: r167973
Parent: 785e11cc
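What the patch does, as the diffs below show: libgo's Lock loses its pthread_mutex_t and gains an atomically updated counter (key) plus a POSIX semaphore (sem). runtime_lock and runtime_unlock now handle the uncontended case with a single atomic add and only fall into the noinline helpers runtime_lock_full and runtime_unlock_full (sem_wait/sem_post) when the counter shows contention, which keeps the hot functions small so they split the stack much less. Locks that still need a bare mutex (finqlock and gcsema in mgc0.c) become plain pthread_mutex_t, statically initialized locks get explicit init hooks (runtime_initfintab, runtime_Mprof_Init) now that LOCK_INITIALIZER is gone, and the cas macro is renamed runtime_cas.

The program below is a minimal, self-contained sketch of the same fast-path/slow-path pattern, not the libgo code itself: the sketch_* names are invented for this illustration, __sync_add_and_fetch stands in for the patch's runtime_xadd (which is built on runtime_cas), and the runtime_throw-style error checks are omitted. It should build with "gcc -pthread".

/* Sketch of the two-level lock introduced by this commit: an atomic
   counter for the uncontended path, a semaphore for contended waiters. */
#include <pthread.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint32_t key;	/* number of threads holding or waiting for the lock */
	sem_t sem;	/* contended threads sleep here */
} SketchLock;

static void sketch_initlock(SketchLock *l)
{
	l->key = 0;
	sem_init(&l->sem, 0, 0);
}

/* The contended paths stay out of line so the common path remains a
   small function (the point of the commit title). */
static void sketch_lock_full(SketchLock *l) __attribute__ ((noinline));
static void sketch_lock_full(SketchLock *l) { sem_wait(&l->sem); }

static void sketch_unlock_full(SketchLock *l) __attribute__ ((noinline));
static void sketch_unlock_full(SketchLock *l) { sem_post(&l->sem); }

static void sketch_lock(SketchLock *l)
{
	/* First arrival takes key from 0 to 1 and owns the lock. */
	if (__sync_add_and_fetch(&l->key, 1) > 1)
		sketch_lock_full(l);	/* someone else has it; wait */
}

static void sketch_unlock(SketchLock *l)
{
	/* If key is still positive after the decrement, a waiter exists. */
	if (__sync_add_and_fetch(&l->key, -1) > 0)
		sketch_unlock_full(l);	/* wake exactly one waiter */
}

static SketchLock lk;
static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		sketch_lock(&lk);
		counter++;
		sketch_unlock(&lk);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	sketch_initlock(&lk);
	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("counter = %ld (expected 200000)\n", counter);
	return 0;
}

On the uncontended path both acquire and release are a single atomic add; the semaphore is touched only when key shows that another thread holds or is waiting for the lock.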
Showing 8 changed files with 83 additions and 31 deletions.
libgo/runtime/malloc.goc     +3   -0
libgo/runtime/malloc.h       +1   -0
libgo/runtime/mfinal.c       +7   -1
libgo/runtime/mgc0.c         +10  -10
libgo/runtime/mprof.goc      +7   -1
libgo/runtime/runtime.h      +5   -4
libgo/runtime/sigqueue.goc   +2   -2
libgo/runtime/thread.c       +48  -13
libgo/runtime/malloc.goc

@@ -270,6 +270,9 @@ runtime_allocmcache(void)
 void
 runtime_mallocinit(void)
 {
+	runtime_initfintab();
+	runtime_Mprof_Init();
+	runtime_SysMemInit();
 	runtime_InitSizes();
 	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
libgo/runtime/malloc.h

@@ -375,6 +375,7 @@ enum
 	RefFlags	= 0xFFFF0000U,
 };
 
+void	runtime_Mprof_Init(void);
 void	runtime_MProf_Malloc(void*, uintptr);
 void	runtime_MProf_Free(void*, uintptr);
 void	runtime_MProf_Mark(void (*scan)(byte *, int64));
libgo/runtime/mfinal.c

@@ -5,7 +5,13 @@
 #include "runtime.h"
 #include "malloc.h"
 
-static Lock finlock = LOCK_INITIALIZER;
+static Lock finlock;
+
+void
+runtime_initfintab()
+{
+	runtime_initlock(&finlock);
+}
 
 // Finalizer hash table. Direct hash, linear scan, at most 3/4 full.
 // Table size is power of 3 so that hash can be key % max.
libgo/runtime/mgc0.c

@@ -27,7 +27,7 @@ struct BlockList
 };
 
 static bool finstarted;
-static Lock finqlock = LOCK_INITIALIZER;
+static pthread_mutex_t finqlock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t finqcond = PTHREAD_COND_INITIALIZER;
 static Finalizer *finq;
 static int32 fingwait;

@@ -284,7 +284,7 @@ sweep(void)
 		sweepspan(s);
 }
 
-static Lock gcsema = LOCK_INITIALIZER;
+static pthread_mutex_t gcsema = PTHREAD_MUTEX_INITIALIZER;
 
 // Initialized from $GOGC. GOGC=off means no gc.
 //

@@ -327,8 +327,8 @@ runtime_gc(int32 force __attribute__ ((unused)))
 	if(gcpercent < 0)
 		return;
 
-	runtime_lock(&finqlock);
-	runtime_lock(&gcsema);
+	pthread_mutex_lock(&finqlock);
+	pthread_mutex_lock(&gcsema);
 	m->locks++;	// disable gc during the mallocs in newproc
 	t0 = runtime_nanotime();
 	runtime_stoptheworld();

@@ -345,7 +345,7 @@ runtime_gc(int32 force __attribute__ ((unused)))
 	mstats.pause_ns += t1 - t0;
 	if(mstats.debuggc)
 		runtime_printf("pause %llu\n", (unsigned long long)t1-t0);
-	runtime_unlock(&gcsema);
+	pthread_mutex_unlock(&gcsema);
 	runtime_starttheworld();
 
 	// finqlock is still held.

@@ -362,7 +362,7 @@ runtime_gc(int32 force __attribute__ ((unused)))
 		}
 	}
 	m->locks--;
-	runtime_unlock(&finqlock);
+	pthread_mutex_unlock(&finqlock);
 }
 
 static void

@@ -373,16 +373,16 @@ runfinq(void* dummy)
 	USED(dummy);
 
 	for(;;) {
-		runtime_lock(&finqlock);
+		pthread_mutex_lock(&finqlock);
 		f = finq;
 		finq = nil;
 		if(f == nil) {
 			fingwait = 1;
-			pthread_cond_wait(&finqcond, &finqlock.mutex);
-			runtime_unlock(&finqlock);
+			pthread_cond_wait(&finqcond, &finqlock);
+			pthread_mutex_unlock(&finqlock);
 			continue;
 		}
-		runtime_unlock(&finqlock);
+		pthread_mutex_unlock(&finqlock);
 		for(; f; f=next) {
 			void *params[1];
libgo/runtime/mprof.goc

@@ -14,7 +14,7 @@ package runtime
 typedef struct __go_open_array Slice;
 
 // NOTE(rsc): Everything here could use cas if contention became an issue.
-static Lock proflock = LOCK_INITIALIZER;
+static Lock proflock;
 
 // Per-call-stack allocation information.
 // Lookup by hashing call stack into a linked-list hash table.

@@ -185,6 +185,12 @@ found:
 	return nil;
 }
 
+void
+runtime_Mprof_Init()
+{
+	runtime_initlock(&proflock);
+}
+
 // Called by malloc to record a profiled block.
 void
 runtime_MProf_Malloc(void *p, uintptr size)
libgo/runtime/runtime.h

@@ -13,6 +13,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <pthread.h>
+#include <semaphore.h>
 
 #ifdef HAVE_SYS_MMAN_H
 #include <sys/mman.h>

@@ -53,7 +54,8 @@ typedef struct Lock Lock;
 struct	Lock
 {
-	pthread_mutex_t	mutex;
+	uint32	key;
+	sem_t	sem;
 };
 
 /* A Note. */

@@ -119,6 +121,7 @@ struct M
 void*	runtime_mal(uintptr);
 void	runtime_mallocinit(void);
+void	runtime_initfintab(void);
 void	siginit(void);
 bool	__go_sigsend(int32 sig);
 int64	runtime_nanotime(void);

@@ -138,12 +141,10 @@ void __go_cachestats(void);
  * as fast as spin locks (just a few user-level instructions),
  * but on the contention path they sleep in the kernel.
  */
-#define	LOCK_INITIALIZER	{ PTHREAD_MUTEX_INITIALIZER }
+void	runtime_initlock(Lock*);
 void	runtime_lock(Lock*);
 void	runtime_unlock(Lock*);
 void	runtime_destroylock(Lock*);
-bool	runtime_trylock(Lock*);
 void	semacquire(uint32*) asm("libgo_runtime.runtime.Semacquire");
 void	semrelease(uint32*) asm("libgo_runtime.runtime.Semrelease");

@@ -178,7 +179,7 @@ void runtime_addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *
 void	runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64));
 
 #define runtime_mmap mmap
 #define runtime_munmap(p, s) munmap((p), (s))
-#define cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
+#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
 
 struct __go_func_type;
 void reflect_call(const struct __go_func_type *, const void *, _Bool, void **,
libgo/runtime/sigqueue.goc

@@ -67,7 +67,7 @@ __go_sigsend(int32 s)
 		mask = sig.mask;
 		if(mask & bit)
 			break;		// signal already in queue
-		if(cas(&sig.mask, mask, mask|bit)) {
+		if(runtime_cas(&sig.mask, mask, mask|bit)) {
 			// Added to queue.
 			// Only send a wakeup for the first signal in each round.
 			if(mask == 0)

@@ -86,7 +86,7 @@ func Sigrecv() (m uint32) {
 	noteclear(&sig);
 	for(;;) {
 		m = sig.mask;
-		if(cas(&sig.mask, m, 0))
+		if(runtime_cas(&sig.mask, m, 0))
 			break;
 	}
 }
libgo/runtime/thread.c

@@ -7,32 +7,67 @@
 void
 runtime_initlock(Lock *l)
 {
-	if(pthread_mutex_init(&l->mutex, NULL) != 0)
-		runtime_throw("pthread_mutex_init failed");
+	l->key = 0;
+	if(sem_init(&l->sem, 0, 0) != 0)
+		runtime_throw("sem_init failed");
+}
+
+static uint32
+runtime_xadd(uint32 volatile *val, int32 delta)
+{
+	uint32 oval, nval;
+
+	for(;;){
+		oval = *val;
+		nval = oval + delta;
+		if(runtime_cas(val, oval, nval))
+			return nval;
+	}
+}
+
+// noinline so that runtime_lock doesn't have to split the stack.
+static void runtime_lock_full(Lock *l) __attribute__ ((noinline));
+
+static void
+runtime_lock_full(Lock *l)
+{
+	if(sem_wait(&l->sem) != 0)
+		runtime_throw("sem_wait failed");
 }
 
 void
 runtime_lock(Lock *l)
 {
-	if(pthread_mutex_lock(&l->mutex) != 0)
-		runtime_throw("lock failed");
+	if(m->locks < 0)
+		runtime_throw("lock count");
+	m->locks++;
+
+	if(runtime_xadd(&l->key, 1) > 1)	// someone else has it; wait
+		runtime_lock_full(l);
 }
 
-void
-runtime_unlock(Lock *l)
+static void runtime_unlock_full(Lock *l) __attribute__ ((noinline));
+
+static void
+runtime_unlock_full(Lock *l)
 {
-	if(pthread_mutex_unlock(&l->mutex) != 0)
-		runtime_throw("unlock failed");
+	if(sem_post(&l->sem) != 0)
+		runtime_throw("sem_post failed");
 }
 
 void
-runtime_destroylock(Lock *l)
+runtime_unlock(Lock *l)
 {
-	pthread_mutex_destroy(&l->mutex);
+	m->locks--;
+	if(m->locks < 0)
+		runtime_throw("lock count");
+
+	if(runtime_xadd(&l->key, -1) > 0)	// someone else is waiting
+		runtime_unlock_full(l);
 }
 
-bool
-runtime_trylock(Lock *l)
+void
+runtime_destroylock(Lock *l)
 {
-	return pthread_mutex_trylock(&l->mutex) == 0;
+	sem_destroy(&l->sem);
 }
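A note on the reworked thread.c above: an uncontended runtime_lock takes key from 0 to 1 and returns without touching the semaphore, while a second thread takes key to 2 and sleeps in runtime_lock_full. runtime_unlock's decrement then leaves key at 1, still above zero, so runtime_unlock_full posts the semaphore and wakes one waiter. Per the comment added in the diff, keeping the sem_wait/sem_post calls in separate noinline helpers is what lets runtime_lock avoid splitting the stack on the common path. The m->locks bookkeeping moves into runtime_lock/runtime_unlock, and runtime_trylock is dropped here along with its declaration in runtime.h.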