Commit 4f6bdb08 by Ian Lance Taylor

runtime: be more strict in GC

    
    With CL 190599, along with what we do in greyobject, we ensure
    that we only mark allocated heap objects. As a result, we can be
    stricter in the GC:
    
    - Enable "sweep increased allocation count" check, which checks
      that the number of mark bits set are no more than the number of
      allocation bits.
    
    - Enable the invalid pointer check on heap scans. We only trace
      allocated heap objects, which should not contain invalid
      pointers.
    
    This also brings the libgo runtime closer to the gc runtime.
    
    Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/190797

From-SVN: r274678
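
The first check boils down to comparing the population counts of two per-span bitmaps. Below is a minimal standalone sketch of that invariant, using hypothetical bitmap values rather than the runtime's real mspan fields:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // popcount returns the number of set bits across a bitmap.
    func popcount(bitmap []uint64) int {
    	n := 0
    	for _, w := range bitmap {
    		n += bits.OnesCount64(w)
    	}
    	return n
    }

    func main() {
    	allocBits := []uint64{0b1011}  // objects 0, 1, 3 allocated before GC
    	gcmarkBits := []uint64{0b0011} // objects 0, 1 found live by marking

    	// Since greyobject only marks allocated heap objects, marks are
    	// a subset of allocations, so this can never trip:
    	if popcount(gcmarkBits) > popcount(allocBits) {
    		panic("sweep increased allocation count")
    	}
    	fmt.Println("objects freed by sweep:", popcount(allocBits)-popcount(gcmarkBits))
    }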
parent 188d0079
gcc/go/gofrontend/MERGE
-85857977230437f2b3dcbeea009efbb8b2789039
+b0ba5daa8216a0424b24f74466cedab0b986f3b4
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
libgo/go/runtime/mcentral.go
@@ -56,15 +56,6 @@ retry:
 			c.empty.insertBack(s)
 			unlock(&c.lock)
 			s.sweep(true)
-			// With gccgo's conservative GC, the returned span may
-			// now be full. See the comments in mspan.sweep.
-			if uintptr(s.allocCount) == s.nelems {
-				s.freeindex = s.nelems
-				lock(&c.lock)
-				goto retry
-			}
 			goto havespan
 		}
 		if s.sweepgen == sg-1 {
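The deleted retry handled a situation that precise marking now rules out: sweeping a span taken off the nonempty list could, under conservative scanning, raise allocCount and leave the span full. A toy model of why that can no longer happen (hypothetical helper, not runtime code):

    // allocCountAfterSweep models mspan.sweep recomputing allocCount
    // from the mark bits. Marked objects are now always a subset of
    // allocated ones, so the count can only shrink or stay equal.
    func allocCountAfterSweep(allocCountBefore, nmarked uintptr) uintptr {
    	if nmarked > allocCountBefore {
    		panic("sweep increased allocation count") // now a hard error
    	}
    	return nmarked
    }

A span on the nonempty list satisfies allocCount < nelems, and the recomputed count is no larger, so cacheSpan can jump straight to havespan without re-checking fullness.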
libgo/go/runtime/mgcsweep.go
@@ -326,39 +326,16 @@ func (s *mspan) sweep(preserve bool) bool {
 		freeToHeap = true
 	}
 	nfreed := s.allocCount - nalloc
-	// This check is not reliable with gccgo, because of
-	// conservative stack scanning. The test boils down to
-	// checking that no new bits have been set in gcmarkBits since
-	// the span was added to the sweep count. New bits are set by
-	// greyobject. Seeing a new bit means that a live pointer has
-	// appeared that was not found during the mark phase. That can
-	// not happen when pointers are followed strictly. However,
-	// with conservative checking, it is possible for a pointer
-	// that will never be used to appear live and to cause a mark
-	// to be added. That is unfortunate in that it causes this
-	// check to be inaccurate, and it will keep an object live
-	// unnecessarily, but provided the pointer is not really live
-	// it is not otherwise a problem. So we disable the test for gccgo.
-	nfreedSigned := int(nfreed)
 	if nalloc > s.allocCount {
-		if usestackmaps {
-			print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
-			throw("sweep increased allocation count")
-		}
-		// For gccgo, adjust the freed count as a signed number.
-		nfreedSigned = int(s.allocCount) - int(nalloc)
-		if uintptr(nalloc) == s.nelems {
-			s.freeindex = s.nelems
-		}
+		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+		throw("sweep increased allocation count")
 	}
 	s.allocCount = nalloc
 	wasempty := s.nextFreeIndex() == s.nelems
 	s.freeindex = 0 // reset allocation index to start of span.
 	if trace.enabled {
-		getg().m.p.ptr().traceReclaimed += uintptr(nfreedSigned) * s.elemsize
+		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
 	}
 	// gcmarkBits becomes the allocBits.
@@ -374,7 +351,7 @@ func (s *mspan) sweep(preserve bool) bool {
 	// But we need to set it before we make the span available for allocation
 	// (return it to heap or mcentral), because allocation code assumes that a
 	// span is already swept if available for allocation.
-	if freeToHeap || nfreedSigned <= 0 {
+	if freeToHeap || nfreed == 0 {
 		// The span must be in our exclusive ownership until we update sweepgen,
 		// check for potential races.
 		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
@@ -387,11 +364,8 @@ func (s *mspan) sweep(preserve bool) bool {
 		atomic.Store(&s.sweepgen, sweepgen)
 	}
-	if spc.sizeclass() != 0 {
-		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreedSigned)
-	}
-	if nfreedSigned > 0 && spc.sizeclass() != 0 {
+	if nfreed > 0 && spc.sizeclass() != 0 {
+		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
 		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
 		// mcentral.freeSpan updates sweepgen
 	} else if freeToHeap {
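With the throw now unconditional, nalloc can never exceed the previous allocCount, which is why the signed nfreedSigned workaround and its <= 0 comparisons can be replaced by the plain unsigned nfreed. A minimal sketch of the underflow argument (hypothetical helper; allocCount is a uint16 in the runtime):

    func freedObjects(prevAllocCount, nalloc uint16) uint16 {
    	if nalloc > prevAllocCount {
    		// The runtime throws here, so the unsigned subtraction
    		// below can never wrap around.
    		panic("sweep increased allocation count")
    	}
    	return prevAllocCount - nalloc
    }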
libgo/go/runtime/runtime1.go
@@ -352,19 +352,12 @@ func parsedebugvars() {
 	// defaults
 	debug.cgocheck = 1
-	// Unfortunately, because gccgo uses conservative stack scanning,
-	// we can not enable invalid pointer checking. It is possible for
-	// memory block M1 to point to M2, and for both to be dead.
-	// We release M2, causing the entire span to be released.
-	// Before we release M1, a stack pointer appears that point into it.
-	// This stack pointer is presumably dead, but causes M1 to be marked.
-	// We scan M1 and see the pointer to M2 on a released span.
-	// At that point, if debug.invalidptr is set, we crash.
-	// This is not a problem, assuming that M1 really is dead and
-	// the pointer we discovered to it will not be used.
-	if usestackmaps {
-		debug.invalidptr = 1
-	}
+	// Gccgo uses conservative stack scanning, so we cannot check
+	// invalid pointers on stack. But we can still enable invalid
+	// pointer check on heap scanning. When scanning the heap, we
+	// ensure that we only trace allocated heap objects, which should
+	// not contain invalid pointers.
+	debug.invalidptr = 1
 	for p := gogetenv("GODEBUG"); p != ""; {
 		field := ""
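Setting debug.invalidptr = 1 before the loop only changes the default; a user can still opt out, because the GODEBUG loop overrides defaults with whatever the environment supplies (for example GODEBUG=invalidptr=0). A simplified, self-contained sketch of that parse-then-override pattern, not the runtime's actual parser:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseDebugVars overrides defaults with comma-separated
    // key=value pairs, the way parsedebugvars treats GODEBUG.
    func parseDebugVars(godebug string, defaults map[string]string) map[string]string {
    	out := make(map[string]string, len(defaults))
    	for k, v := range defaults {
    		out[k] = v
    	}
    	for _, field := range strings.Split(godebug, ",") {
    		if k, v, ok := strings.Cut(field, "="); ok {
    			out[k] = v
    		}
    	}
    	return out
    }

    func main() {
    	defaults := map[string]string{"cgocheck": "1", "invalidptr": "1"}
    	fmt.Println(parseDebugVars("invalidptr=0", defaults)["invalidptr"]) // prints 0
    }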