Source file src/runtime/mgcwork.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	_WorkbufSize = 2048 // in bytes; larger values result in less contention

	// workbufAlloc is the number of bytes to allocate at a time
	// for new workbufs. This must be a multiple of pageSize and
	// should be a multiple of _WorkbufSize.
	//
	// Larger values reduce workbuf allocation overhead. Smaller
	// values reduce heap fragmentation.
	workbufAlloc = 32 << 10
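	// With the values above, each allocation is carved into
	// workbufAlloc/_WorkbufSize = (32<<10)/2048 = 16 workbufs
	// (see getempty, which slices up each newly allocated span).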
)

func init() {
	if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
		throw("bad workbufAlloc")
	}
}

// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
// objects. A grey object is one that is marked and on a work
// queue. A black object is marked and not on a work queue.
//
// Write barriers, root discovery, stack scanning, and object scanning
// produce pointers to grey objects. Scanning consumes pointers to
// grey objects, thus blackening them, and then scans them,
// potentially producing new pointers to grey objects.
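//
// In outline (an illustrative sketch, not the actual drain loop in
// mgcmark.go), a mark worker repeatedly consumes and produces work:
//
//     for {
//         obj := gcw.tryGet()
//         if obj == 0 {
//             break // no local or global work left
//         }
//         // Scanning obj blackens it and may call gcw.put for each
//         // pointer to an unmarked object found inside it.
//         scanobject(obj, gcw)
//     }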

// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
// A gcWork can be used on the stack as follows:
//
//     (preemption must be disabled)
//     gcw := &getg().m.p.ptr().gcw
//     .. call gcw.put() to produce and gcw.tryGet() to consume ..
//
// It's important that any use of gcWork during the mark phase prevent
// the garbage collector from transitioning to mark termination since
// gcWork may locally hold GC work buffers. This can be done by
// disabling preemption (systemstack or acquirem).
type gcWork struct {
	// wbuf1 and wbuf2 are the primary and secondary work buffers.
	//
	// This can be thought of as a stack of both work buffers'
	// pointers concatenated. When we pop the last pointer, we
	// shift the stack up by one work buffer by bringing in a new
	// full buffer and discarding an empty one. When we fill both
	// buffers, we shift the stack down by one work buffer by
	// bringing in a new empty buffer and discarding a full one.
	// This way we have one buffer's worth of hysteresis, which
	// amortizes the cost of getting or putting a work buffer over
	// at least one buffer of work and reduces contention on the
	// global work lists.
	//
	// wbuf1 is always the buffer we're currently pushing to and
	// popping from and wbuf2 is the buffer that will be discarded
	// next.
	//
	// Invariant: Both wbuf1 and wbuf2 are nil or neither are.
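	//
	// In short (illustrative summary of put and tryGet below):
	//
	//     put:    wbuf1 full?  swap wbuf1, wbuf2; if still full,
	//             putfull(wbuf1) and replace it with getempty()
	//     tryGet: wbuf1 empty? swap wbuf1, wbuf2; if still empty,
	//             putempty(wbuf1) and replace it with trygetfull()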
	wbuf1, wbuf2 *workbuf

	// Bytes marked (blackened) on this gcWork. This is aggregated
	// into work.bytesMarked by dispose.
	bytesMarked uint64

	// Scan work performed on this gcWork. This is aggregated into
	// gcController by dispose and may also be flushed by callers.
	scanWork int64

	// flushedWork indicates that a non-empty work buffer was
	// flushed to the global work list since the last gcMarkDone
	// termination check. Specifically, this indicates that this
	// gcWork may have communicated work to another gcWork.
	flushedWork bool
}

// Most of the methods of gcWork are go:nowritebarrierrec because the
// write barrier itself can invoke gcWork methods but the methods are
// not generally re-entrant. Hence, if a gcWork method invoked the
// write barrier while the gcWork was in an inconsistent state, and
// the write barrier in turn invoked a gcWork method, it could
// permanently corrupt the gcWork.

func (w *gcWork) init() {
	w.wbuf1 = getempty()
	wbuf2 := trygetfull()
	if wbuf2 == nil {
		wbuf2 = getempty()
	}
	w.wbuf2 = wbuf2
}

// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object or an oblet.
//go:nowritebarrierrec
func (w *gcWork) put(obj uintptr) {
	flushed := false
	wbuf := w.wbuf1
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	} else if wbuf.nobj == len(wbuf.obj) {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			wbuf = getempty()
			w.wbuf1 = wbuf
			flushed = true
		}
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++

	// If we put a buffer on full, let the GC controller know so
	// it can encourage more workers to run. We delay this until
	// the end of put so that w is in a consistent state, since
	// enlistWorker may itself manipulate w.
	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}

// putFast does a put and reports whether it could be done quickly;
// otherwise it does nothing, returns false, and the caller needs to
// call put.
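//
// For example (an illustrative caller pattern), scanners typically
// try the fast path first and fall back to put:
//
//     if !gcw.putFast(obj) {
//         gcw.put(obj)
//     }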
//go:nowritebarrierrec
func (w *gcWork) putFast(obj uintptr) bool {
	wbuf := w.wbuf1
	if wbuf == nil {
		return false
	} else if wbuf.nobj == len(wbuf.obj) {
		return false
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++
	return true
}

// putBatch performs a put on every pointer in obj. See put for
// constraints on these pointers.
//
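// For example (illustrative; buf and n are hypothetical locals), a
// caller that has accumulated pointers in a local buffer can flush
// them in a single call rather than one put per pointer:
//
//     gcw.putBatch(buf[:n])
//
// The write barrier buffer flush, for instance, enqueues its pointers
// this way.
//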
//go:nowritebarrierrec
func (w *gcWork) putBatch(obj []uintptr) {
	if len(obj) == 0 {
		return
	}

	flushed := false
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
	}

	for len(obj) > 0 {
		for wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			w.wbuf1, w.wbuf2 = w.wbuf2, getempty()
			wbuf = w.wbuf1
			flushed = true
		}
		n := copy(wbuf.obj[wbuf.nobj:], obj)
		wbuf.nobj += n
		obj = obj[n:]
	}

	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}

// tryGet dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0.  Note that there may still be pointers in
// other gcWork instances or other caches.
//go:nowritebarrierrec
func (w *gcWork) tryGet() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	}
	if wbuf.nobj == 0 {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == 0 {
			owbuf := wbuf
			wbuf = trygetfull()
			if wbuf == nil {
				return 0
			}
			putempty(owbuf)
			w.wbuf1 = wbuf
		}
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}

// tryGetFast dequeues a pointer for the garbage collector to trace
// if one is readily available. Otherwise it returns 0 and
// the caller is expected to call tryGet().
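//
// For example (an illustrative caller pattern), drain loops typically
// try the fast path first:
//
//     obj := gcw.tryGetFast()
//     if obj == 0 {
//         obj = gcw.tryGet()
//     }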
//go:nowritebarrierrec
func (w *gcWork) tryGetFast() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil {
		return 0
	}
	if wbuf.nobj == 0 {
		return 0
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}

// dispose returns any cached pointers to the global queue.
// The buffers are being put on the full queue so that the
// write barriers will not simply reacquire them before the
// GC can inspect them. This helps reduce the mutator's
// ability to hide pointers during the concurrent mark phase.
//
//go:nowritebarrierrec
func (w *gcWork) dispose() {
	if wbuf := w.wbuf1; wbuf != nil {
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf1 = nil

		wbuf = w.wbuf2
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf2 = nil
	}
	if w.bytesMarked != 0 {
		// dispose happens relatively infrequently. If this
		// atomic becomes a problem, we should first try to
		// dispose less and if necessary aggregate in a per-P
		// counter.
		atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
	if w.scanWork != 0 {
		atomic.Xaddint64(&gcController.scanWork, w.scanWork)
		w.scanWork = 0
	}
}

// balance moves some work that's cached in this gcWork back on the
// global queue.
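//
// Drain loops call this (see, e.g., gcDrain in mgcmark.go) when the
// global full list appears empty, so that idle workers can find work
// without waiting for this gcWork to fill or dispose its buffers.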
//go:nowritebarrierrec
func (w *gcWork) balance() {
	if w.wbuf1 == nil {
		return
	}
	if wbuf := w.wbuf2; wbuf.nobj != 0 {
		putfull(wbuf)
		w.flushedWork = true
		w.wbuf2 = getempty()
	} else if wbuf := w.wbuf1; wbuf.nobj > 4 {
		w.wbuf1 = handoff(wbuf)
		w.flushedWork = true // handoff did putfull
	} else {
		return
	}
	// We flushed a buffer to the full list, so wake a worker.
	if gcphase == _GCmark {
		gcController.enlistWorker()
	}
}

// empty reports whether w has no mark work available.
//go:nowritebarrierrec
func (w *gcWork) empty() bool {
	return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
}

// Internally, the GC work pool is kept in arrays in work buffers.
// The gcWork interface caches a work buffer until full (or empty) to
// avoid contending on the global work buffer lists.

type workbufhdr struct {
	node lfnode // must be first
	nobj int
}

//go:notinheap
type workbuf struct {
	workbufhdr
	// account for the above fields
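	//
	// For example, on a 64-bit platform the header is roughly 24
	// bytes (a 16-byte lfnode plus an 8-byte int), leaving about
	// (2048-24)/8 = 253 pointer slots per workbuf. The exact count
	// follows from the unsafe.Sizeof expression below.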
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
}

// workbuf factory routines. These funcs are used to manage the
// workbufs.
// If the GC asks for some work these are the only routines that
// make wbufs available to the GC.

func (b *workbuf) checknonempty() {
	if b.nobj == 0 {
		throw("workbuf is empty")
	}
}

func (b *workbuf) checkempty() {
	if b.nobj != 0 {
		throw("workbuf is not empty")
	}
}

// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
//go:nowritebarrier
func getempty() *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(work.empty.pop())
		if b != nil {
			b.checkempty()
		}
	}
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if b == nil {
		// Allocate more workbufs.
		var s *mspan
		if work.wbufSpans.free.first != nil {
			lock(&work.wbufSpans.lock)
			s = work.wbufSpans.free.first
			if s != nil {
				work.wbufSpans.free.remove(s)
				work.wbufSpans.busy.insert(s)
			}
			unlock(&work.wbufSpans.lock)
		}
		if s == nil {
			systemstack(func() {
				s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
			})
			if s == nil {
				throw("out of memory")
			}
			// Record the new span in the busy list.
			lock(&work.wbufSpans.lock)
			work.wbufSpans.busy.insert(s)
			unlock(&work.wbufSpans.lock)
		}
		// Slice up the span into new workbufs. Return one and
		// put the rest on the empty list.
		for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
			newb := (*workbuf)(unsafe.Pointer(s.base() + i))
			newb.nobj = 0
			lfnodeValidate(&newb.node)
			if i == 0 {
				b = newb
			} else {
				putempty(newb)
			}
		}
	}
	return b
}

// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
//go:nowritebarrier
func putempty(b *workbuf) {
	b.checkempty()
	work.empty.push(&b.node)
}

// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//go:nowritebarrier
func putfull(b *workbuf) {
	b.checknonempty()
	work.full.push(&b.node)
}

// trygetfull tries to get a full or partially filled workbuf.
// If one is not immediately available, it returns nil.
//go:nowritebarrier
func trygetfull() *workbuf {
	b := (*workbuf)(work.full.pop())
	if b != nil {
		b.checknonempty()
		return b
	}
	return b
}

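// handoff splits b roughly in half: the second half of b's pointers
// are moved into a freshly obtained empty buffer, which is returned
// for the caller to keep working from, while b itself (now holding
// only the first half) is pushed onto the global full list where
// other workers can steal it.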
//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
	// Make new buffer with half of b's pointers.
	b1 := getempty()
	n := b.nobj / 2
	b.nobj -= n
	b1.nobj = n
	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))

	// Put b on full list - let first half of b get stolen.
	putfull(b)
	return b1
}

// prepareFreeWorkbufs moves busy workbuf spans to free list so they
// can be freed to the heap. This must only be called when all
// workbufs are on the empty list.
func prepareFreeWorkbufs() {
	lock(&work.wbufSpans.lock)
	if work.full != 0 {
		throw("cannot free workbufs when work.full != 0")
	}
	// Since all workbufs are on the empty list, we don't care
	// which ones are in which spans. We can wipe the entire empty
	// list and move all workbuf spans to the free list.
	work.empty = 0
	work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
	unlock(&work.wbufSpans.lock)
}

// freeSomeWbufs frees some workbufs back to the heap and returns
// true if it should be called again to free more.
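//
// Callers are therefore expected to loop until it reports false,
// typically yielding between batches, for example (illustrative):
//
//     for freeSomeWbufs(true) {
//         Gosched()
//     }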
func freeSomeWbufs(preemptible bool) bool {
	const batchSize = 64 // ~1–2 µs per span.
	lock(&work.wbufSpans.lock)
	if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
		unlock(&work.wbufSpans.lock)
		return false
	}
	systemstack(func() {
		gp := getg().m.curg
		for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
			span := work.wbufSpans.free.first
			if span == nil {
				break
			}
			work.wbufSpans.free.remove(span)
			mheap_.freeManual(span, spanAllocWorkBuf)
		}
	})
	more := !work.wbufSpans.free.isEmpty()
	unlock(&work.wbufSpans.lock)
	return more
}
