Source file src/runtime/mfinal.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: finalizers and block profiling.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"runtime/internal/atomic"
    12  	"runtime/internal/sys"
    13  	"unsafe"
    14  )
    15  
    16  // finblock is an array of finalizers to be executed. finblocks are
    17  // arranged in a linked list for the finalizer queue.
    18  //
    19  // finblock is allocated from non-GC'd memory, so any heap pointers
    20  // must be specially handled. GC currently assumes that the finalizer
    21  // queue does not grow during marking (but it can shrink).
    22  //
    23  //go:notinheap
    24  type finblock struct {
    25  	alllink *finblock
    26  	next    *finblock
    27  	cnt     uint32
    28  	_       int32
    29  	fin     [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
    30  }
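For a sense of scale (editorial note, not part of the source): _FinBlockSize is 10*1024 bytes in malloc.go, so on a 64-bit platform, where a finalizer is 5 words (40 bytes) and the block header above is 2*8 + 2*4 = 24 bytes, the fin array works out to (10240 - 24) / 40 = 255 finalizers per block.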
    31  
    32  var finlock mutex  // protects the following variables
    33  var fing *g        // goroutine that runs finalizers
    34  var finq *finblock // list of finalizers that are to be executed
    35  var finc *finblock // cache of free blocks
    36  var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
    37  var fingwait bool
    38  var fingwake bool
    39  var allfin *finblock // list of all blocks
    40  
    41  // NOTE: Layout known to queuefinalizer.
    42  type finalizer struct {
    43  	fn   *funcval       // function to call (may be a heap pointer)
    44  	arg  unsafe.Pointer // ptr to object (may be a heap pointer)
    45  	nret uintptr        // bytes of return values from fn
    46  	fint *_type         // type of first argument of fn
    47  	ot   *ptrtype       // type of ptr to object (may be a heap pointer)
    48  }
    49  
    50  var finalizer1 = [...]byte{
    51  	// Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
    52  	// Each byte describes 8 words.
    53  	// Need 8 Finalizers described by 5 bytes before pattern repeats:
    54  	//	ptr ptr INT ptr ptr
    55  	//	ptr ptr INT ptr ptr
    56  	//	ptr ptr INT ptr ptr
    57  	//	ptr ptr INT ptr ptr
    58  	//	ptr ptr INT ptr ptr
    59  	//	ptr ptr INT ptr ptr
    60  	//	ptr ptr INT ptr ptr
    61  	//	ptr ptr INT ptr ptr
    62  	// aka
    63  	//
    64  	//	ptr ptr INT ptr ptr ptr ptr INT
    65  	//	ptr ptr ptr ptr INT ptr ptr ptr
    66  	//	ptr INT ptr ptr ptr ptr INT ptr
    67  	//	ptr ptr ptr INT ptr ptr ptr ptr
    68  	//	INT ptr ptr ptr ptr INT ptr ptr
    69  	//
    70  	// Assumptions about Finalizer layout checked below.
    71  	1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
    72  	1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
    73  	1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
    74  	1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
    75  	0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
    76  }
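The five bytes above can be derived mechanically from the repeating ptr ptr INT ptr ptr word pattern. A standalone sketch (editorial illustration, not part of the runtime) that rebuilds the same table:

	package main

	import "fmt"

	func main() {
		// Word k of a finalizer is a pointer except word 2 (nret, a uintptr):
		// fn, arg, nret, fint, ot -> ptr ptr INT ptr ptr.
		isPtr := [5]bool{true, true, false, true, true}
		var mask [5]byte // 5 bytes describe 40 words = 8 finalizers
		for word := 0; word < 8*len(mask); word++ {
			if isPtr[word%len(isPtr)] {
				mask[word/8] |= 1 << (word % 8)
			}
		}
		fmt.Printf("%08b\n", mask) // prints the same bit patterns as finalizer1
	}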
    77  
    78  func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
    79  	if gcphase != _GCoff {
    80  		// Currently we assume that the finalizer queue won't
    81  		// grow during marking so we don't have to rescan it
    82  		// during mark termination. If we ever need to lift
    83  		// this assumption, we can do it by adding the
    84  		// necessary barriers to queuefinalizer (which it may
    85  		// have automatically).
    86  		throw("queuefinalizer during GC")
    87  	}
    88  
    89  	lock(&finlock)
    90  	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
    91  		if finc == nil {
    92  			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
    93  			finc.alllink = allfin
    94  			allfin = finc
    95  			if finptrmask[0] == 0 {
    96  				// Build pointer mask for Finalizer array in block.
    97  				// Check assumptions made in finalizer1 array above.
    98  				if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
    99  					unsafe.Offsetof(finalizer{}.fn) != 0 ||
   100  					unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
   101  					unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
   102  					unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
   103  					unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
   104  					throw("finalizer out of sync")
   105  				}
   106  				for i := range finptrmask {
   107  					finptrmask[i] = finalizer1[i%len(finalizer1)]
   108  				}
   109  			}
   110  		}
   111  		block := finc
   112  		finc = block.next
   113  		block.next = finq
   114  		finq = block
   115  	}
   116  	f := &finq.fin[finq.cnt]
   117  	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
   118  	f.fn = fn
   119  	f.nret = nret
   120  	f.fint = fint
   121  	f.ot = ot
   122  	f.arg = p
   123  	fingwake = true
   124  	unlock(&finlock)
   125  }
   126  
   127  //go:nowritebarrier
   128  func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
   129  	for fb := allfin; fb != nil; fb = fb.alllink {
   130  		for i := uint32(0); i < fb.cnt; i++ {
   131  			f := &fb.fin[i]
   132  			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
   133  		}
   134  	}
   135  }
   136  
   137  func wakefing() *g {
   138  	var res *g
   139  	lock(&finlock)
   140  	if fingwait && fingwake {
   141  		fingwait = false
   142  		fingwake = false
   143  		res = fing
   144  	}
   145  	unlock(&finlock)
   146  	return res
   147  }
   148  
   149  var (
   150  	fingCreate  uint32
   151  	fingRunning bool
   152  )
   153  
   154  func createfing() {
   155  	// start the finalizer goroutine exactly once
   156  	if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
   157  		go runfinq()
   158  	}
   159  }
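The load-then-CAS shape of createfing is a general start-exactly-once idiom. A user-level analogue using sync/atomic (hypothetical names, editorial illustration; the runtime's plain read of fingCreate is replaced here by an atomic load, the safe choice for ordinary Go code):

	package main

	import (
		"fmt"
		"sync/atomic"
		"time"
	)

	var workerStarted uint32

	func worker() { fmt.Println("background worker started once") }

	// ensureWorker starts worker exactly once: a cheap load filters most calls,
	// and the CAS guarantees that only a single caller wins the race.
	func ensureWorker() {
		if atomic.LoadUint32(&workerStarted) == 0 &&
			atomic.CompareAndSwapUint32(&workerStarted, 0, 1) {
			go worker()
		}
	}

	func main() {
		for i := 0; i < 3; i++ {
			ensureWorker()
		}
		time.Sleep(50 * time.Millisecond)
	}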
   160  
   161  // This is the goroutine that runs all of the finalizers
   162  func runfinq() {
   163  	var (
   164  		frame    unsafe.Pointer
   165  		framecap uintptr
   166  		argRegs  int
   167  	)
   168  
   169  	for {
   170  		lock(&finlock)
   171  		fb := finq
   172  		finq = nil
   173  		if fb == nil {
   174  			gp := getg()
   175  			fing = gp
   176  			fingwait = true
   177  			goparkunlock(&finlock, waitReasonFinalizerWait, traceEvGoBlock, 1)
   178  			continue
   179  		}
   180  		argRegs = intArgRegs
   181  		unlock(&finlock)
   182  		if raceenabled {
   183  			racefingo()
   184  		}
   185  		for fb != nil {
   186  			for i := fb.cnt; i > 0; i-- {
   187  				f := &fb.fin[i-1]
   188  
   189  				var regs abi.RegArgs
   190  				var framesz uintptr
   191  				if argRegs > 0 {
   192  					// The args can always be passed in registers if they're
   193  					// available, because platforms we support always have no
   194  					// argument registers available, or more than 2.
   195  					//
   196  					// But unfortunately because we can have an arbitrary
   197  					// amount of returns and it would be complex to try and
   198  					// figure out how many of those can get passed in registers,
   199  					// just conservatively assume none of them do.
   200  					framesz = f.nret
   201  				} else {
   202  					// Need to pass arguments on the stack too.
   203  					framesz = unsafe.Sizeof((interface{})(nil)) + f.nret
   204  				}
   205  				if framecap < framesz {
   206  					// The frame does not contain pointers interesting for GC,
   207  					// all not yet finalized objects are stored in finq.
   208  					// If we do not mark it as FlagNoScan,
   209  					// the last finalized object is not collected.
   210  					frame = mallocgc(framesz, nil, true)
   211  					framecap = framesz
   212  				}
   213  
   214  				if f.fint == nil {
   215  					throw("missing type in runfinq")
   216  				}
   217  				r := frame
   218  				if argRegs > 0 {
   219  					r = unsafe.Pointer(&regs.Ints)
   220  				} else {
   221  					// frame is effectively uninitialized
   222  					// memory. That means we have to clear
   223  					// it before writing to it to avoid
   224  					// confusing the write barrier.
   225  					*(*[2]uintptr)(frame) = [2]uintptr{}
   226  				}
   227  				switch f.fint.kind & kindMask {
   228  				case kindPtr:
   229  					// direct use of pointer
   230  					*(*unsafe.Pointer)(r) = f.arg
   231  				case kindInterface:
   232  					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
   233  					// set up with empty interface
   234  					(*eface)(r)._type = &f.ot.typ
   235  					(*eface)(r).data = f.arg
   236  					if len(ityp.mhdr) != 0 {
   237  						// convert to interface with methods
   238  						// this conversion is guaranteed to succeed - we checked in SetFinalizer
   239  						(*iface)(r).tab = assertE2I(ityp, (*eface)(r)._type)
   240  					}
   241  				default:
   242  					throw("bad kind in runfinq")
   243  				}
   244  				fingRunning = true
   245  				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), &regs)
   246  				fingRunning = false
   247  
   248  				// Drop finalizer queue heap references
   249  				// before hiding them from markroot.
   250  				// This also ensures these will be
   251  				// clear if we reuse the finalizer.
   252  				f.fn = nil
   253  				f.arg = nil
   254  				f.ot = nil
   255  				atomic.Store(&fb.cnt, i-1)
   256  			}
   257  			next := fb.next
   258  			lock(&finlock)
   259  			fb.next = finc
   260  			finc = fb
   261  			unlock(&finlock)
   262  			fb = next
   263  		}
   264  	}
   265  }
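Because runfinq executes finalizers one at a time on this single goroutine, a slow finalizer delays every finalizer queued behind it. A sketch of the recommendation from the SetFinalizer documentation below (Thing and slowRelease are hypothetical names, editorial illustration):

	package cleanup

	import "runtime"

	type Thing struct{}

	func (t *Thing) slowRelease() { /* long-running cleanup */ }

	// Track registers a finalizer that hands long work to its own goroutine,
	// so the runtime's single finalizer goroutine is not blocked.
	func Track(t *Thing) {
		runtime.SetFinalizer(t, func(t *Thing) {
			go t.slowRelease()
		})
	}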
   266  
   267  // SetFinalizer sets the finalizer associated with obj to the provided
   268  // finalizer function. When the garbage collector finds an unreachable block
   269  // with an associated finalizer, it clears the association and runs
   270  // finalizer(obj) in a separate goroutine. This makes obj reachable again,
   271  // but now without an associated finalizer. Assuming that SetFinalizer
   272  // is not called again, the next time the garbage collector sees
   273  // that obj is unreachable, it will free obj.
   274  //
   275  // SetFinalizer(obj, nil) clears any finalizer associated with obj.
   276  //
   277  // The argument obj must be a pointer to an object allocated by calling
   278  // new, by taking the address of a composite literal, or by taking the
   279  // address of a local variable.
   280  // The argument finalizer must be a function that takes a single argument
   281  // to which obj's type can be assigned, and can have arbitrary ignored return
   282  // values. If either of these is not true, SetFinalizer may abort the
   283  // program.
   284  //
   285  // Finalizers are run in dependency order: if A points at B, both have
   286  // finalizers, and they are otherwise unreachable, only the finalizer
   287  // for A runs; once A is freed, the finalizer for B can run.
   288  // If a cyclic structure includes a block with a finalizer, that
   289  // cycle is not guaranteed to be garbage collected and the finalizer
   290  // is not guaranteed to run, because there is no ordering that
   291  // respects the dependencies.
   292  //
   293  // The finalizer is scheduled to run at some arbitrary time after the
   294  // program can no longer reach the object to which obj points.
   295  // There is no guarantee that finalizers will run before a program exits,
   296  // so typically they are useful only for releasing non-memory resources
   297  // associated with an object during a long-running program.
   298  // For example, an os.File object could use a finalizer to close the
   299  // associated operating system file descriptor when a program discards
   300  // an os.File without calling Close, but it would be a mistake
   301  // to depend on a finalizer to flush an in-memory I/O buffer such as a
   302  // bufio.Writer, because the buffer would not be flushed at program exit.
   303  //
   304  // It is not guaranteed that a finalizer will run if the size of *obj is
   305  // zero bytes.
   306  //
   307  // It is not guaranteed that a finalizer will run for objects allocated
   308  // in initializers for package-level variables. Such objects may be
   309  // linker-allocated, not heap-allocated.
   310  //
   311  // A finalizer may run as soon as an object becomes unreachable.
   312  // In order to use finalizers correctly, the program must ensure that
   313  // the object is reachable until it is no longer required.
   314  // Objects stored in global variables, or that can be found by tracing
   315  // pointers from a global variable, are reachable. For other objects,
   316  // pass the object to a call of the KeepAlive function to mark the
   317  // last point in the function where the object must be reachable.
   318  //
   319  // For example, if p points to a struct, such as os.File, that contains
   320  // a file descriptor d, and p has a finalizer that closes that file
   321  // descriptor, and if the last use of p in a function is a call to
   322  // syscall.Write(p.d, buf, size), then p may be unreachable as soon as
   323  // the program enters syscall.Write. The finalizer may run at that moment,
   324  // closing p.d, causing syscall.Write to fail because it is writing to
   325  // a closed file descriptor (or, worse, to an entirely different
   326  // file descriptor opened by a different goroutine). To avoid this problem,
   327  // call runtime.KeepAlive(p) after the call to syscall.Write.
   328  //
   329  // A single goroutine runs all finalizers for a program, sequentially.
   330  // If a finalizer must run for a long time, it should do so by starting
   331  // a new goroutine.
   332  func SetFinalizer(obj interface{}, finalizer interface{}) {
   333  	if debug.sbrk != 0 {
   334  		// debug.sbrk never frees memory, so no finalizers run
   335  		// (and we don't have the data structures to record them).
   336  		return
   337  	}
   338  	e := efaceOf(&obj)
   339  	etyp := e._type
   340  	if etyp == nil {
   341  		throw("runtime.SetFinalizer: first argument is nil")
   342  	}
   343  	if etyp.kind&kindMask != kindPtr {
   344  		throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
   345  	}
   346  	ot := (*ptrtype)(unsafe.Pointer(etyp))
   347  	if ot.elem == nil {
   348  		throw("nil elem type!")
   349  	}
   350  
   351  	// find the containing object
   352  	base, _, _ := findObject(uintptr(e.data), 0, 0)
   353  
   354  	if base == 0 {
   355  		// 0-length objects are okay.
   356  		if e.data == unsafe.Pointer(&zerobase) {
   357  			return
   358  		}
   359  
   360  		// Global initializers might be linker-allocated.
   361  		//	var Foo = &Object{}
   362  		//	func main() {
   363  		//		runtime.SetFinalizer(Foo, nil)
   364  		//	}
   365  		// The relevant segments are: noptrdata, data, bss, noptrbss.
   366  		// We cannot assume they are in any order or even contiguous,
   367  		// due to external linking.
   368  		for datap := &firstmoduledata; datap != nil; datap = datap.next {
   369  			if datap.noptrdata <= uintptr(e.data) && uintptr(e.data) < datap.enoptrdata ||
   370  				datap.data <= uintptr(e.data) && uintptr(e.data) < datap.edata ||
   371  				datap.bss <= uintptr(e.data) && uintptr(e.data) < datap.ebss ||
   372  				datap.noptrbss <= uintptr(e.data) && uintptr(e.data) < datap.enoptrbss {
   373  				return
   374  			}
   375  		}
   376  		throw("runtime.SetFinalizer: pointer not in allocated block")
   377  	}
   378  
   379  	if uintptr(e.data) != base {
   380  		// As an implementation detail we allow to set finalizers for an inner byte
   381  		// of an object if it could come from tiny alloc (see mallocgc for details).
   382  		if ot.elem == nil || ot.elem.ptrdata != 0 || ot.elem.size >= maxTinySize {
   383  			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
   384  		}
   385  	}
   386  
   387  	f := efaceOf(&finalizer)
   388  	ftyp := f._type
   389  	if ftyp == nil {
   390  		// switch to system stack and remove finalizer
   391  		systemstack(func() {
   392  			removefinalizer(e.data)
   393  		})
   394  		return
   395  	}
   396  
   397  	if ftyp.kind&kindMask != kindFunc {
   398  		throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
   399  	}
   400  	ft := (*functype)(unsafe.Pointer(ftyp))
   401  	if ft.dotdotdot() {
   402  		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot")
   403  	}
   404  	if ft.inCount != 1 {
   405  		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
   406  	}
   407  	fint := ft.in()[0]
   408  	switch {
   409  	case fint == etyp:
   410  		// ok - same type
   411  		goto okarg
   412  	case fint.kind&kindMask == kindPtr:
   413  		if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
   414  			// ok - not same type, but both pointers,
   415  			// one or the other is unnamed, and same element type, so assignable.
   416  			goto okarg
   417  		}
   418  	case fint.kind&kindMask == kindInterface:
   419  		ityp := (*interfacetype)(unsafe.Pointer(fint))
   420  		if len(ityp.mhdr) == 0 {
   421  			// ok - satisfies empty interface
   422  			goto okarg
   423  		}
   424  		if iface := assertE2I2(ityp, *efaceOf(&obj)); iface.tab != nil {
   425  			goto okarg
   426  		}
   427  	}
   428  	throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
   429  okarg:
   430  	// compute size needed for return parameters
   431  	nret := uintptr(0)
   432  	for _, t := range ft.out() {
   433  		nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
   434  	}
   435  	nret = alignUp(nret, sys.PtrSize)
   436  
   437  	// make sure we have a finalizer goroutine
   438  	createfing()
   439  
   440  	systemstack(func() {
   441  		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
   442  			throw("runtime.SetFinalizer: finalizer already set")
   443  		}
   444  	})
   445  }
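A minimal, self-contained usage sketch of the contract documented above (editorial illustration; resource and the fd value are made up, and the final sleep only gives the finalizer goroutine a chance to run, it is not a guarantee):

	package main

	import (
		"fmt"
		"runtime"
		"time"
	)

	type resource struct {
		fd int // stand-in for a non-memory resource such as a file descriptor
	}

	func main() {
		r := &resource{fd: 42}
		// The finalizer must take a single argument assignable from *resource.
		runtime.SetFinalizer(r, func(r *resource) {
			fmt.Println("releasing fd", r.fd)
		})
		r = nil      // drop the last reference
		runtime.GC() // the finalizer is queued once the object is found unreachable
		time.Sleep(100 * time.Millisecond)
	}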
   446  
   447  // Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
   448  //go:noinline
   449  
   450  // KeepAlive marks its argument as currently reachable.
   451  // This ensures that the object is not freed, and its finalizer is not run,
   452  // before the point in the program where KeepAlive is called.
   453  //
   454  // A very simplified example showing where KeepAlive is required:
   455  // 	type File struct { d int }
   456  // 	d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
   457  // 	// ... do something if err != nil ...
   458  // 	p := &File{d}
   459  // 	runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
   460  // 	var buf [10]byte
   461  // 	n, err := syscall.Read(p.d, buf[:])
   462  // 	// Ensure p is not finalized until Read returns.
   463  // 	runtime.KeepAlive(p)
   464  // 	// No more uses of p after this point.
   465  //
   466  // Without the KeepAlive call, the finalizer could run at the start of
   467  // syscall.Read, closing the file descriptor before syscall.Read makes
   468  // the actual system call.
   469  //
   470  // Note: KeepAlive should only be used to prevent finalizers from
   471  // running prematurely. In particular, when used with unsafe.Pointer,
   472  // the rules for valid uses of unsafe.Pointer still apply.
   473  func KeepAlive(x interface{}) {
   474  	// Introduce a use of x that the compiler can't eliminate.
   475  	// This makes sure x is alive on entry. We need x to be alive
   476  	// on entry for "defer runtime.KeepAlive(x)"; see issue 21402.
   477  	if cgoAlwaysFalse {
   478  		println(x)
   479  	}
   480  }
   481  
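A sketch of the defer pattern referred to in the comment above (issue 21402), assuming a Unix-like platform; File and its d field are illustrative and mirror the example in the KeepAlive documentation:

	package fileio

	import (
		"runtime"
		"syscall"
	)

	type File struct{ d int }

	// Read keeps p reachable for the whole call, so a finalizer set on p
	// cannot close p.d while the read is still in flight.
	func Read(p *File, buf []byte) (int, error) {
		defer runtime.KeepAlive(p)
		return syscall.Read(p.d, buf)
	}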
