Source file src/runtime/mranges.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Address range data structure.
//
// This file contains an implementation of a data structure which
// manages ordered address ranges.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// addrRange represents a region of address space.
//
// An addrRange must never span a gap in the address space.
type addrRange struct {
	// base and limit together represent the region of address space
	// [base, limit). That is, base is inclusive, limit is exclusive.
	// These are addresses over an offset view of the address space on
	// platforms with a segmented address space, that is, on platforms
	// where arenaBaseOffset != 0.
	base, limit offAddr
}

// makeAddrRange creates a new address range from two virtual addresses.
//
// Throws if the base and limit are not in the same memory segment.
func makeAddrRange(base, limit uintptr) addrRange {
	r := addrRange{offAddr{base}, offAddr{limit}}
	if (base-arenaBaseOffset >= base) != (limit-arenaBaseOffset >= limit) {
		throw("addr range base and limit are not in the same memory segment")
	}
	return r
}

// size returns the size of the range represented in bytes.
func (a addrRange) size() uintptr {
	if !a.base.lessThan(a.limit) {
		return 0
	}
	// Subtraction is safe because limit and base must be in the same
	// segment of the address space.
	return a.limit.diff(a.base)
}

// contains returns whether or not the range contains a given address.
func (a addrRange) contains(addr uintptr) bool {
	return a.base.lessEqual(offAddr{addr}) && (offAddr{addr}).lessThan(a.limit)
}

// subtract removes from a any part of the address space that overlaps
// with b, then returns the new range. subtract assumes that a and b
// either don't overlap at all, only overlap on one side, or are equal.
// If b is strictly contained in a, thus forcing a split, it will throw.
func (a addrRange) subtract(b addrRange) addrRange {
	if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
		return addrRange{}
	} else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
		throw("bad prune")
	} else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
		a.base = b.limit
	} else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
		a.limit = b.base
	}
	return a
}
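
// The usage sketch below is an editorial illustration, not part of the
// original file. It assumes arenaBaseOffset == 0, so offset addresses and
// virtual addresses coincide, and all of the addresses are made up:
//
//	a := makeAddrRange(0x1000, 0x2000)
//	a.subtract(makeAddrRange(0x0800, 0x3000)) // b covers a entirely      -> addrRange{}
//	a.subtract(makeAddrRange(0x0800, 0x1800)) // overlap on the low side  -> [0x1800, 0x2000)
//	a.subtract(makeAddrRange(0x1800, 0x3000)) // overlap on the high side -> [0x1000, 0x1800)
//	a.subtract(makeAddrRange(0x1400, 0x1800)) // strictly inside a        -> throws "bad prune"
//
// Note that subtract has a value receiver, so each call above returns a new
// range and leaves a itself untouched.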

// removeGreaterEqual removes all addresses in a greater than or equal
// to addr and returns the new range.
func (a addrRange) removeGreaterEqual(addr uintptr) addrRange {
	if (offAddr{addr}).lessEqual(a.base) {
		return addrRange{}
	}
	if a.limit.lessEqual(offAddr{addr}) {
		return a
	}
	return makeAddrRange(a.base.addr(), addr)
}
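
// An editorial sketch (not part of the original file) of the three cases,
// again assuming arenaBaseOffset == 0 and hypothetical addresses:
//
//	r := makeAddrRange(0x1000, 0x2000)
//	r.removeGreaterEqual(0x3000) // addr above the range    -> r unchanged: [0x1000, 0x2000)
//	r.removeGreaterEqual(0x1800) // addr inside the range   -> [0x1000, 0x1800)
//	r.removeGreaterEqual(0x0800) // addr at or below r.base -> addrRange{}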

var (
	// minOffAddr is the minimum address in the offset space, and
	// it corresponds to the virtual address arenaBaseOffset.
	minOffAddr = offAddr{arenaBaseOffset}

	// maxOffAddr is the maximum address in the offset address
	// space. It corresponds to the highest virtual address representable
	// by the page alloc chunk and heap arena maps.
	maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
)

// offAddr represents an address in a contiguous view
// of the address space on systems where the address space is
// segmented. On other systems, it's just a normal address.
type offAddr struct {
	// a is just the virtual address, but should never be used
	// directly. Call addr() to get this value instead.
	a uintptr
}

// add adds a uintptr offset to the offAddr.
func (l offAddr) add(bytes uintptr) offAddr {
	return offAddr{a: l.a + bytes}
}

// sub subtracts a uintptr offset from the offAddr.
func (l offAddr) sub(bytes uintptr) offAddr {
	return offAddr{a: l.a - bytes}
}

// diff returns the number of bytes between the
// two offAddrs.
func (l1 offAddr) diff(l2 offAddr) uintptr {
	return l1.a - l2.a
}

// lessThan returns true if l1 is less than l2 in the offset
// address space.
func (l1 offAddr) lessThan(l2 offAddr) bool {
	return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
}

// lessEqual returns true if l1 is less than or equal to l2 in
// the offset address space.
func (l1 offAddr) lessEqual(l2 offAddr) bool {
	return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
}

// equal returns true if the two offAddr values are equal.
func (l1 offAddr) equal(l2 offAddr) bool {
	// No need to compare in the offset space, it
	// means the same thing.
	return l1 == l2
}

// addr returns the virtual address for this offset address.
func (l offAddr) addr() uintptr {
	return l.a
}
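
// The comparisons above subtract arenaBaseOffset so that ordering happens in
// the offset space rather than on raw virtual addresses. As an editorial
// illustration (not part of the original file), take a hypothetical
// arenaBaseOffset of 0xffff800000000000:
//
//	lo := offAddr{0xffff800000001000}
//	hi := offAddr{0x0000000000001000}
//	lo.lessThan(hi) // true: lo sits near the bottom of the offset space,
//	                // even though lo.addr() > hi.addr() as raw integers.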

// addrRanges is a data structure holding a collection of ranges of
// address space.
//
// The ranges are coalesced eagerly to reduce the
// number of ranges it holds.
//
// The slice backing store for the ranges field is persistentalloc'd
// and thus there is no way to free it.
//
// addrRanges is not thread-safe.
type addrRanges struct {
	// ranges is a slice of ranges sorted by base.
	ranges []addrRange

	// totalBytes is the total amount of address space in bytes counted by
	// this addrRanges.
	totalBytes uintptr

	// sysStat is the stat to track allocations by this type.
	sysStat *sysMemStat
}

func (a *addrRanges) init(sysStat *sysMemStat) {
	ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
	ranges.len = 0
	ranges.cap = 16
	ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
	a.sysStat = sysStat
	a.totalBytes = 0
}

// findSucc returns the first index in a such that addr is
// less than the base of the addrRange at that index. If no
// such index exists, it returns len(a.ranges).
func (a *addrRanges) findSucc(addr uintptr) int {
	base := offAddr{addr}

	// Narrow down the search space via a binary search
	// for large addrRanges until we have at most iterMax
	// candidates left.
	const iterMax = 8
	bot, top := 0, len(a.ranges)
	for top-bot > iterMax {
		i := ((top - bot) / 2) + bot
		if a.ranges[i].contains(base.addr()) {
			// a.ranges[i] contains base, so
			// its successor is the next index.
			return i + 1
		}
		if base.lessThan(a.ranges[i].base) {
			// In this case i might actually be
			// the successor, but we can't be sure
			// until we check the ones before it.
			top = i
		} else {
			// In this case we know base is
			// greater than or equal to a.ranges[i].limit,
			// so i is definitely not the successor.
			// We already checked i, so pick the next
			// one.
			bot = i + 1
		}
	}
	// There are top-bot candidates left, so
	// iterate over them and find the first that
	// base is strictly less than.
	for i := bot; i < top; i++ {
		if base.lessThan(a.ranges[i].base) {
			return i
		}
	}
	return top
}
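
// An editorial sketch of findSucc's results (not part of the original file),
// assuming arenaBaseOffset == 0 and an addrRanges a whose ranges are, in
// sorted order, [0x1000, 0x2000), [0x3000, 0x4000), and [0x5000, 0x6000):
//
//	a.findSucc(0x0500) // 0: every range's base is above addr
//	a.findSucc(0x1800) // 1: addr falls inside a.ranges[0]
//	a.findSucc(0x2800) // 1: addr lies in the gap below a.ranges[1]
//	a.findSucc(0x7000) // 3 == len(a.ranges): no base is above addr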

// findAddrGreaterEqual returns the smallest address represented by a
// that is >= addr. Thus, if the address is represented by a,
// then it returns addr. The second return value indicates whether
// such an address exists for addr in a. That is, if addr is larger than
// any address known to a, the second return value will be false.
func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
	i := a.findSucc(addr)
	if i == 0 {
		return a.ranges[0].base.addr(), true
	}
	if a.ranges[i-1].contains(addr) {
		return addr, true
	}
	if i < len(a.ranges) {
		return a.ranges[i].base.addr(), true
	}
	return 0, false
}
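
// Continuing the editorial sketch from findSucc above (same three hypothetical
// ranges, arenaBaseOffset == 0):
//
//	a.findAddrGreaterEqual(0x0500) // 0x1000, true: rounded up to the first base
//	a.findAddrGreaterEqual(0x1800) // 0x1800, true: addr itself is represented
//	a.findAddrGreaterEqual(0x2800) // 0x3000, true: rounded up past the gap
//	a.findAddrGreaterEqual(0x7000) // 0, false: nothing at or above addr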

// contains returns true if a covers the address addr.
func (a *addrRanges) contains(addr uintptr) bool {
	i := a.findSucc(addr)
	if i == 0 {
		return false
	}
	return a.ranges[i-1].contains(addr)
}

// add inserts a new address range to a.
//
// r must not overlap with any address range in a and r.size() must be > 0.
func (a *addrRanges) add(r addrRange) {
	// The copies in this function are potentially expensive, but this data
	// structure is meant to represent the Go heap. At worst, copying this
	// would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
	// copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
	// arenas which is completely discontiguous. ~160µs is still a lot, but in
	// practice most platforms have 64 MiB arenas (which cuts this by a factor
	// of 16) and Go heaps are usually mostly contiguous, so the chance that
	// an addrRanges even grows to that size is extremely low.

	// An empty range has no effect on the set of addresses represented
	// by a, but passing a zero-sized range is almost always a bug.
	if r.size() == 0 {
		print("runtime: range = {", hex(r.base.addr()), ", ", hex(r.limit.addr()), "}\n")
		throw("attempted to add zero-sized address range")
	}
	// Because we assume r is not currently represented in a,
	// findSucc gives us our insertion index.
	i := a.findSucc(r.base.addr())
	coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base)
	coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base)
	if coalescesUp && coalescesDown {
		// We have neighbors and they both border us.
		// Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
		a.ranges[i-1].limit = a.ranges[i].limit

		// Delete a.ranges[i].
		copy(a.ranges[i:], a.ranges[i+1:])
		a.ranges = a.ranges[:len(a.ranges)-1]
	} else if coalescesDown {
		// We have a neighbor at a lower address only and it borders us.
		// Merge the new space into a.ranges[i-1].
		a.ranges[i-1].limit = r.limit
	} else if coalescesUp {
		// We have a neighbor at a higher address only and it borders us.
		// Merge the new space into a.ranges[i].
		a.ranges[i].base = r.base
	} else {
		// We may or may not have neighbors which don't border us.
		// Add the new range.
		if len(a.ranges)+1 > cap(a.ranges) {
			// Grow the array. Note that this leaks the old array, but since
			// we're doubling we have at most 2x waste. For a 1 TiB heap and
			// 4 MiB arenas which are all discontiguous (both very conservative
			// assumptions), this would waste at most 4 MiB of memory.
			oldRanges := a.ranges
			ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
			ranges.len = len(oldRanges) + 1
			ranges.cap = cap(oldRanges) * 2
			ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))

			// Copy in the old array, but make space for the new range.
			copy(a.ranges[:i], oldRanges[:i])
			copy(a.ranges[i+1:], oldRanges[i:])
		} else {
			a.ranges = a.ranges[:len(a.ranges)+1]
			copy(a.ranges[i+1:], a.ranges[i:])
		}
		a.ranges[i] = r
	}
	a.totalBytes += r.size()
}
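
// An editorial sketch of the coalescing behavior (not part of the original
// file), assuming arenaBaseOffset == 0 and starting from the hypothetical
// ranges [0x1000, 0x2000) and [0x3000, 0x4000):
//
//	a.add(makeAddrRange(0x2000, 0x2800)) // coalesces down:                [0x1000, 0x2800) [0x3000, 0x4000)
//	a.add(makeAddrRange(0x2800, 0x3000)) // coalesces in both directions:  [0x1000, 0x4000)
//	a.add(makeAddrRange(0x5000, 0x6000)) // borders nothing, plain insert: [0x1000, 0x4000) [0x5000, 0x6000)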

// removeLast removes and returns the highest-addressed contiguous range
// of a, or the last nBytes of that range, whichever is smaller. If a is
// empty, it returns an empty range.
func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
	if len(a.ranges) == 0 {
		return addrRange{}
	}
	r := a.ranges[len(a.ranges)-1]
	size := r.size()
	if size > nBytes {
		newEnd := r.limit.sub(nBytes)
		a.ranges[len(a.ranges)-1].limit = newEnd
		a.totalBytes -= nBytes
		return addrRange{newEnd, r.limit}
	}
	a.ranges = a.ranges[:len(a.ranges)-1]
	a.totalBytes -= size
	return r
}
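
// Editorial sketch (not part of the original file), with the hypothetical
// ranges [0x1000, 0x2000) and [0x3000, 0x4000) and arenaBaseOffset == 0:
//
//	a.removeLast(0x0400) // returns [0x3c00, 0x4000); the last range shrinks to [0x3000, 0x3c00)
//	a.removeLast(0x4000) // nBytes covers what's left of it: returns [0x3000, 0x3c00) and drops the range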

// removeGreaterEqual removes the ranges of a which are above addr, and additionally
// splits any range containing addr.
func (a *addrRanges) removeGreaterEqual(addr uintptr) {
	pivot := a.findSucc(addr)
	if pivot == 0 {
		// addr is before all ranges in a.
		a.totalBytes = 0
		a.ranges = a.ranges[:0]
		return
	}
	removed := uintptr(0)
	for _, r := range a.ranges[pivot:] {
		removed += r.size()
	}
	if r := a.ranges[pivot-1]; r.contains(addr) {
		removed += r.size()
		r = r.removeGreaterEqual(addr)
		if r.size() == 0 {
			pivot--
		} else {
			removed -= r.size()
			a.ranges[pivot-1] = r
		}
	}
	a.ranges = a.ranges[:pivot]
	a.totalBytes -= removed
}
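
// Editorial sketch (not part of the original file), again with the hypothetical
// ranges [0x1000, 0x2000) and [0x3000, 0x4000) and arenaBaseOffset == 0:
//
//	a.removeGreaterEqual(0x3800) // splits the upper range: [0x1000, 0x2000) [0x3000, 0x3800)
//	a.removeGreaterEqual(0x1000) // addr is at or below every represented address: a becomes empty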

// cloneInto makes a deep clone of a's state into b, reusing
// b's ranges storage if it is large enough.
func (a *addrRanges) cloneInto(b *addrRanges) {
	if len(a.ranges) > cap(b.ranges) {
		// Grow the array.
		ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
		ranges.len = 0
		ranges.cap = cap(a.ranges)
		ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, b.sysStat))
	}
	b.ranges = b.ranges[:len(a.ranges)]
	b.totalBytes = a.totalBytes
	copy(b.ranges, a.ranges)
}
