Source file src/runtime/map_faststr.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

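// mapaccess1_faststr is the string-key specialization of mapaccess1.
// The compiler emits calls to it for reads of the form v := m[k] on
// map[string]V maps. It returns a pointer to the element; if the key is
// absent it returns a pointer to the zero object, never nil.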
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
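		// keymaybe records the index of the single slot whose key survives
		// the cheap filters below (length, pointer, first/last 4 bytes);
		// bucketCnt means no candidate yet. With exactly one candidate the
		// full memequal runs once after the loop; a second candidate sends
		// the lookup to the hashed path at dohash.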
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
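// dohash: the general path. Hash the key, select its bucket, and while a
// grow is in progress fall back to the corresponding old bucket if that
// bucket has not been evacuated yet.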
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
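	// Each slot's tophash byte caches part of its key's hash, so most
	// non-matching slots are rejected without loading the key at all.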
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

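// mapaccess2_faststr is like mapaccess1_faststr, but also reports whether
// the key was present. The compiler emits calls to it for the comma-ok
// form v, ok := m[k] on map[string]V maps.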
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

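// mapassign_faststr is the string-key specialization of mapassign.
// It returns a pointer to the element slot for key s, inserting the key
// (and growing the map if necessary) when it is not already present.
// The compiler-generated caller stores the element value through the
// returned pointer; this function never writes the element itself.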
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

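	// Scan the bucket and its overflow chain: remember the first free slot
	// in case the key turns out to be new, stop at an emptyRest mark (no
	// later slot is in use), and jump to done if the key already exists.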
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			// Overwrite existing key, so it can be garbage collected.
			// The size is already guaranteed to be set correctly.
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

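// mapdelete_faststr is the string-key specialization of mapdelete, called
// by the compiler for delete(m, k) on map[string]V maps. It clears the
// key pointer and the element so they can be garbage collected.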
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
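			// The deleted slot was the last in-use slot of its chain.
			// Walk backwards, converting trailing emptyOne slots to
			// emptyRest (stepping back to the previous bucket in the
			// chain when needed) so later lookups can stop early.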
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

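// growWork_faststr does a small amount of evacuation work during a write
// while the map is growing: it evacuates the old bucket feeding the bucket
// about to be used, plus one more, so the grow eventually completes.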
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

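// evacuate_faststr copies the entries of old bucket oldbucket into the new
// bucket array. When the table is doubling, each entry is routed to one of
// two destinations (x at the same index, or y at index oldbucket+newbit)
// based on a freshly computed hash bit; a same-size grow uses only x.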
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
