// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname mapaccess1_fast32
//go:linkname mapaccess2_fast32
//go:linkname mapassign_fast32
//go:linkname mapassign_fast32ptr
//go:linkname mapdelete_fast32

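// mapaccess1_fast32 is the fast-path lookup for maps with 32-bit keys. It
// returns a pointer to the element for key, or a pointer to the zero value
// if the key is not present; it never returns nil.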
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

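// mapaccess2_fast32 is like mapaccess1_fast32, but also reports whether the
// key was present in the map.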
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

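// mapassign_fast32 is the fast-path assignment for maps with 32-bit keys.
// It returns a pointer to the element slot for key, creating the slot (and
// growing the map if needed) when the key is not already present.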
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

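// mapassign_fast32ptr is like mapassign_fast32, but for keys that are
// pointers (on platforms where a pointer is 32 bits wide), so the key slot
// must be written with pointer semantics.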
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

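// mapdelete_fast32 is the fast-path deletion for maps with 32-bit keys.
// It clears the key and element slots, maintains the emptyOne/emptyRest
// tophash markers, and does nothing if the key is not present.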
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			// This can only happen if pointers are 32 bit
			// wide as 64 bit pointers do not fit into a 32 bit key.
			if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

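// growWork_fast32 performs a unit of incremental evacuation while the map
// is growing: the old bucket for the bucket about to be used, plus one more
// to make progress on the overall grow.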
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

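// evacuate_fast32 copies the entries of old bucket oldbucket into the new
// bucket array, splitting them between the x (low) and y (high) destinations
// when the map is growing to a larger size.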
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}