// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/goarch"
	"math"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
)

func TestHmapSize(t *testing.T) {
	// The structure of hmap is defined in runtime/map.go
	// and in cmd/compile/internal/gc/reflect.go and must be in sync.
	// The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
	var hmapSize = uintptr(8 + 5*goarch.PtrSize)
	if runtime.RuntimeHmapSize != hmapSize {
		t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
	}
}
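
// TestHmapSize above follows from the runtime's map header layout (as of this
// writing): 8 fixed bytes for the flags, B, noverflow and hash0 fields plus
// five pointer-sized fields (count, buckets, oldbuckets, nevacuate, extra),
// which gives 8+5*4 = 28 bytes on 32-bit and 8+5*8 = 48 bytes on 64-bit.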

// negative zero is a good test because:
// 1) 0 and -0 are equal, yet have distinct representations.
// 2) 0 is represented as all zeros, -0 isn't.
// I'm not sure the language spec actually requires this behavior,
// but it's what the current map implementation does.
func TestNegativeZero(t *testing.T) {
	m := make(map[float64]bool, 0)

	m[+0.0] = true
	m[math.Copysign(0.0, -1.0)] = true // should overwrite +0 entry

	if len(m) != 1 {
		t.Error("length wrong")
	}

	for k := range m {
		if math.Copysign(1.0, k) > 0 {
			t.Error("wrong sign")
		}
	}

	m = make(map[float64]bool, 0)
	m[math.Copysign(0.0, -1.0)] = true
	m[+0.0] = true // should overwrite -0.0 entry

	if len(m) != 1 {
		t.Error("length wrong")
	}

	for k := range m {
		if math.Copysign(1.0, k) < 0 {
			t.Error("wrong sign")
		}
	}
}

func testMapNan(t *testing.T, m map[float64]int) {
	if len(m) != 3 {
		t.Error("length wrong")
	}
	s := 0
	for k, v := range m {
		if k == k {
			t.Error("nan disappeared")
		}
		if (v & (v - 1)) != 0 {
			t.Error("value wrong")
		}
		s |= v
	}
	if s != 7 {
		t.Error("values wrong")
	}
}
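
// testMapNan relies on the callers inserting the distinct powers of two 1, 2
// and 4 under NaN keys: v&(v-1) != 0 flags any value that is not a single
// power of two, and OR-ing the surviving values together yields 7 only if all
// three NaN entries are still present as separate elements.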

// nan is a good test because nan != nan, and nan has
// a randomized hash value.
func TestMapAssignmentNan(t *testing.T) {
	m := make(map[float64]int, 0)
	nan := math.NaN()

	// Test assignment.
	m[nan] = 1
	m[nan] = 2
	m[nan] = 4
	testMapNan(t, m)
}

// nan is a good test because nan != nan, and nan has
// a randomized hash value.
func TestMapOperatorAssignmentNan(t *testing.T) {
	m := make(map[float64]int, 0)
	nan := math.NaN()

	// Test assignment operations.
	m[nan] += 1
	m[nan] += 2
	m[nan] += 4
	testMapNan(t, m)
}

func TestMapOperatorAssignment(t *testing.T) {
	m := make(map[int]int, 0)

	// "m[k] op= x" is rewritten into "m[k] = m[k] op x"
	// differently when op is / or % than when it isn't.
	// Simple test to make sure they all work as expected.
	m[0] = 12345
	m[0] += 67890
	m[0] /= 123
	m[0] %= 456

	const want = (12345 + 67890) / 123 % 456
	if got := m[0]; got != want {
		t.Errorf("got %d, want %d", got, want)
	}
}
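
// For the constants used in TestMapOperatorAssignment the expected value
// works out to (12345+67890)/123%456 = 80235/123%456 = 652%456 = 196.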

var sinkAppend bool

func TestMapAppendAssignment(t *testing.T) {
	m := make(map[int][]int, 0)

	m[0] = nil
	m[0] = append(m[0], 12345)
	m[0] = append(m[0], 67890)
	sinkAppend, m[0] = !sinkAppend, append(m[0], 123, 456)
	a := []int{7, 8, 9, 0}
	m[0] = append(m[0], a...)

	want := []int{12345, 67890, 123, 456, 7, 8, 9, 0}
	if got := m[0]; !reflect.DeepEqual(got, want) {
		t.Errorf("got %v, want %v", got, want)
	}
}

// Maps aren't actually copied on assignment.
func TestAlias(t *testing.T) {
	m := make(map[int]int, 0)
	m[0] = 5
	n := m
	n[0] = 6
	if m[0] != 6 {
		t.Error("alias didn't work")
	}
}

func TestGrowWithNaN(t *testing.T) {
	m := make(map[float64]int, 4)
	nan := math.NaN()

	// Use both assignment and assignment operations as they may
	// behave differently.
	m[nan] = 1
	m[nan] = 2
	m[nan] += 4

	cnt := 0
	s := 0
	growflag := true
	for k, v := range m {
		if growflag {
			// force a hashtable resize
			for i := 0; i < 50; i++ {
				m[float64(i)] = i
			}
			for i := 50; i < 100; i++ {
				m[float64(i)] += i
			}
			growflag = false
		}
		if k != k {
			cnt++
			s |= v
		}
	}
	if cnt != 3 {
		t.Error("NaN keys lost during grow")
	}
	if s != 7 {
		t.Error("NaN values lost during grow")
	}
}
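
// TestGrowWithNaN depends on the 100 insertions inside the range loop being
// far more than the small table hinted at by make(map[float64]int, 4) can
// hold, so at least one grow happens while the iterator is live and the NaN
// entries must be carried across it.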

type FloatInt struct {
	x float64
	y int
}

func TestGrowWithNegativeZero(t *testing.T) {
	negzero := math.Copysign(0.0, -1.0)
	m := make(map[FloatInt]int, 4)
	m[FloatInt{0.0, 0}] = 1
	m[FloatInt{0.0, 1}] += 2
	m[FloatInt{0.0, 2}] += 4
	m[FloatInt{0.0, 3}] = 8
	growflag := true
	s := 0
	cnt := 0
	negcnt := 0
	// The first iteration should return the +0 key.
	// The subsequent iterations should return the -0 key.
	// I'm not really sure this is required by the spec,
	// but it makes sense.
	// TODO: are we allowed to get the first entry returned again???
	for k, v := range m {
		if v == 0 {
			continue
		} // ignore entries added to grow table
		cnt++
		if math.Copysign(1.0, k.x) < 0 {
			if v&16 == 0 {
				t.Error("key/value not updated together 1")
			}
			negcnt++
			s |= v & 15
		} else {
			if v&16 == 16 {
				t.Error("key/value not updated together 2", k, v)
			}
			s |= v
		}
		if growflag {
			// force a hashtable resize
			for i := 0; i < 100; i++ {
				m[FloatInt{3.0, i}] = 0
			}
			// then change all the entries
			// to negative zero
			m[FloatInt{negzero, 0}] = 1 | 16
			m[FloatInt{negzero, 1}] = 2 | 16
			m[FloatInt{negzero, 2}] = 4 | 16
			m[FloatInt{negzero, 3}] = 8 | 16
			growflag = false
		}
	}
	if s != 15 {
		t.Error("entry missing", s)
	}
	if cnt != 4 {
		t.Error("wrong number of entries returned by iterator", cnt)
	}
	if negcnt != 3 {
		t.Error("update to negzero missed by iteration", negcnt)
	}
}

func TestIterGrowAndDelete(t *testing.T) {
	m := make(map[int]int, 4)
	for i := 0; i < 100; i++ {
		m[i] = i
	}
	growflag := true
	for k := range m {
		if growflag {
			// grow the table
			for i := 100; i < 1000; i++ {
				m[i] = i
			}
			// delete all odd keys
			for i := 1; i < 1000; i += 2 {
				delete(m, i)
			}
			growflag = false
		} else {
			if k&1 == 1 {
				t.Error("odd value returned")
			}
		}
	}
}

// make sure old bucket arrays don't get GCd while
// an iterator is still using them.
func TestIterGrowWithGC(t *testing.T) {
	m := make(map[int]int, 4)
	for i := 0; i < 8; i++ {
		m[i] = i
	}
	for i := 8; i < 16; i++ {
		m[i] += i
	}
	growflag := true
	bitmask := 0
	for k := range m {
		if k < 16 {
			bitmask |= 1 << uint(k)
		}
		if growflag {
			// grow the table
			for i := 100; i < 1000; i++ {
				m[i] = i
			}
			// trigger a gc
			runtime.GC()
			growflag = false
		}
	}
	if bitmask != 1<<16-1 {
		t.Error("missing key", bitmask)
	}
}

func testConcurrentReadsAfterGrowth(t *testing.T, useReflect bool) {
	t.Parallel()
	if runtime.GOMAXPROCS(-1) == 1 {
		if runtime.GOARCH == "s390" {
			// Test uses too much address space on 31-bit S390.
			defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
		} else {
			defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(16))
		}
	}
	numLoop := 10
	numGrowStep := 250
	numReader := 16
	if testing.Short() {
		numLoop, numGrowStep = 2, 100
	}
	for i := 0; i < numLoop; i++ {
		m := make(map[int]int, 0)
		for gs := 0; gs < numGrowStep; gs++ {
			m[gs] = gs
			var wg sync.WaitGroup
			wg.Add(numReader * 2)
			for nr := 0; nr < numReader; nr++ {
				go func() {
					defer wg.Done()
					for range m {
					}
				}()
				go func() {
					defer wg.Done()
					for key := 0; key < gs; key++ {
						_ = m[key]
					}
				}()
				if useReflect {
					wg.Add(1)
					go func() {
						defer wg.Done()
						mv := reflect.ValueOf(m)
						keys := mv.MapKeys()
						for _, k := range keys {
							mv.MapIndex(k)
						}
					}()
				}
			}
			wg.Wait()
		}
	}
}

func TestConcurrentReadsAfterGrowth(t *testing.T) {
	testConcurrentReadsAfterGrowth(t, false)
}

func TestConcurrentReadsAfterGrowthReflect(t *testing.T) {
	testConcurrentReadsAfterGrowth(t, true)
}
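
// In testConcurrentReadsAfterGrowth the single write per grow step happens
// before the reader goroutines are spawned, and wg.Wait() runs before the
// next write, so reads and writes never overlap; the test only exercises
// concurrent readers (range, indexed and reflect-based) against a map that
// keeps growing between steps.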

func TestBigItems(t *testing.T) {
	var key [256]string
	for i := 0; i < 256; i++ {
		key[i] = "foo"
	}
	m := make(map[[256]string][256]string, 4)
	for i := 0; i < 100; i++ {
		key[37] = fmt.Sprintf("string%02d", i)
		m[key] = key
	}
	var keys [100]string
	var values [100]string
	i := 0
	for k, v := range m {
		keys[i] = k[37]
		values[i] = v[37]
		i++
	}
	sort.Strings(keys[:])
	sort.Strings(values[:])
	for i := 0; i < 100; i++ {
		if keys[i] != fmt.Sprintf("string%02d", i) {
			t.Errorf("#%d: missing key: %v", i, keys[i])
		}
		if values[i] != fmt.Sprintf("string%02d", i) {
			t.Errorf("#%d: missing value: %v", i, values[i])
		}
	}
}

func TestMapHugeZero(t *testing.T) {
	type T [4000]byte
	m := map[int]T{}
	x := m[0]
	if x != (T{}) {
		t.Errorf("map value not zero")
	}
	y, ok := m[0]
	if ok {
		t.Errorf("map value should be missing")
	}
	if y != (T{}) {
		t.Errorf("map value not zero")
	}
}

type empty struct {
}

func TestEmptyKeyAndValue(t *testing.T) {
	a := make(map[int]empty, 4)
	b := make(map[empty]int, 4)
	c := make(map[empty]empty, 4)
	a[0] = empty{}
	b[empty{}] = 0
	b[empty{}] = 1
	c[empty{}] = empty{}

	if len(a) != 1 {
		t.Errorf("empty value insert problem")
	}
	if b[empty{}] != 1 {
		t.Errorf("empty key returned wrong value")
	}
}

// Tests a map with a single bucket, with same-lengthed short keys
// ("quick keys") as well as long keys.
func TestSingleBucketMapStringKeys_DupLen(t *testing.T) {
	testMapLookups(t, map[string]string{
		"x":                      "x1val",
		"xx":                     "x2val",
		"foo":                    "fooval",
		"bar":                    "barval", // same key length as "foo"
		"xxxx":                   "x4val",
		strings.Repeat("x", 128): "longval1",
		strings.Repeat("y", 128): "longval2",
	})
}

// Tests a map with a single bucket, with all keys having different lengths.
func TestSingleBucketMapStringKeys_NoDupLen(t *testing.T) {
	testMapLookups(t, map[string]string{
		"x":                      "x1val",
		"xx":                     "x2val",
		"foo":                    "fooval",
		"xxxx":                   "x4val",
		"xxxxx":                  "x5val",
		"xxxxxx":                 "x6val",
		strings.Repeat("x", 128): "longval",
	})
}

func testMapLookups(t *testing.T, m map[string]string) {
	for k, v := range m {
		if m[k] != v {
			t.Fatalf("m[%q] = %q; want %q", k, m[k], v)
		}
	}
}

// Tests whether the iterator returns the right elements when
// started in the middle of a grow, when the keys are NaNs.
func TestMapNanGrowIterator(t *testing.T) {
	m := make(map[float64]int)
	nan := math.NaN()
	const nBuckets = 16
	// To fill nBuckets buckets takes LOAD * nBuckets keys.
	nKeys := int(nBuckets * runtime.HashLoad)

	// Get map to full point with nan keys.
	for i := 0; i < nKeys; i++ {
		m[nan] = i
	}
	// Trigger grow
	m[1.0] = 1
	delete(m, 1.0)

	// Run iterator
	found := make(map[int]struct{})
	for _, v := range m {
		if v != -1 {
			if _, repeat := found[v]; repeat {
				t.Fatalf("repeat of value %d", v)
			}
			found[v] = struct{}{}
		}
		if len(found) == nKeys/2 {
			// Halfway through iteration, finish grow.
			for i := 0; i < nBuckets; i++ {
				delete(m, 1.0)
			}
		}
	}
	if len(found) != nKeys {
		t.Fatalf("missing value")
	}
}
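
// runtime.HashLoad exports the map load factor, 6.5 entries per bucket in the
// classic bucket-based implementation as of this writing, so nKeys above is
// int(16*6.5) = 104 NaN keys: enough to fill nBuckets buckets right up to the
// growth threshold before the extra insert triggers the grow.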

func TestMapIterOrder(t *testing.T) {
	for _, n := range [...]int{3, 7, 9, 15} {
		for i := 0; i < 1000; i++ {
			// Make m be {0: true, 1: true, ..., n-1: true}.
			m := make(map[int]bool)
			for i := 0; i < n; i++ {
				m[i] = true
			}
			// Check that iterating over the map produces at least two different orderings.
			ord := func() []int {
				var s []int
				for key := range m {
					s = append(s, key)
				}
				return s
			}
			first := ord()
			ok := false
			for try := 0; try < 100; try++ {
				if !reflect.DeepEqual(first, ord()) {
					ok = true
					break
				}
			}
			if !ok {
				t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
				break
			}
		}
	}
}

// Issue 8410
func TestMapSparseIterOrder(t *testing.T) {
	// Run several rounds to increase the probability
	// of failure. One is not enough.
NextRound:
	for round := 0; round < 10; round++ {
		m := make(map[int]bool)
		// Add 1000 items, remove 980.
		for i := 0; i < 1000; i++ {
			m[i] = true
		}
		for i := 20; i < 1000; i++ {
			delete(m, i)
		}

		var first []int
		for i := range m {
			first = append(first, i)
		}

		// 800 chances to get a different iteration order.
		// See bug 8736 for why we need so many tries.
		for n := 0; n < 800; n++ {
			idx := 0
			for i := range m {
				if i != first[idx] {
					// iteration order changed.
					continue NextRound
				}
				idx++
			}
		}
		t.Fatalf("constant iteration order on round %d: %v", round, first)
	}
}

func TestMapStringBytesLookup(t *testing.T) {
	// Use large string keys to avoid small-allocation coalescing,
	// which can cause AllocsPerRun to report lower counts than it should.
	m := map[string]int{
		"1000000000000000000000000000000000000000000000000": 1,
		"2000000000000000000000000000000000000000000000000": 2,
	}
	buf := []byte("1000000000000000000000000000000000000000000000000")
	if x := m[string(buf)]; x != 1 {
		t.Errorf(`m[string([]byte("1"))] = %d, want 1`, x)
	}
	buf[0] = '2'
	if x := m[string(buf)]; x != 2 {
		t.Errorf(`m[string([]byte("2"))] = %d, want 2`, x)
	}

	t.Skip("does not work on gccgo without better escape analysis")

	var x int
	n := testing.AllocsPerRun(100, func() {
		x += m[string(buf)]
	})
	if n != 0 {
		t.Errorf("AllocsPerRun for m[string(buf)] = %v, want 0", n)
	}

	x = 0
	n = testing.AllocsPerRun(100, func() {
		y, ok := m[string(buf)]
		if !ok {
			panic("!ok")
		}
		x += y
	})
	if n != 0 {
		t.Errorf("AllocsPerRun for x,ok = m[string(buf)] = %v, want 0", n)
	}
}

func TestMapLargeKeyNoPointer(t *testing.T) {
	const (
		I = 1000
		N = 64
	)
	type T [N]int
	m := make(map[T]int)
	for i := 0; i < I; i++ {
		var v T
		for j := 0; j < N; j++ {
			v[j] = i + j
		}
		m[v] = i
	}
	runtime.GC()
	for i := 0; i < I; i++ {
		var v T
		for j := 0; j < N; j++ {
			v[j] = i + j
		}
		if m[v] != i {
			t.Fatalf("corrupted map: want %+v, got %+v", i, m[v])
		}
	}
}

func TestMapLargeValNoPointer(t *testing.T) {
	const (
		I = 1000
		N = 64
	)
	type T [N]int
	m := make(map[int]T)
	for i := 0; i < I; i++ {
		var v T
		for j := 0; j < N; j++ {
			v[j] = i + j
		}
		m[i] = v
	}
	runtime.GC()
	for i := 0; i < I; i++ {
		var v T
		for j := 0; j < N; j++ {
			v[j] = i + j
		}
		v1 := m[i]
		for j := 0; j < N; j++ {
			if v1[j] != v[j] {
				t.Fatalf("corrupted map: want %+v, got %+v", v, v1)
			}
		}
	}
}

// Test that making a map with a large or invalid hint
// doesn't panic. (Issue 19926).
func TestIgnoreBogusMapHint(t *testing.T) {
	for _, hint := range []int64{-1, 1 << 62} {
		_ = make(map[int]int, hint)
	}
}

var mapSink map[int]int

var mapBucketTests = [...]struct {
	n        int // n is the number of map elements
	noescape int // number of expected buckets for non-escaping map
	escape   int // number of expected buckets for escaping map
}{
	{-(1 << 30), 1, 1},
	{-1, 1, 1},
	{0, 1, 1},
	{1, 1, 1},
	{8, 1, 1},
	{9, 2, 2},
	{13, 2, 2},
	{14, 4, 4},
	{26, 4, 4},
}
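
// The expected bucket counts above follow from the runtime's bucket size of 8
// entries and a load factor of roughly 6.5 (as of this writing): hints up to
// 8 fit in one bucket, 9 through 13 need two, 14 through 26 need four, and
// non-positive hints fall back to a single bucket.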

func TestMapBuckets(t *testing.T) {
	// Test that maps of different sizes have the right number of buckets.
	// Non-escaping maps with small buckets (like map[int]int) never
	// have a nil bucket pointer due to starting with preallocated buckets
	// on the stack. Escaping maps start with a non-nil bucket pointer if
	// hint size is above bucketCnt and thereby have more than one bucket.
	// These tests depend on bucketCnt and loadFactor* in map.go.
	t.Run("mapliteral", func(t *testing.T) {
		for _, tt := range mapBucketTests {
			localMap := map[int]int{}
			// Skip test on gccgo until escape analysis is
			// turned on.
			if runtime.MapBucketsPointerIsNil(localMap) && runtime.Compiler != "gccgo" {
				t.Errorf("no escape: buckets pointer is nil for non-escaping map")
			}
			for i := 0; i < tt.n; i++ {
				localMap[i] = i
			}
			if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
				t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
			}
			escapingMap := map[int]int{}
			if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
				t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
			}
			for i := 0; i < tt.n; i++ {
				escapingMap[i] = i
			}
			if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
				t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
			}
			mapSink = escapingMap
		}
	})
	t.Run("nohint", func(t *testing.T) {
		for _, tt := range mapBucketTests {
			localMap := make(map[int]int)
			// Skip test on gccgo until escape analysis is
			// turned on.
			if runtime.MapBucketsPointerIsNil(localMap) && runtime.Compiler != "gccgo" {
				t.Errorf("no escape: buckets pointer is nil for non-escaping map")
			}
			for i := 0; i < tt.n; i++ {
				localMap[i] = i
			}
			if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
				t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
			}
			escapingMap := make(map[int]int)
			if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
				t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
			}
			for i := 0; i < tt.n; i++ {
				escapingMap[i] = i
			}
			if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
				t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
			}
			mapSink = escapingMap
		}
	})
	t.Run("makemap", func(t *testing.T) {
		for _, tt := range mapBucketTests {
			localMap := make(map[int]int, tt.n)
			// Skip test on gccgo until escape analysis is
			// turned on.
			if runtime.MapBucketsPointerIsNil(localMap) && runtime.Compiler != "gccgo" {
				t.Errorf("no escape: buckets pointer is nil for non-escaping map")
			}
			for i := 0; i < tt.n; i++ {
				localMap[i] = i
			}
			if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
				t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
			}
			escapingMap := make(map[int]int, tt.n)
			if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
				t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
			}
			for i := 0; i < tt.n; i++ {
				escapingMap[i] = i
			}
			if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
				t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
			}
			mapSink = escapingMap
		}
	})
	t.Run("makemap64", func(t *testing.T) {
		for _, tt := range mapBucketTests {
			localMap := make(map[int]int, int64(tt.n))
			// Skip test on gccgo until escape analysis is
			// turned on.
			if runtime.MapBucketsPointerIsNil(localMap) && runtime.Compiler != "gccgo" {
				t.Errorf("no escape: buckets pointer is nil for non-escaping map")
			}
			for i := 0; i < tt.n; i++ {
				localMap[i] = i
			}
			if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
				t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
			}
			escapingMap := make(map[int]int, tt.n)
			if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
				t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
			}
			for i := 0; i < tt.n; i++ {
				escapingMap[i] = i
			}
			if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
				t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
			}
			mapSink = escapingMap
		}
	})
}

func benchmarkMapPop(b *testing.B, n int) {
	m := map[int]int{}
	for i := 0; i < b.N; i++ {
		for j := 0; j < n; j++ {
			m[j] = j
		}
		for j := 0; j < n; j++ {
			// Use iterator to pop an element.
			// We want this to be fast, see issue 8412.
			for k := range m {
				delete(m, k)
				break
			}
		}
	}
}

func BenchmarkMapPop100(b *testing.B)   { benchmarkMapPop(b, 100) }
func BenchmarkMapPop1000(b *testing.B)  { benchmarkMapPop(b, 1000) }
func BenchmarkMapPop10000(b *testing.B) { benchmarkMapPop(b, 10000) }

var testNonEscapingMapVariable int = 8

func TestNonEscapingMap(t *testing.T) {
	t.Skip("does not work on gccgo without better escape analysis")
	n := testing.AllocsPerRun(1000, func() {
		m := map[int]int{}
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("mapliteral: want 0 allocs, got %v", n)
	}
	n = testing.AllocsPerRun(1000, func() {
		m := make(map[int]int)
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("no hint: want 0 allocs, got %v", n)
	}
	n = testing.AllocsPerRun(1000, func() {
		m := make(map[int]int, 8)
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("with small hint: want 0 allocs, got %v", n)
	}
	n = testing.AllocsPerRun(1000, func() {
		m := make(map[int]int, testNonEscapingMapVariable)
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("with variable hint: want 0 allocs, got %v", n)
	}
}

func benchmarkMapAssignInt32(b *testing.B, n int) {
	a := make(map[int32]int)
	for i := 0; i < b.N; i++ {
		a[int32(i&(n-1))] = i
	}
}
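
// The assign-style benchmarks in this family index with i&(n-1), which
// assumes n is a power of two (the callers pass 1<<8 and 1<<16); that keeps
// the working set at exactly n distinct keys without a modulo in the hot
// loop.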

func benchmarkMapOperatorAssignInt32(b *testing.B, n int) {
	a := make(map[int32]int)
	for i := 0; i < b.N; i++ {
		a[int32(i&(n-1))] += i
	}
}

func benchmarkMapAppendAssignInt32(b *testing.B, n int) {
	a := make(map[int32][]int)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := int32(i & (n - 1))
		a[key] = append(a[key], i)
	}
}

func benchmarkMapDeleteInt32(b *testing.B, n int) {
	a := make(map[int32]int, n)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if len(a) == 0 {
			b.StopTimer()
			for j := i; j < i+n; j++ {
				a[int32(j)] = j
			}
			b.StartTimer()
		}
		delete(a, int32(i))
	}
}

func benchmarkMapAssignInt64(b *testing.B, n int) {
	a := make(map[int64]int)
	for i := 0; i < b.N; i++ {
		a[int64(i&(n-1))] = i
	}
}

func benchmarkMapOperatorAssignInt64(b *testing.B, n int) {
	a := make(map[int64]int)
	for i := 0; i < b.N; i++ {
		a[int64(i&(n-1))] += i
	}
}

func benchmarkMapAppendAssignInt64(b *testing.B, n int) {
	a := make(map[int64][]int)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := int64(i & (n - 1))
		a[key] = append(a[key], i)
	}
}

func benchmarkMapDeleteInt64(b *testing.B, n int) {
	a := make(map[int64]int, n)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if len(a) == 0 {
			b.StopTimer()
			for j := i; j < i+n; j++ {
				a[int64(j)] = j
			}
			b.StartTimer()
		}
		delete(a, int64(i))
	}
}

func benchmarkMapAssignStr(b *testing.B, n int) {
	k := make([]string, n)
	for i := 0; i < len(k); i++ {
		k[i] = strconv.Itoa(i)
	}
	b.ResetTimer()
	a := make(map[string]int)
	for i := 0; i < b.N; i++ {
		a[k[i&(n-1)]] = i
	}
}

func benchmarkMapOperatorAssignStr(b *testing.B, n int) {
	k := make([]string, n)
	for i := 0; i < len(k); i++ {
		k[i] = strconv.Itoa(i)
	}
	b.ResetTimer()
	a := make(map[string]string)
	for i := 0; i < b.N; i++ {
		key := k[i&(n-1)]
		a[key] += key
	}
}

func benchmarkMapAppendAssignStr(b *testing.B, n int) {
	k := make([]string, n)
	for i := 0; i < len(k); i++ {
		k[i] = strconv.Itoa(i)
	}
	a := make(map[string][]string)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := k[i&(n-1)]
		a[key] = append(a[key], key)
	}
}

func benchmarkMapDeleteStr(b *testing.B, n int) {
	i2s := make([]string, n)
	for i := 0; i < n; i++ {
		i2s[i] = strconv.Itoa(i)
	}
	a := make(map[string]int, n)
	b.ResetTimer()
	k := 0
	for i := 0; i < b.N; i++ {
		if len(a) == 0 {
			b.StopTimer()
			for j := 0; j < n; j++ {
				a[i2s[j]] = j
			}
			k = i
			b.StartTimer()
		}
		delete(a, i2s[i-k])
	}
}

func benchmarkMapDeletePointer(b *testing.B, n int) {
	i2p := make([]*int, n)
	for i := 0; i < n; i++ {
		i2p[i] = new(int)
	}
	a := make(map[*int]int, n)
	b.ResetTimer()
	k := 0
	for i := 0; i < b.N; i++ {
		if len(a) == 0 {
			b.StopTimer()
			for j := 0; j < n; j++ {
				a[i2p[j]] = j
			}
			k = i
			b.StartTimer()
		}
		delete(a, i2p[i-k])
	}
}

func runWith(f func(*testing.B, int), v ...int) func(*testing.B) {
	return func(b *testing.B) {
		for _, n := range v {
			b.Run(strconv.Itoa(n), func(b *testing.B) { f(b, n) })
		}
	}
}

func BenchmarkMapAssign(b *testing.B) {
	b.Run("Int32", runWith(benchmarkMapAssignInt32, 1<<8, 1<<16))
	b.Run("Int64", runWith(benchmarkMapAssignInt64, 1<<8, 1<<16))
	b.Run("Str", runWith(benchmarkMapAssignStr, 1<<8, 1<<16))
}

func BenchmarkMapOperatorAssign(b *testing.B) {
	b.Run("Int32", runWith(benchmarkMapOperatorAssignInt32, 1<<8, 1<<16))
	b.Run("Int64", runWith(benchmarkMapOperatorAssignInt64, 1<<8, 1<<16))
	b.Run("Str", runWith(benchmarkMapOperatorAssignStr, 1<<8, 1<<16))
}

func BenchmarkMapAppendAssign(b *testing.B) {
	b.Run("Int32", runWith(benchmarkMapAppendAssignInt32, 1<<8, 1<<16))
	b.Run("Int64", runWith(benchmarkMapAppendAssignInt64, 1<<8, 1<<16))
	b.Run("Str", runWith(benchmarkMapAppendAssignStr, 1<<8, 1<<16))
}

func BenchmarkMapDelete(b *testing.B) {
	b.Run("Int32", runWith(benchmarkMapDeleteInt32, 100, 1000, 10000))
	b.Run("Int64", runWith(benchmarkMapDeleteInt64, 100, 1000, 10000))
	b.Run("Str", runWith(benchmarkMapDeleteStr, 100, 1000, 10000))
	b.Run("Pointer", runWith(benchmarkMapDeletePointer, 100, 1000, 10000))
}

func TestDeferDeleteSlow(t *testing.T) {
	ks := []complex128{0, 1, 2, 3}

	m := make(map[any]int)
	for i, k := range ks {
		m[k] = i
	}
	if len(m) != len(ks) {
		t.Errorf("want %d elements, got %d", len(ks), len(m))
	}

	func() {
		for _, k := range ks {
			defer delete(m, k)
		}
	}()
	if len(m) != 0 {
		t.Errorf("want 0 elements, got %d", len(m))
	}
}

// TestIncrementAfterDeleteValueInt and the other tests here exercise Issue 25936.
// Value types int, int32, int64 are affected. Value type string
// works as expected.
func TestIncrementAfterDeleteValueInt(t *testing.T) {
	const key1 = 12
	const key2 = 13

	m := make(map[int]int)
	m[key1] = 99
	delete(m, key1)
	m[key2]++
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestIncrementAfterDeleteValueInt32(t *testing.T) {
	const key1 = 12
	const key2 = 13

	m := make(map[int]int32)
	m[key1] = 99
	delete(m, key1)
	m[key2]++
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestIncrementAfterDeleteValueInt64(t *testing.T) {
	const key1 = 12
	const key2 = 13

	m := make(map[int]int64)
	m[key1] = 99
	delete(m, key1)
	m[key2]++
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestIncrementAfterDeleteKeyStringValueInt(t *testing.T) {
	const key1 = ""
	const key2 = "x"

	m := make(map[string]int)
	m[key1] = 99
	delete(m, key1)
	m[key2] += 1
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestIncrementAfterDeleteKeyValueString(t *testing.T) {
	const key1 = ""
	const key2 = "x"

	m := make(map[string]string)
	m[key1] = "99"
	delete(m, key1)
	m[key2] += "1"
	if n2 := m[key2]; n2 != "1" {
		t.Errorf("appended '1' to empty (nil) string, got %s", n2)
	}
}

// TestIncrementAfterBulkClearKeyStringValueInt tests that map bulk
// deletion (mapclear) still works as expected. Note that it was not
// affected by Issue 25936.
func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) {
	const key1 = ""
	const key2 = "x"

	m := make(map[string]int)
	m[key1] = 99
	for k := range m {
		delete(m, k)
	}
	m[key2]++
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestMapTombstones(t *testing.T) {
	m := map[int]int{}
	const N = 10000
	// Fill a map.
	for i := 0; i < N; i++ {
		m[i] = i
	}
	runtime.MapTombstoneCheck(m)
	// Delete half of the entries.
	for i := 0; i < N; i += 2 {
		delete(m, i)
	}
	runtime.MapTombstoneCheck(m)
	// Add new entries to fill in holes.
	for i := N; i < 3*N/2; i++ {
		m[i] = i
	}
	runtime.MapTombstoneCheck(m)
	// Delete everything.
	for i := 0; i < 3*N/2; i++ {
		delete(m, i)
	}
	runtime.MapTombstoneCheck(m)
}
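
// runtime.MapTombstoneCheck is a test-only hook exported by the runtime; it
// walks the map's buckets and checks the internal invariants for deleted
// ("tombstone") slots after the fill/delete patterns above. The details of
// the check live in the runtime's test exports, not in this file.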

type canString int

func (c canString) String() string {
	return fmt.Sprintf("%d", int(c))
}

func TestMapInterfaceKey(t *testing.T) {
	// Test all the special cases in runtime.typehash.
	type GrabBag struct {
		f32  float32
		f64  float64
		c64  complex64
		c128 complex128
		s    string
		i0   any
		i1   interface {
			String() string
		}
		a [4]string
	}

	m := map[any]bool{}
	// Put a bunch of data in m, so that a bad hash is likely to
	// lead to a bad bucket, which will lead to a missed lookup.
	for i := 0; i < 1000; i++ {
		m[i] = true
	}
	m[GrabBag{f32: 1.0}] = true
	if !m[GrabBag{f32: 1.0}] {
		panic("f32 not found")
	}
	m[GrabBag{f64: 1.0}] = true
	if !m[GrabBag{f64: 1.0}] {
		panic("f64 not found")
	}
	m[GrabBag{c64: 1.0i}] = true
	if !m[GrabBag{c64: 1.0i}] {
		panic("c64 not found")
	}
	m[GrabBag{c128: 1.0i}] = true
	if !m[GrabBag{c128: 1.0i}] {
		panic("c128 not found")
	}
	m[GrabBag{s: "foo"}] = true
	if !m[GrabBag{s: "foo"}] {
		panic("string not found")
	}
	m[GrabBag{i0: "foo"}] = true
	if !m[GrabBag{i0: "foo"}] {
		panic("interface{} not found")
	}
	m[GrabBag{i1: canString(5)}] = true
	if !m[GrabBag{i1: canString(5)}] {
		panic("interface{String() string} not found")
	}
	m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true
	if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] {
		panic("array not found")
	}
}