runtime: Remove now unnecessary pad field from ParFor.

It is not needed due to the removal of the ctx field.
    
    Reviewed-on: https://go-review.googlesource.com/16525

From-SVN: r229616
Ian Lance Taylor 2015-10-31 00:59:47 +00:00
parent 725e1be340
commit af146490bb
1007 changed files with 86529 additions and 30520 deletions
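
The change named in the commit title is mechanical: with the ctx field gone from the ParFor descriptor, the pad field beside it has nothing left to pad against. A purely illustrative sketch of that kind of cleanup follows; the type and field names are hypothetical, not the actual gofrontend definition. The hunks excerpted below are from the database/sql tests, one of the many files updated by this large commit.

package sketch

// parForDesc is a hypothetical stand-in for the runtime's parallel-for
// descriptor; it is NOT the real ParFor layout.
type parForDesc struct {
	body func(uint32) // loop body, invoked once per iteration index
	nthr uint32       // number of worker threads participating
	// ctx uintptr    // removed in an earlier change
	// pad uint32     // existed only to keep the layout aligned next to ctx;
	//                // with ctx gone it serves no purpose and is dropped
	cnt uint32 // iteration counter shared by the workers
}
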


@@ -497,7 +497,7 @@ func TestTxStmt(t *testing.T) {
	}
}

-// Issue: http://golang.org/issue/2784
+// Issue: https://golang.org/issue/2784
// This test didn't fail before because we got lucky with the fakedb driver.
// It was failing, and now not, in github.com/bradfitz/go-sql-test
func TestTxQuery(t *testing.T) {
@@ -1070,6 +1070,57 @@ func TestMaxOpenConns(t *testing.T) {
	}
}

// Issue 9453: tests that SetMaxOpenConns can be lowered at runtime
// and affects the subsequent release of connections.
func TestMaxOpenConnsOnBusy(t *testing.T) {
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})

	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)

	db.SetMaxOpenConns(3)

	conn0, err := db.conn(cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}

	conn1, err := db.conn(cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}

	conn2, err := db.conn(cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}

	if g, w := db.numOpen, 3; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}

	db.SetMaxOpenConns(2)
	if g, w := db.numOpen, 3; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}

	conn0.releaseConn(nil)
	conn1.releaseConn(nil)
	if g, w := db.numOpen, 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}

	conn2.releaseConn(nil)
	if g, w := db.numOpen, 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
}

func TestSingleOpenConn(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
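
TestMaxOpenConnsOnBusy above drives the pool through unexported internals (db.conn, releaseConn, numOpen), which only the in-package test can reach. Seen from the exported API, the behavior it pins down is that lowering the limit with SetMaxOpenConns does not force busy connections closed; the pool shrinks as they are released. A minimal sketch against the public surface, with a placeholder driver name and DSN:

package main

import (
	"database/sql"
	"log"

	_ "example.com/somedriver" // placeholder driver registration
)

func main() {
	db, err := sql.Open("somedriver", "dsn-placeholder")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.SetMaxOpenConns(3) // allow at most three open connections

	// ... run work that keeps all three connections busy ...

	// Lowering the limit takes effect gradually: connections already in
	// use stay open until they are returned to the pool.
	db.SetMaxOpenConns(2)
}
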
@@ -1093,6 +1144,26 @@ func TestSingleOpenConn(t *testing.T) {
	}
}

func TestStats(t *testing.T) {
	db := newTestDB(t, "people")
	stats := db.Stats()
	if got := stats.OpenConnections; got != 1 {
		t.Errorf("stats.OpenConnections = %d; want 1", got)
	}

	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()

	closeDB(t, db)
	stats = db.Stats()
	if got := stats.OpenConnections; got != 0 {
		t.Errorf("stats.OpenConnections = %d; want 0", got)
	}
}

// golang.org/issue/5323
func TestStmtCloseDeps(t *testing.T) {
	if testing.Short() {
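
TestStats reads the counter through the same in-package access; regular callers get the identical information from (*DB).Stats. A short sketch, again with a placeholder driver and DSN:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "example.com/somedriver" // placeholder driver registration
)

func main() {
	db, err := sql.Open("somedriver", "dsn-placeholder")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	stats := db.Stats()
	fmt.Println("open connections:", stats.OpenConnections)
}
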
@@ -1314,7 +1385,80 @@ func TestStmtCloseOrder(t *testing.T) {
	}
}

// golang.org/issue/5781
// Test cases where there's more than maxBadConnRetries bad connections in the
// pool (issue 8834)
func TestManyErrBadConn(t *testing.T) {
	manyErrBadConnSetup := func() *DB {
		db := newTestDB(t, "people")

		nconn := maxBadConnRetries + 1
		db.SetMaxIdleConns(nconn)
		db.SetMaxOpenConns(nconn)
		// open enough connections
		func() {
			for i := 0; i < nconn; i++ {
				rows, err := db.Query("SELECT|people|age,name|")
				if err != nil {
					t.Fatal(err)
				}
				defer rows.Close()
			}
		}()

		if db.numOpen != nconn {
			t.Fatalf("unexpected numOpen %d (was expecting %d)", db.numOpen, nconn)
		} else if len(db.freeConn) != nconn {
			t.Fatalf("unexpected len(db.freeConn) %d (was expecting %d)", len(db.freeConn), nconn)
		}
		for _, conn := range db.freeConn {
			conn.ci.(*fakeConn).stickyBad = true
		}
		return db
	}

	// Query
	db := manyErrBadConnSetup()
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = rows.Close(); err != nil {
		t.Fatal(err)
	}

	// Exec
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	_, err = db.Exec("INSERT|people|name=Julia,age=19")
	if err != nil {
		t.Fatal(err)
	}

	// Begin
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}

	// Prepare
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = stmt.Close(); err != nil {
		t.Fatal(err)
	}
}

// golang.org/issue/5718
func TestErrBadConnReconnect(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
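
TestManyErrBadConn sets up more bad idle connections than maxBadConnRetries allows for, so the operations above only succeed because the pool eventually gives up on cached connections and opens a fresh one. On the driver side, the contract is simply to return driver.ErrBadConn when a pooled connection is no longer usable; database/sql then discards it and retries. A hedged sketch of that idiom, with an invented conn type and staleness flag:

package mydriver

import (
	"database/sql/driver"
	"errors"
)

// conn is a hypothetical driver connection used only for illustration.
type conn struct {
	broken bool // set when the underlying network connection has died
}

// Prepare returns driver.ErrBadConn for a dead connection so that
// database/sql throws this connection away and retries on another one.
func (c *conn) Prepare(query string) (driver.Stmt, error) {
	if c.broken {
		return nil, driver.ErrBadConn
	}
	// A real driver would prepare and return a driver.Stmt here.
	return nil, errors.New("mydriver: Prepare not implemented in this sketch")
}

// Close and Begin complete the driver.Conn interface for the sketch.
func (c *conn) Close() error { return nil }

func (c *conn) Begin() (driver.Tx, error) {
	if c.broken {
		return nil, driver.ErrBadConn
	}
	return nil, errors.New("mydriver: Begin not implemented in this sketch")
}
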
@@ -1764,56 +1908,6 @@ func doConcurrentTest(t testing.TB, ct concurrentTest) {
	wg.Wait()
}

func manyConcurrentQueries(t testing.TB) {
	maxProcs, numReqs := 16, 500
	if testing.Short() {
		maxProcs, numReqs = 4, 50
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))

	db := newTestDB(t, "people")
	defer closeDB(t, db)

	stmt, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer stmt.Close()

	var wg sync.WaitGroup
	wg.Add(numReqs)

	reqs := make(chan bool)
	defer close(reqs)

	for i := 0; i < maxProcs*2; i++ {
		go func() {
			for range reqs {
				rows, err := stmt.Query()
				if err != nil {
					t.Errorf("error on query: %v", err)
					wg.Done()
					continue
				}

				var name string
				for rows.Next() {
					rows.Scan(&name)
				}
				rows.Close()

				wg.Done()
			}
		}()
	}

	for i := 0; i < numReqs; i++ {
		reqs <- true
	}

	wg.Wait()
}

func TestIssue6081(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
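
The deleted manyConcurrentQueries helper (its role is taken over by BenchmarkManyConcurrentQueries, added further down) ignores the error from rows.Scan and never consults rows.Err. That is fine for load-generation code, but the usual iteration pattern checks both; a small sketch, assuming a prepared *sql.Stmt that returns a single string column:

package sqlexample

import "database/sql"

// queryNames drains a prepared statement that selects one string column.
func queryNames(stmt *sql.Stmt) ([]string, error) {
	rows, err := stmt.Query()
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	// rows.Err reports any error hit during iteration, which rows.Next
	// otherwise silently turns into an early stop.
	return names, rows.Err()
}
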
@@ -1985,3 +2079,31 @@ func BenchmarkConcurrentRandom(b *testing.B) {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, 16~ cores and 128~ goroutines are required.
	const parallelism = 16

	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)

	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()

	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, err := stmt.Query("sleep", 1)
			if err != nil {
				b.Error(err)
				return
			}
			rows.Close()
		}
	})
}
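
For context on the benchmark setup: b.SetParallelism(p) makes RunParallel use p×GOMAXPROCS goroutines, so this benchmark deliberately oversubscribes the CPUs to provoke the lock contention mentioned in its comment. One way to run just this benchmark with the standard test flags (the exact invocation is illustrative):

	go test -run=NONE -bench=ManyConcurrentQueries -benchmem database/sql
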