// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is no-op under race detector, so all these tests do not work.
// +build !race

package sync_test

import (
	"runtime"
	"runtime/debug"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	Runtime_procUnpin()

	p.Put("c")
	debug.SetGCPercent(100) // to allow following GC to actually run
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after GC", g)
	}
}

func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() interface{} {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	Runtime_procUnpin()

	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}

// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

func testPool(t *testing.T, drain bool) {
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// 1 pointer can remain on stack or elsewhere
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}

func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v interface{} = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Errorf("expect 0, got %v", v)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}

func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}