| |
| |
| |
|
|
| package time_test |
|
|
| import ( |
| "errors" |
| "fmt" |
| "internal/testenv" |
| "math/rand" |
| "runtime" |
| "strings" |
| "sync" |
| "sync/atomic" |
| "testing" |
| . "time" |
| _ "unsafe" |
| ) |
|
|
| |
| |
| |
| |
// newTimerFunc builds a Timer backed by AfterFunc rather than by the
// runtime-managed channel: the callback delivers Now() on a buffered
// channel that is then installed as the timer's C field. Tests use it
// to exercise both timer implementations through one interface.
func newTimerFunc(d Duration) *Timer {
	ch := make(chan Time, 1)
	timer := AfterFunc(d, func() { ch <- Now() })
	timer.C = ch
	return timer
}
|
|
| |
| |
| |
// haveHighResSleep reports whether this platform's sleep primitive has
// fine-grained resolution. It is only read here (by adjustDelay);
// presumably it is assigned by OS-specific setup elsewhere in the
// package — TODO confirm where it is set.
var haveHighResSleep bool
|
|
| |
| |
| |
| |
| |
| |
| func adjustDelay(t *testing.T, delay Duration) Duration { |
| if haveHighResSleep { |
| return delay |
| } |
| t.Log("adjusting delay for low resolution sleep") |
| switch runtime.GOOS { |
| case "windows": |
| return delay - 17*Millisecond |
| default: |
| t.Fatal("adjustDelay unimplemented on " + runtime.GOOS) |
| return 0 |
| } |
| } |
|
|
| func TestSleep(t *testing.T) { |
| const delay = 100 * Millisecond |
| go func() { |
| Sleep(delay / 2) |
| Interrupt() |
| }() |
| start := Now() |
| Sleep(delay) |
| delayadj := adjustDelay(t, delay) |
| duration := Since(start) |
| if duration < delayadj { |
| t.Fatalf("Sleep(%s) slept for only %s", delay, duration) |
| } |
| } |
|
|
| |
| |
| |
| func TestAfterFunc(t *testing.T) { |
| i := 10 |
| c := make(chan bool) |
| var f func() |
| f = func() { |
| i-- |
| if i >= 0 { |
| AfterFunc(0, f) |
| Sleep(1 * Second) |
| } else { |
| c <- true |
| } |
| } |
|
|
| AfterFunc(0, f) |
| <-c |
| } |
|
|
// TestTickerStress drains a very fast ticker while a background
// goroutine forces frequent garbage collections, exercising the
// interaction between timers and the GC.
func TestTickerStress(t *testing.T) {
	var done atomic.Bool
	go func() {
		for !done.Load() {
			runtime.GC()
			// Yield briefly so the ticker reader can make progress.
			Sleep(Nanosecond)
		}
	}()
	tk := NewTicker(1)
	for received := 0; received < 100; received++ {
		<-tk.C
	}
	tk.Stop()
	done.Store(true)
}
|
|
// TestTickerConcurrentStress has ten goroutines receive from a single
// shared fast ticker while garbage collections run continuously in the
// background.
func TestTickerConcurrentStress(t *testing.T) {
	var done atomic.Bool
	go func() {
		for !done.Load() {
			runtime.GC()
			// Yield briefly so the readers can make progress.
			Sleep(Nanosecond)
		}
	}()
	tk := NewTicker(1)
	var readers sync.WaitGroup
	for g := 0; g < 10; g++ {
		readers.Add(1)
		go func() {
			defer readers.Done()
			for n := 0; n < 100; n++ {
				<-tk.C
			}
		}()
	}
	readers.Wait()
	tk.Stop()
	done.Store(true)
}
|
|
// TestAfterFuncStarvation runs with GOMAXPROCS=1 while two goroutines
// ping-pong on a channel, then checks that an AfterFunc callback still
// gets scheduled: the callback flips stop, which lets the sender loop
// exit and close the channel. If the timer goroutine were starved by
// the busy pair, wg.Wait would never return.
func TestAfterFuncStarvation(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	var (
		wg   sync.WaitGroup
		stop atomic.Bool
		c    = make(chan bool, 1)
	)

	wg.Add(2)
	go func() {
		defer wg.Done()
		for !stop.Load() {
			c <- true
		}
		close(c)
	}()
	go func() {
		defer wg.Done()
		for range c {
		}
	}()

	AfterFunc(1*Microsecond, func() { stop.Store(true) })
	wg.Wait()
}
|
|
// benchmark runs bench via b.RunParallel after seeding the runtime
// with 32768 pending timers per P, so timer operations are measured
// against a realistically populated timer heap. The seeded timers are
// stopped (outside the timed region) when the benchmark finishes.
func benchmark(b *testing.B, bench func(*testing.PB)) {
	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
	var wg sync.WaitGroup
	for i := range garbageAll {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			pending := make([]*Timer, 1<<15)
			for j := range pending {
				pending[j] = AfterFunc(Hour, nil)
			}
			garbageAll[idx] = pending
		}(i)
	}
	wg.Wait()

	b.ResetTimer()
	b.RunParallel(bench)
	b.StopTimer()

	for _, pending := range garbageAll {
		for _, tm := range pending {
			tm.Stop()
		}
	}
}
|
|
| func BenchmarkAfterFunc1000(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| for pb.Next() { |
| n := 1000 |
| c := make(chan bool) |
| var f func() |
| f = func() { |
| n-- |
| if n >= 0 { |
| AfterFunc(0, f) |
| } else { |
| c <- true |
| } |
| } |
| AfterFunc(0, f) |
| <-c |
| } |
| }) |
| } |
|
|
| func BenchmarkAfter(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| for pb.Next() { |
| <-After(1) |
| } |
| }) |
| } |
|
|
| func BenchmarkStop(b *testing.B) { |
| b.Run("impl=chan", func(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| for pb.Next() { |
| NewTimer(1 * Second).Stop() |
| } |
| }) |
| }) |
| b.Run("impl=func", func(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| for pb.Next() { |
| newTimerFunc(1 * Second).Stop() |
| } |
| }) |
| }) |
| } |
|
|
| func BenchmarkSimultaneousAfterFunc1000(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| for pb.Next() { |
| n := 1000 |
| var wg sync.WaitGroup |
| wg.Add(n) |
| for range n { |
| AfterFunc(0, wg.Done) |
| } |
| wg.Wait() |
| } |
| }) |
| } |
|
|
| func BenchmarkStartStop1000(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| for pb.Next() { |
| const N = 1000 |
| timers := make([]*Timer, N) |
| for i := range timers { |
| timers[i] = AfterFunc(Hour, nil) |
| } |
|
|
| for i := range timers { |
| timers[i].Stop() |
| } |
| } |
| }) |
| } |
|
|
| func BenchmarkReset(b *testing.B) { |
| b.Run("impl=chan", func(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| t := NewTimer(Hour) |
| for pb.Next() { |
| t.Reset(Hour) |
| } |
| t.Stop() |
| }) |
| }) |
| b.Run("impl=func", func(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| t := newTimerFunc(Hour) |
| for pb.Next() { |
| t.Reset(Hour) |
| } |
| t.Stop() |
| }) |
| }) |
| } |
|
|
| func BenchmarkSleep1000(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| for pb.Next() { |
| const N = 1000 |
| var wg sync.WaitGroup |
| wg.Add(N) |
| for range N { |
| go func() { |
| Sleep(Nanosecond) |
| wg.Done() |
| }() |
| } |
| wg.Wait() |
| } |
| }) |
| } |
|
|
| func TestAfter(t *testing.T) { |
| const delay = 100 * Millisecond |
| start := Now() |
| end := <-After(delay) |
| delayadj := adjustDelay(t, delay) |
| if duration := Since(start); duration < delayadj { |
| t.Fatalf("After(%s) slept for only %d ns", delay, duration) |
| } |
| if min := start.Add(delayadj); end.Before(min) { |
| t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end) |
| } |
| } |
|
|
// TestAfterTick times Count back-to-back After(Delta) waits and checks
// the total is within a reasonable factor of Count*Delta: no faster
// than 90% of it, and (outside short mode) no slower than 3x.
func TestAfterTick(t *testing.T) {
	t.Parallel()
	const Count = 10
	Delta := 100 * Millisecond
	if testing.Short() {
		Delta = 10 * Millisecond
	}
	begin := Now()
	for tick := 0; tick < Count; tick++ {
		<-After(Delta)
	}
	total := Since(begin)
	target := Delta * Count
	if total < target*9/10 {
		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, total, target)
	}
	if !testing.Short() && total > target*30/10 {
		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, total, target)
	}
}
|
|
| func TestAfterStop(t *testing.T) { |
| t.Run("impl=chan", func(t *testing.T) { |
| testAfterStop(t, NewTimer) |
| }) |
| t.Run("impl=func", func(t *testing.T) { |
| testAfterStop(t, newTimerFunc) |
| }) |
| } |
|
|
// testAfterStop checks that Stop prevents a timer from delivering, and
// that a second Stop on the same timer reports false. Because the
// checks are wall-clock based they are inherently flaky on a loaded
// machine, so the test retries up to five times and only fails if
// every attempt failed.
func testAfterStop(t *testing.T, newTimer func(Duration) *Timer) {
	var errs []string
	logErrs := func() {
		for _, e := range errs {
			t.Log(e)
		}
	}

	for i := 0; i < 5; i++ {
		// One unrelated pending timer, plus events at 50ms (t0),
		// 150ms (t1 via AfterFunc), and a 200ms fence (c2).
		AfterFunc(100*Millisecond, func() {})
		t0 := newTimer(50 * Millisecond)
		c1 := make(chan bool, 1)
		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
		c2 := After(200 * Millisecond)
		if !t0.Stop() {
			errs = append(errs, "failed to stop event 0")
			continue
		}
		if !t1.Stop() {
			errs = append(errs, "failed to stop event 1")
			continue
		}
		// Wait past both stopped timers' deadlines; neither channel
		// may have anything to deliver.
		<-c2
		select {
		case <-t0.C:
			errs = append(errs, "event 0 was not stopped")
			continue
		case <-c1:
			errs = append(errs, "event 1 was not stopped")
			continue
		default:
		}
		// Stopping an already-stopped timer must report false.
		if t1.Stop() {
			errs = append(errs, "Stop returned true twice")
			continue
		}

		// This attempt passed. Earlier attempts' failures are logged
		// but deliberately ignored to keep the test non-flaky.
		if len(errs) > 0 {
			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
			logErrs()
		}

		return
	}

	// All five attempts failed: report for real.
	t.Errorf("saw %d errors", len(errs))
	logErrs()
}
|
|
| func TestAfterQueuing(t *testing.T) { |
| t.Run("impl=chan", func(t *testing.T) { |
| testAfterQueuing(t, After) |
| }) |
| t.Run("impl=func", func(t *testing.T) { |
| testAfterQueuing(t, func(d Duration) <-chan Time { return newTimerFunc(d).C }) |
| }) |
| } |
|
|
| func testAfterQueuing(t *testing.T, after func(Duration) <-chan Time) { |
| |
| |
| const attempts = 5 |
| err := errors.New("!=nil") |
| for i := 0; i < attempts && err != nil; i++ { |
| delta := Duration(20+i*50) * Millisecond |
| if err = testAfterQueuing1(delta, after); err != nil { |
| t.Logf("attempt %v failed: %v", i, err) |
| } |
| } |
| if err != nil { |
| t.Fatal(err) |
| } |
| } |
|
|
// slots is a shuffled list of timer "slots" used by testAfterQueuing1;
// each value serves as both a goroutine's identity and its delay
// multiplier. Duplicate values are deliberate.
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}
|
|
// afterResult records which slot's timer fired and the Time it
// delivered.
type afterResult struct {
	slot int  // index/multiplier from slots
	t    Time // value received from the timer channel
}
|
|
| func await(slot int, result chan<- afterResult, ac <-chan Time) { |
| result <- afterResult{slot, <-ac} |
| } |
|
|
| func testAfterQueuing1(delta Duration, after func(Duration) <-chan Time) error { |
| |
| |
| |
| result := make(chan afterResult, len(slots)) |
|
|
| t0 := Now() |
| for _, slot := range slots { |
| go await(slot, result, After(Duration(slot)*delta)) |
| } |
| var order []int |
| var times []Time |
| for range slots { |
| r := <-result |
| order = append(order, r.slot) |
| times = append(times, r.t) |
| } |
| for i := range order { |
| if i > 0 && order[i] < order[i-1] { |
| return fmt.Errorf("After calls returned out of order: %v", order) |
| } |
| } |
| for i, t := range times { |
| dt := t.Sub(t0) |
| target := Duration(order[i]) * delta |
| if dt < target-delta/2 || dt > target+delta*10 { |
| return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10) |
| } |
| } |
| return nil |
| } |
|
|
// TestTimerStopStress launches 100 two-second timers and stops each
// one after a second; any timer whose callback still runs reports an
// error.
func TestTimerStopStress(t *testing.T) {
	if testing.Short() {
		return
	}
	t.Parallel()
	for i := 0; i < 100; i++ {
		id := i
		go func() {
			tm := AfterFunc(2*Second, func() {
				t.Errorf("timer %d was not stopped", id)
			})
			Sleep(1 * Second)
			tm.Stop()
		}()
	}
	Sleep(3 * Second)
}
|
|
// TestSleepZeroDeadlock checks that Sleep(0) cannot deadlock while
// garbage collections run concurrently on another goroutine.
func TestSleepZeroDeadlock(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	gcDone := make(chan bool)
	go func() {
		for n := 0; n < 100; n++ {
			runtime.GC()
		}
		gcDone <- true
	}()
	for n := 0; n < 100; n++ {
		Sleep(0)
		// Exercise a trivial channel round-trip between sleeps.
		scratch := make(chan bool, 1)
		scratch <- true
		<-scratch
	}
	<-gcDone
}
|
|
| func testReset(d Duration) error { |
| t0 := NewTimer(2 * d) |
| Sleep(d) |
| if !t0.Reset(3 * d) { |
| return errors.New("resetting unfired timer returned false") |
| } |
| Sleep(2 * d) |
| select { |
| case <-t0.C: |
| return errors.New("timer fired early") |
| default: |
| } |
| Sleep(2 * d) |
| select { |
| case <-t0.C: |
| default: |
| return errors.New("reset timer did not fire") |
| } |
|
|
| if t0.Reset(50 * Millisecond) { |
| return errors.New("resetting expired timer returned true") |
| } |
| return nil |
| } |
|
|
| func TestReset(t *testing.T) { |
| |
| |
| |
| |
| |
| |
| d := 1 * Millisecond |
| const maxDuration = 10 * Second |
| for { |
| err := testReset(d) |
| if err == nil { |
| break |
| } |
| d *= 2 |
| if d > maxDuration { |
| t.Error(err) |
| } |
| t.Logf("%v; trying duration %v", err, d) |
| } |
| } |
|
|
| |
| |
| |
| |
// TestOverflowSleep checks both extremes of Duration: a maximal sleep
// or timeout must never fire, and a minimal (most negative) timeout
// must fire immediately.
func TestOverflowSleep(t *testing.T) {
	const maxDur = Duration(int64(1<<63 - 1))

	go func() {
		Sleep(maxDur)
		// If the runtime mishandled the overflow this could return;
		// crash loudly instead of silently passing.
		panic("big sleep returned")
	}()

	select {
	case <-After(maxDur):
		t.Fatalf("big timeout fired")
	case <-After(25 * Millisecond):
		// The maximal timer stayed pending — OK.
	}

	const minDur = Duration(-1 << 63)
	Sleep(minDur) // must return immediately
	select {
	case <-After(minDur):
		// Negative timeout fired immediately — OK.
	case <-After(1 * Second):
		t.Fatalf("negative timeout didn't fire")
	}
}
|
|
| |
| |
// TestIssue5745 checks that calling Stop on a nil *Timer panics, and
// that an unrelated live Ticker can still be stopped from the deferred
// recovery path without crashing.
func TestIssue5745(t *testing.T) {
	ticker := NewTicker(Hour)
	defer func() {
		// Stopping the ticker while a panic is being recovered
		// exercises timer cleanup on the panic path.
		ticker.Stop()

		if r := recover(); r == nil {
			t.Error("Expected panic, but none happened.")
		}
	}()

	var nilTimer *Timer
	nilTimer.Stop() // must panic; the next line must be unreachable
	t.Error("Should be unreachable.")
}
|
|
// TestOverflowPeriodRuntimeTimer exercises the runtime's handling of a
// timer period large enough to risk internal overflow. The actual
// check lives in CheckRuntimeTimerPeriodOverflow, a hook the time
// package exports for testing — see the package's export hooks for its
// exact contract.
func TestOverflowPeriodRuntimeTimer(t *testing.T) {
	CheckRuntimeTimerPeriodOverflow()
}
|
|
// checkZeroPanicString is deferred by tests that poke uninitialized
// Timers; it recovers the panic and verifies the message mentions the
// uninitialized-Timer diagnostic.
func checkZeroPanicString(t *testing.T) {
	recovered := recover()
	msg, _ := recovered.(string)
	const want = "called on uninitialized Timer"
	if !strings.Contains(msg, want) {
		t.Errorf("panic = %v; want substring %q", recovered, want)
	}
}
|
|
| func TestZeroTimerResetPanics(t *testing.T) { |
| defer checkZeroPanicString(t) |
| var tr Timer |
| tr.Reset(1) |
| } |
|
|
| func TestZeroTimerStopPanics(t *testing.T) { |
| defer checkZeroPanicString(t) |
| var tr Timer |
| tr.Stop() |
| } |
|
|
| |
| func TestZeroTimer(t *testing.T) { |
| t.Run("impl=chan", func(t *testing.T) { |
| testZeroTimer(t, NewTimer) |
| }) |
| t.Run("impl=func", func(t *testing.T) { |
| testZeroTimer(t, newTimerFunc) |
| }) |
| t.Run("impl=cache", func(t *testing.T) { |
| timer := newTimerFunc(Hour) |
| testZeroTimer(t, func(d Duration) *Timer { |
| timer.Reset(d) |
| return timer |
| }) |
| }) |
| } |
|
|
| func testZeroTimer(t *testing.T, newTimer func(Duration) *Timer) { |
| if testing.Short() { |
| t.Skip("-short") |
| } |
|
|
| for i := 0; i < 1000000; i++ { |
| s := Now() |
| ti := newTimer(0) |
| <-ti.C |
| if diff := Since(s); diff > 2*Second { |
| t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff) |
| } |
| } |
| } |
|
|
| |
| |
// TestTimerModifiedEarlier repeatedly re-arms an AfterFunc-backed
// timer to a deadline already in the past and checks that it still
// fires promptly — the runtime must move the modified timer earlier
// rather than leaving it at its old position.
func TestTimerModifiedEarlier(t *testing.T) {
	if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
		testenv.SkipFlaky(t, 50470)
	}

	// A deadline far in the past.
	past := Until(Unix(0, 0))
	count := 1000
	fail := 0
	for i := 0; i < count; i++ {
		timer := newTimerFunc(Hour)
		// Stop and re-arm ten times, ending armed for the past, so
		// the last Reset must pull the timer earlier.
		for j := 0; j < 10; j++ {
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(past)
		}

		// NOTE: deferred inside the loop on purpose or not — all 1000
		// deadline timers are only released when the test returns.
		deadline := NewTimer(10 * Second)
		defer deadline.Stop()
		now := Now()
		select {
		case <-timer.C:
			// Fired, but count it as a failure if it took most of the
			// deadline window.
			if since := Since(now); since > 8*Second {
				t.Errorf("timer took too long (%v)", since)
				fail++
			}
		case <-deadline.C:
			t.Error("deadline expired")
		}
	}

	if fail > 0 {
		t.Errorf("%d failures", fail)
	}
}
|
|
| |
| |
| |
// TestAdjustTimers drives 100 timers through a fixed per-timer state
// machine in a random interleaved order, exercising fire, Reset on a
// drained timer, Stop, and a final zero-duration firing.
func TestAdjustTimers(t *testing.T) {
	var rnd = rand.New(rand.NewSource(Now().UnixNano()))

	timers := make([]*Timer, 100)
	states := make([]int, len(timers))
	indices := rnd.Perm(len(timers))

	for len(indices) != 0 {
		// Pick a random not-yet-finished timer and advance its state.
		var ii = rnd.Intn(len(indices))
		var i = indices[ii]

		var timer = timers[i]
		var state = states[i]
		states[i]++

		switch state {
		case 0:
			// Create an immediately-due timer.
			timers[i] = newTimerFunc(0)

		case 1:
			// Drain the initial firing.
			<-timer.C

		// States 2/4/6: Reset to assorted long durations. The channel
		// was just drained, so Reset must report inactive (false).
		case 2:
			if timer.Reset(1 * Minute) {
				panic("shouldn't be active (1)")
			}
		case 4:
			if timer.Reset(3 * Minute) {
				panic("shouldn't be active (3)")
			}
		case 6:
			if timer.Reset(2 * Minute) {
				panic("shouldn't be active (2)")
			}

		// States 3/5/7: Stop after each Reset. If Stop reports the
		// timer already fired, drain the channel so the next Reset
		// sees it empty.
		case 3, 5, 7:
			if !timer.Stop() {
				t.Logf("timer %d state %d Stop returned false", i, state)
				<-timer.C
			}

		// States 8/9: final zero-duration arm; it must report
		// inactive and then fire quickly.
		case 8:
			if timer.Reset(0) {
				t.Fatal("timer.Reset returned true")
			}
		case 9:
			now := Now()
			<-timer.C
			dur := Since(now)
			if dur > 750*Millisecond {
				t.Errorf("timer %d took %v to complete", i, dur)
			}

		// State 10: this timer is done; remove it from the work list
		// by swapping in the last index.
		case 10:
			indices[ii] = indices[len(indices)-1]
			indices = indices[:len(indices)-1]
		}
	}
}
|
|
// TestStopResult checks Timer.Stop's return value when two timers race
// to fire; see testStopResetResult.
func TestStopResult(t *testing.T) {
	testStopResetResult(t, true)
}
|
|
// TestResetResult checks Timer.Reset's return value when two timers
// race to fire; see testStopResetResult.
func TestResetResult(t *testing.T) {
	testStopResetResult(t, false)
}
|
|
| |
| |
| |
| func testStopResetResult(t *testing.T, testStop bool) { |
| for _, name := range []string{"0", "1", "2"} { |
| t.Run("asynctimerchan="+name, func(t *testing.T) { |
| testStopResetResultGODEBUG(t, testStop, name) |
| }) |
| } |
| } |
|
|
// testStopResetResultGODEBUG races pairs of 1ms timers from many
// goroutines under a specific asynctimerchan GODEBUG setting.
// Whichever timer fires first, the other is Stopped (or Reset); when
// that call reports false, the loser's channel must still deliver a
// value — otherwise the receive below hangs and the test fails.
func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) {
	t.Setenv("GODEBUG", "asynctimerchan="+godebug)

	// stopOrReset performs either operation so both share one harness.
	stopOrReset := func(timer *Timer) bool {
		if testStop {
			return timer.Stop()
		} else {
			return timer.Reset(1 * Hour)
		}
	}

	// Release all goroutines at once to maximize contention.
	start := make(chan struct{})
	var wg sync.WaitGroup
	const N = 1000
	wg.Add(N)
	for range N {
		go func() {
			defer wg.Done()
			<-start
			for j := 0; j < 100; j++ {
				timer1 := NewTimer(1 * Millisecond)
				timer2 := NewTimer(1 * Millisecond)
				select {
				case <-timer1.C:
					if !stopOrReset(timer2) {
						// The timer already fired (or its send is in
						// flight); the value must still be receivable.
						<-timer2.C
					}
				case <-timer2.C:
					if !stopOrReset(timer1) {
						// Same check for the opposite ordering.
						<-timer1.C
					}
				}
			}
		}()
	}
	close(start)
	wg.Wait()
}
|
|
| |
| |
| func TestMultiWakeupTicker(t *testing.T) { |
| if testing.Short() { |
| t.Skip("-short") |
| } |
|
|
| goroutines := runtime.GOMAXPROCS(0) |
| timer := NewTicker(Microsecond) |
| var wg sync.WaitGroup |
| wg.Add(goroutines) |
| for range goroutines { |
| go func() { |
| defer wg.Done() |
| for range 100000 { |
| select { |
| case <-timer.C: |
| case <-After(Millisecond): |
| } |
| } |
| }() |
| } |
| wg.Wait() |
| } |
|
|
| |
| |
| func TestMultiWakeupTimer(t *testing.T) { |
| if testing.Short() { |
| t.Skip("-short") |
| } |
|
|
| goroutines := runtime.GOMAXPROCS(0) |
| timer := NewTimer(Nanosecond) |
| var wg sync.WaitGroup |
| wg.Add(goroutines) |
| for range goroutines { |
| go func() { |
| defer wg.Done() |
| for range 10000 { |
| select { |
| case <-timer.C: |
| default: |
| } |
| timer.Reset(Nanosecond) |
| } |
| }() |
| } |
| wg.Wait() |
| } |
|
|
| |
| |
| |
// BenchmarkParallelTimerLatency measures AfterFunc wakeup latency when
// one timer per P (minus one) expires at roughly the same instant. It
// reports average and maximum lateness in nanoseconds instead of
// ns/op.
func BenchmarkParallelTimerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	// One timer per P, leaving one P free for the benchmark loop
	// itself. Each timer gets its own stats entry; the trailing
	// padding field presumably keeps entries on separate cache lines
	// to avoid false sharing between callbacks — confirm.
	timerCount := gmp - 1
	stats := make([]struct {
		sum   float64
		max   Duration
		count int64
		_     [5]int64
	}, timerCount)

	// Ensure the runtime has the expected number of busy threads
	// before any timing starts.
	warmupScheduler(gmp)

	// Spin briefly so the scheduler settles before the first round of
	// timers is armed.
	doWork(30 * Millisecond)

	b.ResetTimer()

	const delay = Millisecond
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < b.N; i++ {
		wg.Add(timerCount)
		atomic.StoreInt32(&count, 0)
		for j := 0; j < timerCount; j++ {
			expectedWakeup := Now().Add(delay)
			AfterFunc(delay, func() {
				// Lateness relative to the intended wakeup; clamp
				// early wakeups to zero.
				late := Since(expectedWakeup)
				if late < 0 {
					late = 0
				}
				stats[j].count++
				stats[j].sum += float64(late.Nanoseconds())
				if late > stats[j].max {
					stats[j].max = late
				}
				atomic.AddInt32(&count, 1)
				// Spin until every callback has fired so each one
				// occupies its own thread simultaneously.
				for atomic.LoadInt32(&count) < int32(timerCount) {
				}
				wg.Done()
			})
		}

		// Spin (rather than block) until all callbacks have run, so
		// this goroutine also keeps its P busy.
		for atomic.LoadInt32(&count) < int32(timerCount) {
		}
		wg.Wait()

		// Burn a little time between rounds so wakeups don't overlap
		// with arming the next batch.
		doWork(Millisecond)
	}
	var total float64
	var samples float64
	maximum := Duration(0)
	for _, s := range stats {
		maximum = max(maximum, s.max)
		total += s.sum
		samples += float64(s.count)
	}
	// Replace the meaningless ns/op with latency metrics.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(total/samples, "avg-late-ns")
	b.ReportMetric(float64(maximum.Nanoseconds()), "max-late-ns")
}
|
|
| |
| |
// BenchmarkStaggeredTickerLatency measures ticker wakeup latency when
// many tickers are staggered across one tick interval and each wakeup
// performs a fixed amount of spin work. Reports average and maximum
// lateness in nanoseconds instead of ns/op.
func BenchmarkStaggeredTickerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	const delay = 3 * Millisecond

	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
			// Cap tickers-per-P so each P's total work still fits
			// within one tick interval.
			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
				tickerCount := gmp * tickersPerP
				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
					// Per-ticker stats; the padding field presumably
					// separates entries onto distinct cache lines to
					// avoid false sharing — confirm.
					stats := make([]struct {
						sum   float64
						max   Duration
						count int64
						_     [5]int64
					}, tickerCount)

					// Ensure the runtime has the expected number of
					// busy threads before timing.
					warmupScheduler(gmp)

					b.ResetTimer()

					var wg sync.WaitGroup
					wg.Add(tickerCount)
					for j := 0; j < tickerCount; j++ {
						// Stagger ticker start times across the
						// interval by spinning a fraction of it.
						doWork(delay / Duration(gmp))
						expectedWakeup := Now().Add(delay)
						ticker := NewTicker(delay)
						// NOTE: firstWake is unused; the goroutine
						// mutates the captured expectedWakeup instead.
						go func(c int, ticker *Ticker, firstWake Time) {
							defer ticker.Stop()

							for ; c > 0; c-- {
								<-ticker.C
								// Lateness vs. the expected tick;
								// clamp early wakeups to zero.
								late := Since(expectedWakeup)
								if late < 0 {
									late = 0
								}
								stats[j].count++
								stats[j].sum += float64(late.Nanoseconds())
								if late > stats[j].max {
									stats[j].max = late
								}
								expectedWakeup = expectedWakeup.Add(delay)
								// Simulate per-tick work.
								doWork(dur)
							}
							wg.Done()
						}(b.N, ticker, expectedWakeup)
					}
					wg.Wait()

					var total float64
					var samples float64
					// (shadows the builtin max; kept as-is)
					max := Duration(0)
					for _, s := range stats {
						if s.max > max {
							max = s.max
						}
						total += s.sum
						samples += float64(s.count)
					}
					// Replace the meaningless ns/op with latency
					// metrics.
					b.ReportMetric(0, "ns/op")
					b.ReportMetric(total/samples, "avg-late-ns")
					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
				})
			}
		})
	}
}
|
|
| |
| |
| func warmupScheduler(targetThreadCount int) { |
| var wg sync.WaitGroup |
| var count int32 |
| for i := 0; i < targetThreadCount; i++ { |
| wg.Add(1) |
| go func() { |
| atomic.AddInt32(&count, 1) |
| for atomic.LoadInt32(&count) < int32(targetThreadCount) { |
| |
| } |
|
|
| |
| doWork(Millisecond) |
| wg.Done() |
| }() |
| } |
| wg.Wait() |
| } |
|
|
| func doWork(dur Duration) { |
| start := Now() |
| for Since(start) < dur { |
| } |
| } |
|
|
| func BenchmarkAdjustTimers10000(b *testing.B) { |
| benchmark(b, func(pb *testing.PB) { |
| for pb.Next() { |
| const n = 10000 |
| timers := make([]*Timer, 0, n) |
| for range n { |
| t := AfterFunc(Hour, func() {}) |
| timers = append(timers, t) |
| } |
| timers[n-1].Reset(Nanosecond) |
| Sleep(Microsecond) |
| for _, t := range timers { |
| t.Stop() |
| } |
| } |
| }) |
| } |
|
|