| | |
| | |
| | |
| |
|
| | package main |
| |
|
| | import ( |
| | "fmt" |
| | "internal/trace" |
| | "internal/trace/traceviewer" |
| | "strings" |
| | ) |
| |
|
| | |
| | |
| | |
// generator is an interface for processing a parsed trace and emitting
// trace viewer events. runGenerator dispatches each event to exactly one
// of these methods based on the event's kind and resource scope.
type generator interface {
	// Global events.

	// Sync is called for each sync (generation-boundary) event.
	Sync()
	// StackSample is called for each CPU profile stack sample event.
	StackSample(ctx *traceContext, ev *trace.Event)
	// GlobalRange is called for range events with no resource scope.
	GlobalRange(ctx *traceContext, ev *trace.Event)
	// GlobalMetric is called for each metric event.
	GlobalMetric(ctx *traceContext, ev *trace.Event)

	// Goroutine-scoped events.

	// GoroutineLabel is called for label events scoped to a goroutine.
	GoroutineLabel(ctx *traceContext, ev *trace.Event)
	// GoroutineRange is called for range events scoped to a goroutine.
	GoroutineRange(ctx *traceContext, ev *trace.Event)
	// GoroutineTransition is called for state transitions of goroutines.
	GoroutineTransition(ctx *traceContext, ev *trace.Event)

	// Proc-scoped events.

	// ProcRange is called for range events scoped to a proc.
	ProcRange(ctx *traceContext, ev *trace.Event)
	// ProcTransition is called for state transitions of procs.
	ProcTransition(ctx *traceContext, ev *trace.Event)

	// User annotations.

	// Log is called for each user log event.
	Log(ctx *traceContext, ev *trace.Event)

	// Finish is called after the last event, to flush any pending state.
	Finish(ctx *traceContext)
}
| |
|
| | |
| | func runGenerator(ctx *traceContext, g generator, parsed *parsedTrace, opts *genOpts) { |
| | for i := range parsed.events { |
| | ev := &parsed.events[i] |
| |
|
| | switch ev.Kind() { |
| | case trace.EventSync: |
| | g.Sync() |
| | case trace.EventStackSample: |
| | g.StackSample(ctx, ev) |
| | case trace.EventRangeBegin, trace.EventRangeActive, trace.EventRangeEnd: |
| | r := ev.Range() |
| | switch r.Scope.Kind { |
| | case trace.ResourceGoroutine: |
| | g.GoroutineRange(ctx, ev) |
| | case trace.ResourceProc: |
| | g.ProcRange(ctx, ev) |
| | case trace.ResourceNone: |
| | g.GlobalRange(ctx, ev) |
| | } |
| | case trace.EventMetric: |
| | g.GlobalMetric(ctx, ev) |
| | case trace.EventLabel: |
| | l := ev.Label() |
| | if l.Resource.Kind == trace.ResourceGoroutine { |
| | g.GoroutineLabel(ctx, ev) |
| | } |
| | case trace.EventStateTransition: |
| | switch ev.StateTransition().Resource.Kind { |
| | case trace.ResourceProc: |
| | g.ProcTransition(ctx, ev) |
| | case trace.ResourceGoroutine: |
| | g.GoroutineTransition(ctx, ev) |
| | } |
| | case trace.EventLog: |
| | g.Log(ctx, ev) |
| | } |
| | } |
| | for i, task := range opts.tasks { |
| | emitTask(ctx, task, i) |
| | if opts.mode&traceviewer.ModeGoroutineOriented != 0 { |
| | for _, region := range task.Regions { |
| | emitRegion(ctx, region) |
| | } |
| | } |
| | } |
| | g.Finish(ctx) |
| | } |
| |
|
| | |
| | |
| | |
| | |
| | func emitTask(ctx *traceContext, task *trace.UserTaskSummary, sortIndex int) { |
| | |
| | var startStack, endStack trace.Stack |
| | var startG, endG trace.GoID |
| | startTime, endTime := ctx.startTime, ctx.endTime |
| | if task.Start != nil { |
| | startStack = task.Start.Stack() |
| | startG = task.Start.Goroutine() |
| | startTime = task.Start.Time() |
| | } |
| | if task.End != nil { |
| | endStack = task.End.Stack() |
| | endG = task.End.Goroutine() |
| | endTime = task.End.Time() |
| | } |
| | arg := struct { |
| | ID uint64 `json:"id"` |
| | StartG uint64 `json:"start_g,omitempty"` |
| | EndG uint64 `json:"end_g,omitempty"` |
| | }{ |
| | ID: uint64(task.ID), |
| | StartG: uint64(startG), |
| | EndG: uint64(endG), |
| | } |
| |
|
| | |
| | ctx.Task(uint64(task.ID), fmt.Sprintf("T%d %s", task.ID, task.Name), sortIndex) |
| | ctx.TaskSlice(traceviewer.SliceEvent{ |
| | Name: task.Name, |
| | Ts: ctx.elapsed(startTime), |
| | Dur: endTime.Sub(startTime), |
| | Resource: uint64(task.ID), |
| | Stack: ctx.Stack(viewerFrames(startStack)), |
| | EndStack: ctx.Stack(viewerFrames(endStack)), |
| | Arg: arg, |
| | }) |
| | |
| | if task.Parent != nil && task.Start != nil && task.Start.Kind() == trace.EventTaskBegin { |
| | ctx.TaskArrow(traceviewer.ArrowEvent{ |
| | Name: "newTask", |
| | Start: ctx.elapsed(task.Start.Time()), |
| | End: ctx.elapsed(task.Start.Time()), |
| | FromResource: uint64(task.Parent.ID), |
| | ToResource: uint64(task.ID), |
| | FromStack: ctx.Stack(viewerFrames(task.Start.Stack())), |
| | }) |
| | } |
| | } |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | func emitRegion(ctx *traceContext, region *trace.UserRegionSummary) { |
| | if region.Name == "" { |
| | return |
| | } |
| | |
| | var startStack, endStack trace.Stack |
| | goroutine := trace.NoGoroutine |
| | startTime, endTime := ctx.startTime, ctx.endTime |
| | if region.Start != nil { |
| | startStack = region.Start.Stack() |
| | startTime = region.Start.Time() |
| | goroutine = region.Start.Goroutine() |
| | } |
| | if region.End != nil { |
| | endStack = region.End.Stack() |
| | endTime = region.End.Time() |
| | goroutine = region.End.Goroutine() |
| | } |
| | if goroutine == trace.NoGoroutine { |
| | return |
| | } |
| | arg := struct { |
| | TaskID uint64 `json:"taskid"` |
| | }{ |
| | TaskID: uint64(region.TaskID), |
| | } |
| | ctx.AsyncSlice(traceviewer.AsyncSliceEvent{ |
| | SliceEvent: traceviewer.SliceEvent{ |
| | Name: region.Name, |
| | Ts: ctx.elapsed(startTime), |
| | Dur: endTime.Sub(startTime), |
| | Resource: uint64(goroutine), |
| | Stack: ctx.Stack(viewerFrames(startStack)), |
| | EndStack: ctx.Stack(viewerFrames(endStack)), |
| | Arg: arg, |
| | }, |
| | Category: "Region", |
| | Scope: fmt.Sprintf("%x", region.TaskID), |
| | TaskColorIndex: uint64(region.TaskID), |
| | }) |
| | } |
| |
|
| | |
| |
|
| | |
| | |
// stackSampleGenerator provides a generic implementation of the
// generator's StackSample method, parameterized over the resource type a
// sample is attributed to.
type stackSampleGenerator[R resource] struct {
	// getResource returns the ID of the resource (e.g. goroutine or
	// proc) that a stack sample event should be attributed to.
	getResource func(*trace.Event) R
}
| |
|
| | |
| | func (g *stackSampleGenerator[R]) StackSample(ctx *traceContext, ev *trace.Event) { |
| | id := g.getResource(ev) |
| | if id == R(noResource) { |
| | |
| | return |
| | } |
| | ctx.Instant(traceviewer.InstantEvent{ |
| | Name: "CPU profile sample", |
| | Ts: ctx.elapsed(ev.Time()), |
| | Resource: uint64(id), |
| | Stack: ctx.Stack(viewerFrames(ev.Stack())), |
| | }) |
| | } |
| |
|
| | |
| | |
// globalRangeGenerator provides an implementation of the generator's
// GlobalRange and Finish methods for ranges that aren't scoped to any
// particular resource (e.g. GC-related ranges).
type globalRangeGenerator struct {
	// ranges tracks currently-active global ranges, keyed by range name.
	// Created lazily on first use.
	ranges map[string]activeRange
	// seenSync counts the sync events observed so far.
	seenSync int
}
| |
|
| | |
// Sync records that a sync (generation-boundary) event was observed.
// GlobalRange uses this count to decide how to treat range-active events.
func (g *globalRangeGenerator) Sync() {
	g.seenSync++
}
| |
|
| | |
| | |
| | func (g *globalRangeGenerator) GlobalRange(ctx *traceContext, ev *trace.Event) { |
| | if g.ranges == nil { |
| | g.ranges = make(map[string]activeRange) |
| | } |
| | r := ev.Range() |
| | switch ev.Kind() { |
| | case trace.EventRangeBegin: |
| | g.ranges[r.Name] = activeRange{ev.Time(), ev.Stack()} |
| | case trace.EventRangeActive: |
| | |
| | |
| | if g.seenSync < 2 { |
| | |
| | g.ranges[r.Name] = activeRange{ctx.startTime, ev.Stack()} |
| | } |
| | case trace.EventRangeEnd: |
| | |
| | |
| | ar := g.ranges[r.Name] |
| | if strings.Contains(r.Name, "GC") { |
| | ctx.Slice(traceviewer.SliceEvent{ |
| | Name: r.Name, |
| | Ts: ctx.elapsed(ar.time), |
| | Dur: ev.Time().Sub(ar.time), |
| | Resource: traceviewer.GCP, |
| | Stack: ctx.Stack(viewerFrames(ar.stack)), |
| | EndStack: ctx.Stack(viewerFrames(ev.Stack())), |
| | }) |
| | } |
| | delete(g.ranges, r.Name) |
| | } |
| | } |
| |
|
| | |
| | func (g *globalRangeGenerator) Finish(ctx *traceContext) { |
| | for name, ar := range g.ranges { |
| | if !strings.Contains(name, "GC") { |
| | continue |
| | } |
| | ctx.Slice(traceviewer.SliceEvent{ |
| | Name: name, |
| | Ts: ctx.elapsed(ar.time), |
| | Dur: ctx.endTime.Sub(ar.time), |
| | Resource: traceviewer.GCP, |
| | Stack: ctx.Stack(viewerFrames(ar.stack)), |
| | }) |
| | } |
| | } |
| |
|
| | |
// globalMetricGenerator provides a stateless implementation of the
// generator's GlobalMetric method.
type globalMetricGenerator struct {
}
| |
|
| | |
| | func (g *globalMetricGenerator) GlobalMetric(ctx *traceContext, ev *trace.Event) { |
| | m := ev.Metric() |
| | switch m.Name { |
| | case "/memory/classes/heap/objects:bytes": |
| | ctx.HeapAlloc(ctx.elapsed(ev.Time()), m.Value.Uint64()) |
| | case "/gc/heap/goal:bytes": |
| | ctx.HeapGoal(ctx.elapsed(ev.Time()), m.Value.Uint64()) |
| | case "/sched/gomaxprocs:threads": |
| | ctx.Gomaxprocs(m.Value.Uint64()) |
| | } |
| | } |
| |
|
| | |
| | |
// procRangeGenerator provides an implementation of the generator's
// ProcRange and Finish methods for ranges scoped to a proc.
type procRangeGenerator struct {
	// ranges tracks currently-active proc ranges, keyed by the full
	// range value (name plus scope). Created lazily on first use.
	ranges map[trace.Range]activeRange
	// seenSync counts the sync events observed so far.
	seenSync int
}
| |
|
| | |
// Sync records that a sync (generation-boundary) event was observed.
// ProcRange uses this count to decide how to treat range-active events.
func (g *procRangeGenerator) Sync() {
	g.seenSync++
}
| |
|
| | |
| | |
| | func (g *procRangeGenerator) ProcRange(ctx *traceContext, ev *trace.Event) { |
| | if g.ranges == nil { |
| | g.ranges = make(map[trace.Range]activeRange) |
| | } |
| | r := ev.Range() |
| | switch ev.Kind() { |
| | case trace.EventRangeBegin: |
| | g.ranges[r] = activeRange{ev.Time(), ev.Stack()} |
| | case trace.EventRangeActive: |
| | |
| | |
| | if g.seenSync < 2 { |
| | |
| | g.ranges[r] = activeRange{ctx.startTime, ev.Stack()} |
| | } |
| | case trace.EventRangeEnd: |
| | |
| | ar := g.ranges[r] |
| | ctx.Slice(traceviewer.SliceEvent{ |
| | Name: r.Name, |
| | Ts: ctx.elapsed(ar.time), |
| | Dur: ev.Time().Sub(ar.time), |
| | Resource: uint64(r.Scope.Proc()), |
| | Stack: ctx.Stack(viewerFrames(ar.stack)), |
| | EndStack: ctx.Stack(viewerFrames(ev.Stack())), |
| | }) |
| | delete(g.ranges, r) |
| | } |
| | } |
| |
|
| | |
| | func (g *procRangeGenerator) Finish(ctx *traceContext) { |
| | for r, ar := range g.ranges { |
| | ctx.Slice(traceviewer.SliceEvent{ |
| | Name: r.Name, |
| | Ts: ctx.elapsed(ar.time), |
| | Dur: ctx.endTime.Sub(ar.time), |
| | Resource: uint64(r.Scope.Proc()), |
| | Stack: ctx.Stack(viewerFrames(ar.stack)), |
| | }) |
| | } |
| | } |
| |
|
| | |
// activeRange represents a range that is currently in progress: the time
// it began (or the trace start, for ranges already active at the start
// of the window) and the stack at that point.
type activeRange struct {
	time  trace.Time
	stack trace.Stack
}
| |
|
| | |
// completedRange represents a range with both endpoints resolved, along
// with an optional argument payload for the emitted event.
// NOTE(review): not referenced in this chunk; presumably used by other
// generators in the package — confirm before removing.
type completedRange struct {
	name       string
	startTime  trace.Time
	endTime    trace.Time
	startStack trace.Stack
	endStack   trace.Stack
	arg        any
}
| |
|
// logEventGenerator provides a generic implementation of the generator's
// Log method, parameterized over the resource type a log event is
// attributed to.
type logEventGenerator[R resource] struct {
	// getResource returns the ID of the resource (e.g. goroutine or
	// proc) that a log event should be attributed to.
	getResource func(*trace.Event) R
}
| |
|
| | |
| | func (g *logEventGenerator[R]) Log(ctx *traceContext, ev *trace.Event) { |
| | id := g.getResource(ev) |
| | if id == R(noResource) { |
| | |
| | return |
| | } |
| |
|
| | |
| | log := ev.Log() |
| | name := log.Message |
| | if log.Category != "" { |
| | name = "[" + log.Category + "] " + name |
| | } |
| |
|
| | |
| | ctx.Instant(traceviewer.InstantEvent{ |
| | Name: name, |
| | Ts: ctx.elapsed(ev.Time()), |
| | Category: "user event", |
| | Resource: uint64(id), |
| | Stack: ctx.Stack(viewerFrames(ev.Stack())), |
| | }) |
| | } |
| |
|