| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | package fuzz |
| |
|
| | import ( |
| | "bytes" |
| | "context" |
| | "crypto/sha256" |
| | "errors" |
| | "fmt" |
| | "internal/godebug" |
| | "io" |
| | "math/bits" |
| | "os" |
| | "path/filepath" |
| | "reflect" |
| | "runtime" |
| | "strings" |
| | "time" |
| | ) |
| |
|
| | |
| | |
// CoordinateFuzzingOpts is a set of arguments for CoordinateFuzzing.
// The zero value is valid for each field unless specified otherwise.
type CoordinateFuzzingOpts struct {
	// Log is a writer for logging progress messages and warnings.
	// If nil, io.Discard will be used instead.
	Log io.Writer

	// Timeout is the amount of wall clock time to spend fuzzing after the
	// corpus has loaded. If zero, there will be no time limit.
	Timeout time.Duration

	// Limit is the number of random values to generate and test.
	// If zero, there will be no limit on the number of generated values.
	Limit int64

	// MinimizeTimeout is the amount of wall clock time to spend minimizing
	// after discovering a crasher. If zero, there will be no time limit.
	// Note: minimization is only enabled at all when MinimizeTimeout or
	// MinimizeLimit is positive (see newCoordinator).
	MinimizeTimeout time.Duration

	// MinimizeLimit is the maximum number of calls to the fuzz function to be
	// made while minimizing after finding a crash. If zero, there will be no
	// limit. See the note on MinimizeTimeout about enabling minimization.
	MinimizeLimit int64

	// Parallel is the number of worker processes to run in parallel.
	// If zero, CoordinateFuzzing will run GOMAXPROCS workers.
	Parallel int

	// Seed is a list of seed values added by the fuzz target with F.Add and
	// in testdata. Seed values with only Values set are marshaled into Data
	// by newCoordinator.
	Seed []CorpusEntry

	// Types is the list of types which make up a corpus entry.
	// Types must be set and must match values in Seed.
	Types []reflect.Type

	// CorpusDir is a directory where files containing values that crash the
	// code being tested may be written.
	CorpusDir string

	// CacheDir is a directory containing additional "interesting" values.
	// The fuzzer may derive new values from these, and may write new values here.
	CacheDir string
}
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
// CoordinateFuzzing creates several worker processes and communicates with
// them to test random inputs that could trigger crashes and expose bugs.
// The worker processes run the same binary in the same directory with the
// same environment variables as the coordinator process, except with the
// -test.fuzzworker flag prepended to the argument list.
//
// If a crash occurs, the error returned contains information about the crash
// (see crashError), which can be reported to the user.
func CoordinateFuzzing(ctx context.Context, opts CoordinateFuzzingOpts) (err error) {
	if err := ctx.Err(); err != nil {
		return err
	}
	if opts.Log == nil {
		opts.Log = io.Discard
	}
	if opts.Parallel == 0 {
		opts.Parallel = runtime.GOMAXPROCS(0)
	}
	if opts.Limit > 0 && int64(opts.Parallel) > opts.Limit {
		// Don't start more workers than we need.
		opts.Parallel = int(opts.Limit)
	}

	c, err := newCoordinator(opts)
	if err != nil {
		return err
	}

	if opts.Timeout > 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, opts.Timeout)
		defer cancel()
	}

	// fuzzCtx is used to stop workers, for example, after finding a crasher.
	fuzzCtx, cancelWorkers := context.WithCancel(ctx)
	defer cancelWorkers()
	doneC := ctx.Done()

	// stop is called when a worker encounters a fatal error or when all work
	// is done. It records the first non-cancellation error and cancels the
	// workers exactly once; later calls only update fuzzErr if appropriate.
	var fuzzErr error
	stopping := false
	stop := func(err error) {
		if shouldPrintDebugInfo() {
			_, file, line, ok := runtime.Caller(1)
			if ok {
				c.debugLogf("stop called at %s:%d. stopping: %t", file, line, stopping)
			} else {
				c.debugLogf("stop called at unknown. stopping: %t", stopping)
			}
		}

		if err == fuzzCtx.Err() || isInterruptError(err) {
			// Suppress cancellation and interruption errors: we expect these
			// when the deadline passes or the user presses ^C, and they are
			// not interesting to report.
			err = nil
		}
		if err != nil && (fuzzErr == nil || fuzzErr == ctx.Err()) {
			fuzzErr = err
		}
		if stopping {
			return
		}
		stopping = true
		cancelWorkers()
		// Setting doneC to nil disables the <-doneC case in the select below
		// so stop isn't called again for the same cancellation.
		doneC = nil
	}

	// If a crasher was found during minimization but the coordinator exits
	// before the minimized version is reported, write the unminimized crasher
	// to the corpus so the failure is not lost.
	crashWritten := false
	defer func() {
		if c.crashMinimizing == nil || crashWritten {
			return
		}
		werr := writeToCorpus(&c.crashMinimizing.entry, opts.CorpusDir)
		if werr != nil {
			err = fmt.Errorf("%w\n%v", err, werr)
			return
		}
		if err == nil {
			err = &crashError{
				path: c.crashMinimizing.entry.Path,
				err:  errors.New(c.crashMinimizing.crasherMsg),
			}
		}
	}()

	// Start the workers: same binary, same args (plus -test.fuzzworker),
	// same environment as this process.
	dir := ""
	binPath := os.Args[0]
	args := append([]string{"-test.fuzzworker"}, os.Args[1:]...)
	env := os.Environ()

	errC := make(chan error)
	workers := make([]*worker, opts.Parallel)
	for i := range workers {
		var err error
		workers[i], err = newWorker(c, dir, binPath, args, env)
		if err != nil {
			return err
		}
	}
	for i := range workers {
		w := workers[i]
		go func() {
			err := w.coordinate(fuzzCtx)
			if fuzzCtx.Err() != nil || isInterruptError(err) {
				err = nil
			}
			cleanErr := w.cleanup()
			if err == nil {
				err = cleanErr
			}
			errC <- err
		}()
	}

	// Main event loop: hand out inputs and minimization requests, collect
	// results, and stop when the context is done, the limit is reached, or
	// a crasher is found.
	activeWorkers := len(workers)
	statTicker := time.NewTicker(3 * time.Second)
	defer statTicker.Stop()
	defer c.logStats()

	c.logStats()
	for {
		// If the limit has been reached, stop sending new work.
		if c.opts.Limit > 0 && c.count >= c.opts.Limit {
			stop(nil)
		}

		// inputC is nil (disabled in the select) unless there is an input
		// ready and we are neither minimizing a crasher nor stopping.
		var inputC chan fuzzInput
		input, ok := c.peekInput()
		if ok && c.crashMinimizing == nil && !stopping {
			inputC = c.inputC
		}

		// minimizeC is likewise nil unless a minimization request is ready.
		var minimizeC chan fuzzMinimizeInput
		minimizeInput, ok := c.peekMinimizeInput()
		if ok && !stopping {
			minimizeC = c.minimizeC
		}

		select {
		case <-doneC:
			// The context was cancelled or the deadline passed. stop will
			// set doneC to nil so this case only fires once.
			stop(ctx.Err())

		case err := <-errC:
			// A worker terminated, possibly after encountering a fatal error.
			stop(err)
			activeWorkers--
			if activeWorkers == 0 {
				return fuzzErr
			}

		case result := <-c.resultC:
			// A worker finished testing an input; ignore it if we're stopping.
			if stopping {
				break
			}
			c.updateStats(result)

			if result.crasherMsg != "" {
				if c.warmupRun() && result.entry.IsSeed {
					// A seed corpus entry failed during warmup; report it
					// directly instead of treating it as a new crasher.
					target := filepath.Base(c.opts.CorpusDir)
					fmt.Fprintf(c.opts.Log, "failure while testing seed corpus entry: %s/%s\n", target, testName(result.entry.Parent))
					stop(errors.New(result.crasherMsg))
					break
				}
				if c.canMinimize() && result.canMinimize {
					if c.crashMinimizing != nil {
						// We are already minimizing a crasher; drop this one
						// rather than queueing a second minimization.
						if shouldPrintDebugInfo() {
							c.debugLogf("found unminimized crasher, skipping in favor of minimizable crasher")
						}
						break
					}
					// Found a crasher but haven't yet attempted to minimize it.
					// Send it back to a worker for minimization. Disable
					// inputC so no other inputs are sent meanwhile.
					c.crashMinimizing = &result
					fmt.Fprintf(c.opts.Log, "fuzz: minimizing %d-byte failing input file\n", len(result.entry.Data))
					c.queueForMinimization(result, nil)
				} else if !crashWritten {
					// Found a crasher that either can't or shouldn't be
					// minimized. Write it to the corpus and stop.
					err := writeToCorpus(&result.entry, opts.CorpusDir)
					if err == nil {
						crashWritten = true
						err = &crashError{
							path: result.entry.Path,
							err:  errors.New(result.crasherMsg),
						}
					}
					if shouldPrintDebugInfo() {
						c.debugLogf(
							"found crasher, id: %s, parent: %s, gen: %d, size: %d, exec time: %s",
							result.entry.Path,
							result.entry.Parent,
							result.entry.Generation,
							len(result.entry.Data),
							result.entryDuration,
						)
					}
					stop(err)
				}
			} else if result.coverageData != nil {
				if c.warmupRun() {
					// Warmup phase: fold baseline coverage into the mask and
					// track progress; announce when warmup completes.
					if shouldPrintDebugInfo() {
						c.debugLogf(
							"processed an initial input, id: %s, new bits: %d, size: %d, exec time: %s",
							result.entry.Parent,
							countBits(diffCoverage(c.coverageMask, result.coverageData)),
							len(result.entry.Data),
							result.entryDuration,
						)
					}
					c.updateCoverage(result.coverageData)
					c.warmupInputLeft--
					if c.warmupInputLeft == 0 {
						fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, gathering baseline coverage: %d/%d completed, now fuzzing with %d workers\n", c.elapsed(), c.warmupInputCount, c.warmupInputCount, c.opts.Parallel)
						if shouldPrintDebugInfo() {
							c.debugLogf(
								"finished processing input corpus, entries: %d, initial coverage bits: %d",
								len(c.corpus.entries),
								countBits(c.coverageMask),
							)
						}
					}
				} else if keepCoverage := diffCoverage(c.coverageMask, result.coverageData); keepCoverage != nil {
					// The input expanded coverage: either queue it for
					// minimization (keeping the new bits), or add it to the
					// corpus directly.
					if c.canMinimize() && result.canMinimize && c.crashMinimizing == nil {
						c.queueForMinimization(result, keepCoverage)
					} else {
						// Either minimization is not possible or not
						// worthwhile; keep the input as-is.
						inputSize := len(result.entry.Data)
						entryNew, err := c.addCorpusEntries(true, result.entry)
						if err != nil {
							stop(err)
							break
						}
						if !entryNew {
							if shouldPrintDebugInfo() {
								c.debugLogf(
									"ignoring duplicate input which increased coverage, id: %s",
									result.entry.Path,
								)
							}
							break
						}
						c.updateCoverage(keepCoverage)
						c.inputQueue.enqueue(result.entry)
						c.interestingCount++
						if shouldPrintDebugInfo() {
							c.debugLogf(
								"new interesting input, id: %s, parent: %s, gen: %d, new bits: %d, total bits: %d, size: %d, exec time: %s",
								result.entry.Path,
								result.entry.Parent,
								result.entry.Generation,
								countBits(keepCoverage),
								countBits(c.coverageMask),
								inputSize,
								result.entryDuration,
							)
						}
					}
				} else {
					// The worker thought this input was interesting, but the
					// coordinator's mask already has all of its bits (another
					// worker got there first).
					if shouldPrintDebugInfo() {
						c.debugLogf(
							"worker reported interesting input that doesn't expand coverage, id: %s, parent: %s, canMinimize: %t",
							result.entry.Path,
							result.entry.Parent,
							result.canMinimize,
						)
					}
				}
			} else if c.warmupRun() {
				// No coverage instrumentation: warmup just runs the seed
				// corpus once to check for failures.
				c.warmupInputLeft--
				if c.warmupInputLeft == 0 {
					fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, testing seed corpus: %d/%d completed, now fuzzing with %d workers\n", c.elapsed(), c.warmupInputCount, c.warmupInputCount, c.opts.Parallel)
					if shouldPrintDebugInfo() {
						c.debugLogf(
							"finished testing-only phase, entries: %d",
							len(c.corpus.entries),
						)
					}
				}
			}

		case inputC <- input:
			// Sent the next input to a worker.
			c.sentInput(input)

		case minimizeC <- minimizeInput:
			// Sent the next minimization request to a worker.
			c.sentMinimizeInput(minimizeInput)

		case <-statTicker.C:
			c.logStats()
		}
	}

	// The loop above only exits via `return fuzzErr` after all workers stop.
}
| |
|
| | |
| | |
| | |
// crashError wraps a crasher found during fuzzing, recording the path of the
// corpus file the failing input was written to so callers can report how to
// reproduce the failure.
type crashError struct {
	path string
	err  error
}

// Error returns the message of the underlying error.
func (e *crashError) Error() string {
	return e.err.Error()
}

// Unwrap exposes the underlying error to errors.Is/errors.As.
func (e *crashError) Unwrap() error {
	return e.err
}

// CrashPath returns the path of the corpus file holding the crashing input.
func (e *crashError) CrashPath() string {
	return e.path
}
| |
|
// corpus is the coordinator's in-memory corpus: the entries themselves plus a
// set of SHA-256 hashes of entry data, used to skip duplicate entries.
type corpus struct {
	entries []CorpusEntry
	hashes  map[[sha256.Size]byte]bool
}
| |
|
| | |
| | |
| | |
| | |
| | func (c *coordinator) addCorpusEntries(addToCache bool, entries ...CorpusEntry) (bool, error) { |
| | noDupes := true |
| | for _, e := range entries { |
| | data, err := corpusEntryData(e) |
| | if err != nil { |
| | return false, err |
| | } |
| | h := sha256.Sum256(data) |
| | if c.corpus.hashes[h] { |
| | noDupes = false |
| | continue |
| | } |
| | if addToCache { |
| | if err := writeToCorpus(&e, c.opts.CacheDir); err != nil { |
| | return false, err |
| | } |
| | |
| | |
| | |
| | e.Data = nil |
| | } |
| | c.corpus.hashes[h] = true |
| | c.corpus.entries = append(c.corpus.entries, e) |
| | } |
| | return noDupes, nil |
| | } |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
// CorpusEntry represents an individual input for fuzzing.
//
// This is a type alias for an anonymous struct (rather than a defined type)
// so that an equivalent struct type elsewhere is interchangeable with it.
type CorpusEntry = struct {
	// Parent identifies the input this entry was derived from.
	Parent string

	// Path is the path of the corpus file, if the entry was loaded from disk,
	// or an identifying name otherwise. It is used to reload data from disk
	// when Data is nil (see corpusEntryData) and in log messages.
	Path string

	// Data is the raw input data. It may be nil for entries whose data was
	// written to the cache directory (see addCorpusEntries), in which case it
	// is re-read from Path on demand.
	Data []byte

	// Values is the unmarshaled values from a corpus file.
	Values []any

	// Generation counts how many mutation steps produced this entry.
	// NOTE(review): not set or read in this file except for logging — confirm
	// semantics against the worker code.
	Generation int

	// IsSeed indicates whether this entry is part of the seed corpus.
	IsSeed bool
}
| |
|
| | |
| | |
| | func corpusEntryData(ce CorpusEntry) ([]byte, error) { |
| | if ce.Data != nil { |
| | return ce.Data, nil |
| | } |
| |
|
| | return os.ReadFile(ce.Path) |
| | } |
| |
|
// fuzzInput is a request sent from the coordinator to a worker to fuzz
// (or, during warmup, simply run) one corpus entry.
type fuzzInput struct {
	// entry is the value to fuzz.
	entry CorpusEntry

	// timeout is the amount of time the worker may spend on this input.
	timeout time.Duration

	// limit is the maximum number of calls to the fuzz function the worker
	// may make for this input. Zero means no limit. See peekInput for how
	// it is computed from the global Limit.
	limit int64

	// warmup indicates this is a warmup input sent before fuzzing begins;
	// warmup inputs get limit == 1 (see peekInput).
	warmup bool

	// coverageData is a copy of the coordinator's coverage mask at the time
	// the input is prepared (see peekInput).
	coverageData []byte
}
| |
|
// fuzzResult is a worker's report back to the coordinator after processing
// an input.
type fuzzResult struct {
	// entry is the input that was tested (possibly a mutated or minimized
	// derivative of the one sent).
	entry CorpusEntry

	// crasherMsg is the error message from a crash; "" if no crash was found.
	crasherMsg string

	// canMinimize reports whether the coordinator may send this result back
	// for minimization.
	canMinimize bool

	// coverageData is set when the worker observed coverage; the coordinator
	// diffs it against its own mask to decide whether the input is interesting.
	coverageData []byte

	// limit is the call budget that was given to the worker for this input.
	// It is subtracted back out of countWaiting in updateStats.
	limit int64

	// count is the number of calls to the fuzz function actually made.
	count int64

	// totalDuration is the total time the worker spent on this input.
	totalDuration time.Duration

	// entryDuration is the execution time of the reported entry itself
	// (used only in debug logging here).
	entryDuration time.Duration
}
| |
|
// fuzzMinimizeInput is a request sent from the coordinator to a worker to
// minimize either a crasher or a coverage-expanding input.
type fuzzMinimizeInput struct {
	// entry is the value to minimize.
	entry CorpusEntry

	// crasherMsg is the error message from the crash being minimized; "" when
	// minimizing a coverage-expanding input instead of a crasher.
	crasherMsg string

	// limit is the maximum number of calls to the fuzz function the worker
	// may make while minimizing. Zero means no limit. See peekMinimizeInput
	// for how it is derived from MinimizeLimit/Limit.
	limit int64

	// timeout is the amount of time to spend minimizing; set from
	// MinimizeTimeout when that option is positive (see peekMinimizeInput).
	timeout time.Duration

	// keepCoverage, when non-nil, is the set of new coverage bits this input
	// produced; set for coverage-expanding inputs so minimization can
	// preserve them. It is nil when minimizing a crasher.
	keepCoverage []byte
}
| |
|
| | |
| | |
// coordinator holds the state shared by the main CoordinateFuzzing event loop
// and its helper methods. It is only accessed from that single goroutine;
// workers communicate with it exclusively via inputC, minimizeC, and resultC.
type coordinator struct {
	opts CoordinateFuzzingOpts

	// startTime is the time the coordinator was created; elapsed() measures
	// from here.
	startTime time.Time

	// inputC carries fuzzing requests to workers. The event loop only selects
	// on it when an input is actually available.
	inputC chan fuzzInput

	// minimizeC carries minimization requests to workers.
	minimizeC chan fuzzMinimizeInput

	// resultC carries results back from workers.
	resultC chan fuzzResult

	// count is the number of calls to the fuzz function made so far.
	count int64

	// countLastLog is the value of count at the last logStats call, used to
	// compute the execs/sec rate.
	countLastLog int64

	// timeLastLog is the time of the last logStats call.
	timeLastLog time.Time

	// interestingCount is the number of coverage-expanding inputs added to
	// the corpus since fuzzing began.
	interestingCount int

	// warmupInputCount is the total number of inputs to process during the
	// warmup phase: the whole corpus when coverage is enabled, otherwise just
	// the seed entries (see newCoordinator).
	warmupInputCount int

	// warmupInputLeft is how many warmup inputs are still outstanding;
	// warmupRun() is true while it is positive.
	warmupInputLeft int

	// duration is the cumulative time workers have spent testing inputs.
	duration time.Duration

	// countWaiting is the sum of the call budgets ("limits") of inputs
	// currently handed out to workers but not yet reported back. Used so the
	// global Limit is not oversubscribed (see peekInput/peekMinimizeInput).
	countWaiting int64

	// corpus is the in-memory corpus plus its dedup hash set.
	corpus corpus

	// minimizationAllowed is true when at least one input type supports
	// minimization and a minimize limit/timeout was set (see newCoordinator).
	minimizationAllowed bool

	// inputQueue holds the inputs to fuzz next; refilled from the corpus when
	// it empties (see refillInputQueue).
	inputQueue queue

	// minimizeQueue holds inputs waiting to be minimized; cleared when a
	// crasher arrives (see queueForMinimization).
	minimizeQueue queue

	// crashMinimizing is the crasher currently being minimized, or nil.
	// While non-nil, no regular fuzz inputs are sent out.
	crashMinimizing *fuzzResult

	// coverageMask is the accumulated coverage bitmask; bits set here have
	// been observed by some input already. nil when the binary has no
	// coverage instrumentation.
	coverageMask []byte
}
| |
|
// newCoordinator builds a coordinator from opts: it marshals seed values,
// loads the cached corpus, decides whether minimization is allowed, sets up
// the warmup queue, and seeds an empty corpus with zero values if needed.
func newCoordinator(opts CoordinateFuzzingOpts) (*coordinator, error) {
	// Seed values with only Values set are marshaled into Data so they can be
	// hashed and written to disk like any other entry.
	for i := range opts.Seed {
		if opts.Seed[i].Data == nil && opts.Seed[i].Values != nil {
			opts.Seed[i].Data = marshalCorpusFile(opts.Seed[i].Values...)
		}
	}
	c := &coordinator{
		opts:        opts,
		startTime:   time.Now(),
		inputC:      make(chan fuzzInput),
		minimizeC:   make(chan fuzzMinimizeInput),
		resultC:     make(chan fuzzResult),
		timeLastLog: time.Now(),
		corpus:      corpus{hashes: make(map[[sha256.Size]byte]bool)},
	}
	if err := c.readCache(); err != nil {
		return nil, err
	}
	// Minimization is enabled only when a minimize budget was requested and
	// at least one of the input types can actually be minimized.
	if opts.MinimizeLimit > 0 || opts.MinimizeTimeout > 0 {
		for _, t := range opts.Types {
			if isMinimizable(t) {
				c.minimizationAllowed = true
				break
			}
		}
	}

	covSize := len(coverage())
	if covSize == 0 {
		fmt.Fprintf(c.opts.Log, "warning: the test binary was not built with coverage instrumentation, so fuzzing will run without coverage guidance and may be inefficient\n")
		// Without instrumentation, warmup only runs the seed entries once to
		// check for failures; there is no baseline coverage to gather.
		c.warmupInputCount = len(c.opts.Seed)
		for _, e := range c.opts.Seed {
			c.inputQueue.enqueue(e)
		}
	} else {
		// With instrumentation, warmup runs the whole corpus (seeds + cache)
		// to gather baseline coverage.
		c.warmupInputCount = len(c.corpus.entries)
		for _, e := range c.corpus.entries {
			c.inputQueue.enqueue(e)
		}
		// coverageMask starts all-zero; bits accumulate via updateCoverage.
		c.coverageMask = make([]byte, covSize)
	}
	c.warmupInputLeft = c.warmupInputCount

	// An empty corpus would leave nothing to fuzz; start from the zero value
	// of each input type, named by a prefix of its data's hash.
	if len(c.corpus.entries) == 0 {
		fmt.Fprintf(c.opts.Log, "warning: starting with empty corpus\n")
		var vals []any
		for _, t := range opts.Types {
			vals = append(vals, zeroValue(t))
		}
		data := marshalCorpusFile(vals...)
		h := sha256.Sum256(data)
		name := fmt.Sprintf("%x", h[:4])
		c.addCorpusEntries(false, CorpusEntry{Path: name, Data: data})
	}

	return c, nil
}
| |
|
// updateStats folds a worker result into the coordinator's counters: calls
// actually made, the outstanding call budget being returned, and time spent.
func (c *coordinator) updateStats(result fuzzResult) {
	c.count += result.count
	c.countWaiting -= result.limit
	c.duration += result.totalDuration
}
| |
|
// logStats writes one progress line appropriate to the current phase
// (warmup, crash minimization, or normal fuzzing) and records the count and
// time so the next call can compute an execs/sec rate.
func (c *coordinator) logStats() {
	now := time.Now()
	if c.warmupRun() {
		runSoFar := c.warmupInputCount - c.warmupInputLeft
		if coverageEnabled {
			fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, gathering baseline coverage: %d/%d completed\n", c.elapsed(), runSoFar, c.warmupInputCount)
		} else {
			fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, testing seed corpus: %d/%d completed\n", c.elapsed(), runSoFar, c.warmupInputCount)
		}
	} else if c.crashMinimizing != nil {
		fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, minimizing\n", c.elapsed())
	} else {
		// Rate over the interval since the previous log line, not since start.
		rate := float64(c.count-c.countLastLog) / now.Sub(c.timeLastLog).Seconds()
		if coverageEnabled {
			total := c.warmupInputCount + c.interestingCount
			fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, execs: %d (%.0f/sec), new interesting: %d (total: %d)\n", c.elapsed(), c.count, rate, c.interestingCount, total)
		} else {
			fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, execs: %d (%.0f/sec)\n", c.elapsed(), c.count, rate)
		}
	}
	c.countLastLog = c.count
	c.timeLastLog = now
}
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
// peekInput returns the next value that should be sent to workers.
// If the number of executions is limited, the returned input carries a
// per-worker share of the remaining budget. If no executions are left,
// peekInput returns a zero value and false.
//
// peekInput does not remove the input from the queue; the caller must call
// sentInput after the input has actually been sent.
//
// If the queue is empty and warmup has finished, the queue is refilled from
// the corpus, so fuzzing cycles through the corpus indefinitely.
func (c *coordinator) peekInput() (fuzzInput, bool) {
	if c.opts.Limit > 0 && c.count+c.countWaiting >= c.opts.Limit {
		// Budget exhausted, counting work already handed out but not yet
		// reported back.
		return fuzzInput{}, false
	}
	if c.inputQueue.len == 0 {
		if c.warmupRun() {
			// During warmup we only run the preloaded entries once; don't
			// refill from the corpus.
			return fuzzInput{}, false
		}
		c.refillInputQueue()
	}

	entry, ok := c.inputQueue.peek()
	if !ok {
		panic("input queue empty after refill")
	}
	input := fuzzInput{
		entry:   entry.(CorpusEntry),
		timeout: workerFuzzDuration,
		warmup:  c.warmupRun(),
	}
	if c.coverageMask != nil {
		// Give the worker a private copy of the current coverage mask.
		input.coverageData = bytes.Clone(c.coverageMask)
	}
	if input.warmup {
		// Warmup executes each input exactly once, unmutated.
		input.limit = 1
		return input, true
	}

	if c.opts.Limit > 0 {
		// Split the budget evenly across workers (rounding up), but never
		// exceed what actually remains.
		input.limit = c.opts.Limit / int64(c.opts.Parallel)
		if c.opts.Limit%int64(c.opts.Parallel) > 0 {
			input.limit++
		}
		remaining := c.opts.Limit - c.count - c.countWaiting
		if input.limit > remaining {
			input.limit = remaining
		}
	}
	return input, true
}
| |
|
| | |
// sentInput records that an input was sent to a worker: it is removed from
// the queue and its call budget is reserved in countWaiting.
func (c *coordinator) sentInput(input fuzzInput) {
	c.inputQueue.dequeue()
	c.countWaiting += input.limit
}
| |
|
| | |
| | |
| | func (c *coordinator) refillInputQueue() { |
| | for _, e := range c.corpus.entries { |
| | c.inputQueue.enqueue(e) |
| | } |
| | } |
| |
|
| | |
| | |
| | func (c *coordinator) queueForMinimization(result fuzzResult, keepCoverage []byte) { |
| | if shouldPrintDebugInfo() { |
| | c.debugLogf( |
| | "queueing input for minimization, id: %s, parent: %s, keepCoverage: %t, crasher: %t", |
| | result.entry.Path, |
| | result.entry.Parent, |
| | keepCoverage != nil, |
| | result.crasherMsg != "", |
| | ) |
| | } |
| | if result.crasherMsg != "" { |
| | c.minimizeQueue.clear() |
| | } |
| |
|
| | input := fuzzMinimizeInput{ |
| | entry: result.entry, |
| | crasherMsg: result.crasherMsg, |
| | keepCoverage: keepCoverage, |
| | } |
| | c.minimizeQueue.enqueue(input) |
| | } |
| |
|
| | |
| | |
// peekMinimizeInput returns the next input that should be sent to a worker
// for minimization, with its timeout and call budget filled in. It does not
// dequeue; the caller must call sentMinimizeInput after sending.
func (c *coordinator) peekMinimizeInput() (fuzzMinimizeInput, bool) {
	if !c.canMinimize() {
		// Minimization is disabled, or the global execution budget is spent.
		return fuzzMinimizeInput{}, false
	}
	v, ok := c.minimizeQueue.peek()
	if !ok {
		return fuzzMinimizeInput{}, false
	}
	input := v.(fuzzMinimizeInput)

	if c.opts.MinimizeTimeout > 0 {
		input.timeout = c.opts.MinimizeTimeout
	}
	// Budget precedence: explicit MinimizeLimit wins; otherwise fall back to
	// the global Limit — all of it for a crasher, a per-worker share for a
	// coverage-expanding input.
	if c.opts.MinimizeLimit > 0 {
		input.limit = c.opts.MinimizeLimit
	} else if c.opts.Limit > 0 {
		if input.crasherMsg != "" {
			input.limit = c.opts.Limit
		} else {
			input.limit = c.opts.Limit / int64(c.opts.Parallel)
			if c.opts.Limit%int64(c.opts.Parallel) > 0 {
				input.limit++
			}
		}
	}
	// In all cases, never exceed what remains of the global budget.
	if c.opts.Limit > 0 {
		remaining := c.opts.Limit - c.count - c.countWaiting
		if input.limit > remaining {
			input.limit = remaining
		}
	}
	return input, true
}
| |
|
| | |
| | |
// sentMinimizeInput records that a minimization request was sent to a worker:
// it is removed from the queue and its call budget reserved in countWaiting.
func (c *coordinator) sentMinimizeInput(input fuzzMinimizeInput) {
	c.minimizeQueue.dequeue()
	c.countWaiting += input.limit
}
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
// warmupRun reports whether the coordinator is still in the warmup phase,
// i.e. some of the initially queued inputs have not yet been processed.
func (c *coordinator) warmupRun() bool {
	return c.warmupInputLeft > 0
}
| |
|
| | |
| | |
| | |
| | func (c *coordinator) updateCoverage(newCoverage []byte) int { |
| | if len(newCoverage) != len(c.coverageMask) { |
| | panic(fmt.Sprintf("number of coverage counters changed at runtime: %d, expected %d", len(newCoverage), len(c.coverageMask))) |
| | } |
| | newBitCount := 0 |
| | for i := range newCoverage { |
| | diff := newCoverage[i] &^ c.coverageMask[i] |
| | newBitCount += bits.OnesCount8(diff) |
| | c.coverageMask[i] |= newCoverage[i] |
| | } |
| | return newBitCount |
| | } |
| |
|
| | |
| | |
// canMinimize reports whether the coordinator may send inputs to workers to
// be minimized: minimization must be enabled and, if a global execution limit
// is set, some budget (beyond what's already handed out) must remain.
func (c *coordinator) canMinimize() bool {
	return c.minimizationAllowed &&
		(c.opts.Limit == 0 || c.count+c.countWaiting < c.opts.Limit)
}
| |
|
// elapsed returns the time since the coordinator started, rounded to the
// nearest second for stable log output.
func (c *coordinator) elapsed() time.Duration {
	return time.Since(c.startTime).Round(1 * time.Second)
}
| |
|
| | |
| | |
| | |
| | |
| | |
// readCache loads the seed entries and then the cached corpus entries from
// CacheDir into the in-memory corpus. Malformed cache files are tolerated:
// ReadCorpus still returns the good entries alongside a
// MalformedCorpusError, which is deliberately ignored here.
func (c *coordinator) readCache() error {
	if _, err := c.addCorpusEntries(false, c.opts.Seed...); err != nil {
		return err
	}
	entries, err := ReadCorpus(c.opts.CacheDir, c.opts.Types)
	if err != nil {
		if _, ok := err.(*MalformedCorpusError); !ok {
			// Anything other than malformed files (e.g. I/O errors) is fatal.
			return err
		}
		// Malformed files are skipped; `entries` still holds the valid ones.
		// NOTE(review): malformed cache files are silently ignored here —
		// presumably reported elsewhere, if at all.
	}
	if _, err := c.addCorpusEntries(false, entries...); err != nil {
		return err
	}
	return nil
}
| |
|
| | |
| | |
| | |
| | type MalformedCorpusError struct { |
| | errs []error |
| | } |
| |
|
| | func (e *MalformedCorpusError) Error() string { |
| | var msgs []string |
| | for _, s := range e.errs { |
| | msgs = append(msgs, s.Error()) |
| | } |
| | return strings.Join(msgs, "\n") |
| | } |
| |
|
| | |
| | |
| | |
| | |
// ReadCorpus reads the corpus from dir. Entries that parse and match types
// are returned; a missing directory yields (nil, nil). If some files are
// malformed, the valid entries are still returned together with a
// *MalformedCorpusError describing every bad file, so the caller can decide
// whether to continue.
func ReadCorpus(dir string, types []reflect.Type) ([]CorpusEntry, error) {
	files, err := os.ReadDir(dir)
	if os.IsNotExist(err) {
		// No corpus directory is not an error; there is just nothing to load.
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("reading seed corpus from testdata: %v", err)
	}
	var corpus []CorpusEntry
	var errs []error
	for _, file := range files {
		// Subdirectories are skipped; only regular files at the top level of
		// dir are treated as corpus entries.
		if file.IsDir() {
			continue
		}
		filename := filepath.Join(dir, file.Name())
		data, err := os.ReadFile(filename)
		if err != nil {
			// An unreadable file aborts the whole read, unlike a malformed
			// one, which is only collected into errs below.
			return nil, fmt.Errorf("failed to read corpus file: %v", err)
		}
		var vals []any
		vals, err = readCorpusData(data, types)
		if err != nil {
			errs = append(errs, fmt.Errorf("%q: %v", filename, err))
			continue
		}
		corpus = append(corpus, CorpusEntry{Path: filename, Values: vals})
	}
	if len(errs) > 0 {
		return corpus, &MalformedCorpusError{errs: errs}
	}
	return corpus, nil
}
| |
|
// readCorpusData unmarshals one corpus file's bytes and validates that the
// resulting values match the fuzz target's parameter types.
func readCorpusData(data []byte, types []reflect.Type) ([]any, error) {
	vals, err := unmarshalCorpusFile(data)
	if err != nil {
		return nil, fmt.Errorf("unmarshal: %v", err)
	}
	if err = CheckCorpus(vals, types); err != nil {
		return nil, err
	}
	return vals, nil
}
| |
|
| | |
| | |
| | func CheckCorpus(vals []any, types []reflect.Type) error { |
| | if len(vals) != len(types) { |
| | return fmt.Errorf("wrong number of values in corpus entry: %d, want %d", len(vals), len(types)) |
| | } |
| | valsT := make([]reflect.Type, len(vals)) |
| | for valsI, v := range vals { |
| | valsT[valsI] = reflect.TypeOf(v) |
| | } |
| | for i := range types { |
| | if valsT[i] != types[i] { |
| | return fmt.Errorf("mismatched types in corpus entry: %v, want %v", valsT, types) |
| | } |
| | } |
| | return nil |
| | } |
| |
|
| | |
| | |
| | |
| | |
| | func writeToCorpus(entry *CorpusEntry, dir string) (err error) { |
| | sum := fmt.Sprintf("%x", sha256.Sum256(entry.Data))[:16] |
| | entry.Path = filepath.Join(dir, sum) |
| | if err := os.MkdirAll(dir, 0777); err != nil { |
| | return err |
| | } |
| | if err := os.WriteFile(entry.Path, entry.Data, 0666); err != nil { |
| | os.Remove(entry.Path) |
| | return err |
| | } |
| | return nil |
| | } |
| |
|
// testName returns the base name of a corpus entry path, for use in
// user-facing failure messages.
func testName(path string) string {
	return filepath.Base(path)
}
| |
|
| | func zeroValue(t reflect.Type) any { |
| | for _, v := range zeroVals { |
| | if reflect.TypeOf(v) == t { |
| | return v |
| | } |
| | } |
| | panic(fmt.Sprintf("unsupported type: %v", t)) |
| | } |
| |
|
| | var zeroVals []any = []any{ |
| | []byte(""), |
| | string(""), |
| | false, |
| | byte(0), |
| | rune(0), |
| | float32(0), |
| | float64(0), |
| | int(0), |
| | int8(0), |
| | int16(0), |
| | int32(0), |
| | int64(0), |
| | uint(0), |
| | uint8(0), |
| | uint16(0), |
| | uint32(0), |
| | uint64(0), |
| | } |
| |
|
// debugInfo is true when GODEBUG contains fuzzdebug=1, enabling verbose
// coordinator logging via debugLogf.
var debugInfo = godebug.New("#fuzzdebug").Value() == "1"

// shouldPrintDebugInfo reports whether debug logging is enabled.
func shouldPrintDebugInfo() bool {
	return debugInfo
}
| |
|
// debugLogf writes a nanosecond-timestamped "DEBUG" line to the
// coordinator's log.
func (c *coordinator) debugLogf(format string, args ...any) {
	t := time.Now().Format("2006-01-02 15:04:05.999999999")
	fmt.Fprintf(c.opts.Log, t+" DEBUG "+format+"\n", args...)
}
| |
|